bxe.c revision 316539
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 316539 2017-04-05 19:33:04Z sbruno $");
29
30#define BXE_DRIVER_VERSION "1.78.90"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero(0) for older kernels that don't include this changeset
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this at pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        BRCM_VENDORID,
171        CHIP_NUM_57840_2_20,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 2x20GbE"
174    },
175    {
176        BRCM_VENDORID,
177        CHIP_NUM_57840_MF,
178        PCI_ANY_ID, PCI_ANY_ID,
179        "QLogic NetXtreme II BCM57840 MF 10GbE"
180    },
181    {
182        0, 0, 0, 0, NULL
183    }
184};
185
186MALLOC_DECLARE(M_BXE_ILT);
187MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
188
189/*
190 * FreeBSD device entry points.
191 */
192static int bxe_probe(device_t);
193static int bxe_attach(device_t);
194static int bxe_detach(device_t);
195static int bxe_shutdown(device_t);
196
197/*
198 * FreeBSD KLD module/device interface event handler method.
199 */
200static device_method_t bxe_methods[] = {
201    /* Device interface (device_if.h) */
202    DEVMETHOD(device_probe,     bxe_probe),
203    DEVMETHOD(device_attach,    bxe_attach),
204    DEVMETHOD(device_detach,    bxe_detach),
205    DEVMETHOD(device_shutdown,  bxe_shutdown),
206    /* Bus interface (bus_if.h) */
207    DEVMETHOD(bus_print_child,  bus_generic_print_child),
208    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
209    KOBJMETHOD_END
210};
211
212/*
213 * FreeBSD KLD Module data declaration
214 */
215static driver_t bxe_driver = {
216    "bxe",                   /* module name */
217    bxe_methods,             /* event handler */
218    sizeof(struct bxe_softc) /* extra data */
219};
220
221/*
222 * FreeBSD dev class is needed to manage dev instances and
223 * to associate with a bus type
224 */
225static devclass_t bxe_devclass;
226
227MODULE_DEPEND(bxe, pci, 1, 1, 1);
228MODULE_DEPEND(bxe, ether, 1, 1, 1);
229DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
230
231/* resources needed for unloading a previously loaded device */
232
233#define BXE_PREV_WAIT_NEEDED 1
234struct mtx bxe_prev_mtx;
235MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
236struct bxe_prev_list_node {
237    LIST_ENTRY(bxe_prev_list_node) node;
238    uint8_t bus;
239    uint8_t slot;
240    uint8_t path;
241    uint8_t aer; /* XXX automatic error recovery */
242    uint8_t undi;
243};
244static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
245
246static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
247
248/* Tunable device values... */
249
250SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
251
252/* Debug */
253unsigned long bxe_debug = 0;
254SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
255             &bxe_debug, 0, "Debug logging mode");
256
257/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
258static int bxe_interrupt_mode = INTR_MODE_MSIX;
259SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
260           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
261
262/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
263static int bxe_queue_count = 4;
264SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
265           &bxe_queue_count, 0, "Multi-Queue queue count");
266
267/* max number of buffers per queue (default RX_BD_USABLE) */
268static int bxe_max_rx_bufs = 0;
269SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
270           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
271
272/* Host interrupt coalescing RX tick timer (usecs) */
273static int bxe_hc_rx_ticks = 25;
274SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
275           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
276
277/* Host interrupt coalescing TX tick timer (usecs) */
278static int bxe_hc_tx_ticks = 50;
279SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
280           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
281
282/* Maximum number of Rx packets to process at a time */
283static int bxe_rx_budget = 0xffffffff;
284SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
285           &bxe_rx_budget, 0, "Rx processing budget");
286
287/* Maximum LRO aggregation size */
288static int bxe_max_aggregation_size = 0;
289SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
290           &bxe_max_aggregation_size, 0, "max aggregation size");
291
292/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
293static int bxe_mrrs = -1;
294SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
295           &bxe_mrrs, 0, "PCIe maximum read request size");
296
297/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
298static int bxe_autogreeen = 0;
299SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
300           &bxe_autogreeen, 0, "AutoGrEEEn support");
301
302/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
303static int bxe_udp_rss = 0;
304SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
305           &bxe_udp_rss, 0, "UDP RSS support");
306
307
308#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
309
310#define STATS_OFFSET32(stat_name)                   \
311    (offsetof(struct bxe_eth_stats, stat_name) / 4)
312
313#define Q_STATS_OFFSET32(stat_name)                   \
314    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
315
316static const struct {
317    uint32_t offset;
318    uint32_t size;
319    uint32_t flags;
320#define STATS_FLAGS_PORT  1
321#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
322#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
323    char string[STAT_NAME_LEN];
324} bxe_eth_stats_arr[] = {
325    { STATS_OFFSET32(total_bytes_received_hi),
326                8, STATS_FLAGS_BOTH, "rx_bytes" },
327    { STATS_OFFSET32(error_bytes_received_hi),
328                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
329    { STATS_OFFSET32(total_unicast_packets_received_hi),
330                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
331    { STATS_OFFSET32(total_multicast_packets_received_hi),
332                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
333    { STATS_OFFSET32(total_broadcast_packets_received_hi),
334                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
335    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
336                8, STATS_FLAGS_PORT, "rx_crc_errors" },
337    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
338                8, STATS_FLAGS_PORT, "rx_align_errors" },
339    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
340                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
341    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
342                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
343    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
344                8, STATS_FLAGS_PORT, "rx_fragments" },
345    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
346                8, STATS_FLAGS_PORT, "rx_jabbers" },
347    { STATS_OFFSET32(no_buff_discard_hi),
348                8, STATS_FLAGS_BOTH, "rx_discards" },
349    { STATS_OFFSET32(mac_filter_discard),
350                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
351    { STATS_OFFSET32(mf_tag_discard),
352                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
353    { STATS_OFFSET32(pfc_frames_received_hi),
354                8, STATS_FLAGS_PORT, "pfc_frames_received" },
355    { STATS_OFFSET32(pfc_frames_sent_hi),
356                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
357    { STATS_OFFSET32(brb_drop_hi),
358                8, STATS_FLAGS_PORT, "rx_brb_discard" },
359    { STATS_OFFSET32(brb_truncate_hi),
360                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
361    { STATS_OFFSET32(pause_frames_received_hi),
362                8, STATS_FLAGS_PORT, "rx_pause_frames" },
363    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
364                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
365    { STATS_OFFSET32(nig_timer_max),
366                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
367    { STATS_OFFSET32(total_bytes_transmitted_hi),
368                8, STATS_FLAGS_BOTH, "tx_bytes" },
369    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
370                8, STATS_FLAGS_PORT, "tx_error_bytes" },
371    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
372                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
373    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
374                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
375    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
376                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
377    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
378                8, STATS_FLAGS_PORT, "tx_mac_errors" },
379    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
380                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
381    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
382                8, STATS_FLAGS_PORT, "tx_single_collisions" },
383    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
384                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
385    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
386                8, STATS_FLAGS_PORT, "tx_deferred" },
387    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
388                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
389    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
390                8, STATS_FLAGS_PORT, "tx_late_collisions" },
391    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
392                8, STATS_FLAGS_PORT, "tx_total_collisions" },
393    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
394                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
395    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
396                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
397    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
398                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
399    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
400                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
401    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
402                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
403    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
404                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
405    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
406                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
407    { STATS_OFFSET32(pause_frames_sent_hi),
408                8, STATS_FLAGS_PORT, "tx_pause_frames" },
409    { STATS_OFFSET32(total_tpa_aggregations_hi),
410                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
411    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
412                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
413    { STATS_OFFSET32(total_tpa_bytes_hi),
414                8, STATS_FLAGS_FUNC, "tpa_bytes"},
415    { STATS_OFFSET32(eee_tx_lpi),
416                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
417    { STATS_OFFSET32(rx_calls),
418                4, STATS_FLAGS_FUNC, "rx_calls"},
419    { STATS_OFFSET32(rx_pkts),
420                4, STATS_FLAGS_FUNC, "rx_pkts"},
421    { STATS_OFFSET32(rx_tpa_pkts),
422                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
423    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
424                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
425    { STATS_OFFSET32(rx_bxe_service_rxsgl),
426                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
427    { STATS_OFFSET32(rx_jumbo_sge_pkts),
428                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
429    { STATS_OFFSET32(rx_soft_errors),
430                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
431    { STATS_OFFSET32(rx_hw_csum_errors),
432                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
433    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
434                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
435    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
436                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
437    { STATS_OFFSET32(rx_budget_reached),
438                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
439    { STATS_OFFSET32(tx_pkts),
440                4, STATS_FLAGS_FUNC, "tx_pkts"},
441    { STATS_OFFSET32(tx_soft_errors),
442                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
443    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
444                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
445    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
446                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
447    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
448                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
449    { STATS_OFFSET32(tx_ofld_frames_lso),
450                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
451    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
452                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
453    { STATS_OFFSET32(tx_encap_failures),
454                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
455    { STATS_OFFSET32(tx_hw_queue_full),
456                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
457    { STATS_OFFSET32(tx_hw_max_queue_depth),
458                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
459    { STATS_OFFSET32(tx_dma_mapping_failure),
460                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
461    { STATS_OFFSET32(tx_max_drbr_queue_depth),
462                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
463    { STATS_OFFSET32(tx_window_violation_std),
464                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
465    { STATS_OFFSET32(tx_window_violation_tso),
466                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
467    { STATS_OFFSET32(tx_chain_lost_mbuf),
468                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
469    { STATS_OFFSET32(tx_frames_deferred),
470                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
471    { STATS_OFFSET32(tx_queue_xoff),
472                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
473    { STATS_OFFSET32(mbuf_defrag_attempts),
474                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
475    { STATS_OFFSET32(mbuf_defrag_failures),
476                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
477    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
478                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
479    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
480                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
481    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
482                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
483    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
485    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
486                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
487    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
488                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
489    { STATS_OFFSET32(mbuf_alloc_tx),
490                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
491    { STATS_OFFSET32(mbuf_alloc_rx),
492                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
493    { STATS_OFFSET32(mbuf_alloc_sge),
494                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
495    { STATS_OFFSET32(mbuf_alloc_tpa),
496                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
497    { STATS_OFFSET32(tx_queue_full_return),
498                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
499    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
500                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
501    { STATS_OFFSET32(tx_request_link_down_failures),
502                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
503    { STATS_OFFSET32(bd_avail_too_less_failures),
504                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
505    { STATS_OFFSET32(tx_mq_not_empty),
506                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
507    { STATS_OFFSET32(nsegs_path1_errors),
508                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
509    { STATS_OFFSET32(nsegs_path2_errors),
510                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
511
512
513};
514
515static const struct {
516    uint32_t offset;
517    uint32_t size;
518    char string[STAT_NAME_LEN];
519} bxe_eth_q_stats_arr[] = {
520    { Q_STATS_OFFSET32(total_bytes_received_hi),
521                8, "rx_bytes" },
522    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
523                8, "rx_ucast_packets" },
524    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
525                8, "rx_mcast_packets" },
526    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
527                8, "rx_bcast_packets" },
528    { Q_STATS_OFFSET32(no_buff_discard_hi),
529                8, "rx_discards" },
530    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
531                8, "tx_bytes" },
532    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
533                8, "tx_ucast_packets" },
534    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
535                8, "tx_mcast_packets" },
536    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
537                8, "tx_bcast_packets" },
538    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
539                8, "tpa_aggregations" },
540    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
541                8, "tpa_aggregated_frames"},
542    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
543                8, "tpa_bytes"},
544    { Q_STATS_OFFSET32(rx_calls),
545                4, "rx_calls"},
546    { Q_STATS_OFFSET32(rx_pkts),
547                4, "rx_pkts"},
548    { Q_STATS_OFFSET32(rx_tpa_pkts),
549                4, "rx_tpa_pkts"},
550    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
551                4, "rx_erroneous_jumbo_sge_pkts"},
552    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
553                4, "rx_bxe_service_rxsgl"},
554    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
555                4, "rx_jumbo_sge_pkts"},
556    { Q_STATS_OFFSET32(rx_soft_errors),
557                4, "rx_soft_errors"},
558    { Q_STATS_OFFSET32(rx_hw_csum_errors),
559                4, "rx_hw_csum_errors"},
560    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
561                4, "rx_ofld_frames_csum_ip"},
562    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
563                4, "rx_ofld_frames_csum_tcp_udp"},
564    { Q_STATS_OFFSET32(rx_budget_reached),
565                4, "rx_budget_reached"},
566    { Q_STATS_OFFSET32(tx_pkts),
567                4, "tx_pkts"},
568    { Q_STATS_OFFSET32(tx_soft_errors),
569                4, "tx_soft_errors"},
570    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
571                4, "tx_ofld_frames_csum_ip"},
572    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
573                4, "tx_ofld_frames_csum_tcp"},
574    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
575                4, "tx_ofld_frames_csum_udp"},
576    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
577                4, "tx_ofld_frames_lso"},
578    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
579                4, "tx_ofld_frames_lso_hdr_splits"},
580    { Q_STATS_OFFSET32(tx_encap_failures),
581                4, "tx_encap_failures"},
582    { Q_STATS_OFFSET32(tx_hw_queue_full),
583                4, "tx_hw_queue_full"},
584    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
585                4, "tx_hw_max_queue_depth"},
586    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
587                4, "tx_dma_mapping_failure"},
588    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
589                4, "tx_max_drbr_queue_depth"},
590    { Q_STATS_OFFSET32(tx_window_violation_std),
591                4, "tx_window_violation_std"},
592    { Q_STATS_OFFSET32(tx_window_violation_tso),
593                4, "tx_window_violation_tso"},
594    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
595                4, "tx_chain_lost_mbuf"},
596    { Q_STATS_OFFSET32(tx_frames_deferred),
597                4, "tx_frames_deferred"},
598    { Q_STATS_OFFSET32(tx_queue_xoff),
599                4, "tx_queue_xoff"},
600    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
601                4, "mbuf_defrag_attempts"},
602    { Q_STATS_OFFSET32(mbuf_defrag_failures),
603                4, "mbuf_defrag_failures"},
604    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
605                4, "mbuf_rx_bd_alloc_failed"},
606    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
607                4, "mbuf_rx_bd_mapping_failed"},
608    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
609                4, "mbuf_rx_tpa_alloc_failed"},
610    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
611                4, "mbuf_rx_tpa_mapping_failed"},
612    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
613                4, "mbuf_rx_sge_alloc_failed"},
614    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
615                4, "mbuf_rx_sge_mapping_failed"},
616    { Q_STATS_OFFSET32(mbuf_alloc_tx),
617                4, "mbuf_alloc_tx"},
618    { Q_STATS_OFFSET32(mbuf_alloc_rx),
619                4, "mbuf_alloc_rx"},
620    { Q_STATS_OFFSET32(mbuf_alloc_sge),
621                4, "mbuf_alloc_sge"},
622    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
623                4, "mbuf_alloc_tpa"},
624    { Q_STATS_OFFSET32(tx_queue_full_return),
625                4, "tx_queue_full_return"},
626    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
627                4, "bxe_tx_mq_sc_state_failures"},
628    { Q_STATS_OFFSET32(tx_request_link_down_failures),
629                4, "tx_request_link_down_failures"},
630    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
631                4, "bd_avail_too_less_failures"},
632    { Q_STATS_OFFSET32(tx_mq_not_empty),
633                4, "tx_mq_not_empty"},
634    { Q_STATS_OFFSET32(nsegs_path1_errors),
635                4, "nsegs_path1_errors"},
636    { Q_STATS_OFFSET32(nsegs_path2_errors),
637                4, "nsegs_path2_errors"}
638
639
640};
641
642#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
643#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
644
645
646static void    bxe_cmng_fns_init(struct bxe_softc *sc,
647                                 uint8_t          read_cfg,
648                                 uint8_t          cmng_type);
649static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
650static void    storm_memset_cmng(struct bxe_softc *sc,
651                                 struct cmng_init *cmng,
652                                 uint8_t          port);
653static void    bxe_set_reset_global(struct bxe_softc *sc);
654static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
655static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
656                                 int              engine);
657static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
658static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
659                                   uint8_t          *global,
660                                   uint8_t          print);
661static void    bxe_int_disable(struct bxe_softc *sc);
662static int     bxe_release_leader_lock(struct bxe_softc *sc);
663static void    bxe_pf_disable(struct bxe_softc *sc);
664static void    bxe_free_fp_buffers(struct bxe_softc *sc);
665static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
666                                      struct bxe_fastpath *fp,
667                                      uint16_t            rx_bd_prod,
668                                      uint16_t            rx_cq_prod,
669                                      uint16_t            rx_sge_prod);
670static void    bxe_link_report_locked(struct bxe_softc *sc);
671static void    bxe_link_report(struct bxe_softc *sc);
672static void    bxe_link_status_update(struct bxe_softc *sc);
673static void    bxe_periodic_callout_func(void *xsc);
674static void    bxe_periodic_start(struct bxe_softc *sc);
675static void    bxe_periodic_stop(struct bxe_softc *sc);
676static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
677                                    uint16_t prev_index,
678                                    uint16_t index);
679static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
680                                     int                 queue);
681static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
682                                     uint16_t            index);
683static uint8_t bxe_txeof(struct bxe_softc *sc,
684                         struct bxe_fastpath *fp);
685static void    bxe_task_fp(struct bxe_fastpath *fp);
686static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
687                                     struct mbuf      *m,
688                                     uint8_t          contents);
689static int     bxe_alloc_mem(struct bxe_softc *sc);
690static void    bxe_free_mem(struct bxe_softc *sc);
691static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
692static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
693static int     bxe_interrupt_attach(struct bxe_softc *sc);
694static void    bxe_interrupt_detach(struct bxe_softc *sc);
695static void    bxe_set_rx_mode(struct bxe_softc *sc);
696static int     bxe_init_locked(struct bxe_softc *sc);
697static int     bxe_stop_locked(struct bxe_softc *sc);
698static __noinline int bxe_nic_load(struct bxe_softc *sc,
699                                   int              load_mode);
700static __noinline int bxe_nic_unload(struct bxe_softc *sc,
701                                     uint32_t         unload_mode,
702                                     uint8_t          keep_link);
703
704static void bxe_handle_sp_tq(void *context, int pending);
705static void bxe_handle_fp_tq(void *context, int pending);
706
707static int bxe_add_cdev(struct bxe_softc *sc);
708static void bxe_del_cdev(struct bxe_softc *sc);
709int bxe_grc_dump(struct bxe_softc *sc);
710static int bxe_alloc_buf_rings(struct bxe_softc *sc);
711static void bxe_free_buf_rings(struct bxe_softc *sc);
712
713/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
714uint32_t
715calc_crc32(uint8_t  *crc32_packet,
716           uint32_t crc32_length,
717           uint32_t crc32_seed,
718           uint8_t  complement)
719{
720   uint32_t byte         = 0;
721   uint32_t bit          = 0;
722   uint8_t  msb          = 0;
723   uint32_t temp         = 0;
724   uint32_t shft         = 0;
725   uint8_t  current_byte = 0;
726   uint32_t crc32_result = crc32_seed;
727   const uint32_t CRC32_POLY = 0x1edc6f41;
728
729   if ((crc32_packet == NULL) ||
730       (crc32_length == 0) ||
731       ((crc32_length % 8) != 0))
732    {
733        return (crc32_result);
734    }
735
736    for (byte = 0; byte < crc32_length; byte = byte + 1)
737    {
738        current_byte = crc32_packet[byte];
739        for (bit = 0; bit < 8; bit = bit + 1)
740        {
741            /* msb = crc32_result[31]; */
742            msb = (uint8_t)(crc32_result >> 31);
743
744            crc32_result = crc32_result << 1;
745
746            /* it (msb != current_byte[bit]) */
747            if (msb != (0x1 & (current_byte >> bit)))
748            {
749                crc32_result = crc32_result ^ CRC32_POLY;
750                /* crc32_result[0] = 1 */
751                crc32_result |= 1;
752            }
753        }
754    }
755
756    /* Last step is to:
757     * 1. "mirror" every bit
758     * 2. swap the 4 bytes
759     * 3. complement each bit
760     */
761
762    /* Mirror */
763    temp = crc32_result;
764    shft = sizeof(crc32_result) * 8 - 1;
765
766    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
767    {
768        temp <<= 1;
769        temp |= crc32_result & 1;
770        shft-- ;
771    }
772
773    /* temp[31-bit] = crc32_result[bit] */
774    temp <<= shft;
775
776    /* Swap */
777    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
778    {
779        uint32_t t0, t1, t2, t3;
780        t0 = (0x000000ff & (temp >> 24));
781        t1 = (0x0000ff00 & (temp >> 8));
782        t2 = (0x00ff0000 & (temp << 8));
783        t3 = (0xff000000 & (temp << 24));
784        crc32_result = t0 | t1 | t2 | t3;
785    }
786
787    /* Complement */
788    if (complement)
789    {
790        crc32_result = ~crc32_result;
791    }
792
793    return (crc32_result);
794}
795
796int
797bxe_test_bit(int                    nr,
798             volatile unsigned long *addr)
799{
800    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
801}
802
803void
804bxe_set_bit(unsigned int           nr,
805            volatile unsigned long *addr)
806{
807    atomic_set_acq_long(addr, (1 << nr));
808}
809
810void
811bxe_clear_bit(int                    nr,
812              volatile unsigned long *addr)
813{
814    atomic_clear_acq_long(addr, (1 << nr));
815}
816
817int
818bxe_test_and_set_bit(int                    nr,
819                       volatile unsigned long *addr)
820{
821    unsigned long x;
822    nr = (1 << nr);
823    do {
824        x = *addr;
825    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
826    // if (x & nr) bit_was_set; else bit_was_not_set;
827    return (x & nr);
828}
829
830int
831bxe_test_and_clear_bit(int                    nr,
832                       volatile unsigned long *addr)
833{
834    unsigned long x;
835    nr = (1 << nr);
836    do {
837        x = *addr;
838    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
839    // if (x & nr) bit_was_set; else bit_was_not_set;
840    return (x & nr);
841}
842
843int
844bxe_cmpxchg(volatile int *addr,
845            int          old,
846            int          new)
847{
848    int x;
849    do {
850        x = *addr;
851    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
852    return (x);
853}
854
855/*
856 * Get DMA memory from the OS.
857 *
858 * Validates that the OS has provided DMA buffers in response to a
859 * bus_dmamap_load call and saves the physical address of those buffers.
860 * When the callback is used the OS will return 0 for the mapping function
861 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
862 * failures back to the caller.
863 *
864 * Returns:
865 *   Nothing.
866 */
867static void
868bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
869{
870    struct bxe_dma *dma = arg;
871
872    if (error) {
873        dma->paddr = 0;
874        dma->nseg  = 0;
875        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
876    } else {
877        dma->paddr = segs->ds_addr;
878        dma->nseg  = nseg;
879    }
880}
881
882/*
883 * Allocate a block of memory and map it for DMA. No partial completions
884 * allowed and release any resources acquired if we can't acquire all
885 * resources.
886 *
887 * Returns:
888 *   0 = Success, !0 = Failure
889 */
890int
891bxe_dma_alloc(struct bxe_softc *sc,
892              bus_size_t       size,
893              struct bxe_dma   *dma,
894              const char       *msg)
895{
896    int rc;
897
898    if (dma->size > 0) {
899        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
900              (unsigned long)dma->size);
901        return (1);
902    }
903
904    memset(dma, 0, sizeof(*dma)); /* sanity */
905    dma->sc   = sc;
906    dma->size = size;
907    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
908
909    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
910                            BCM_PAGE_SIZE,      /* alignment */
911                            0,                  /* boundary limit */
912                            BUS_SPACE_MAXADDR,  /* restricted low */
913                            BUS_SPACE_MAXADDR,  /* restricted hi */
914                            NULL,               /* addr filter() */
915                            NULL,               /* addr filter() arg */
916                            size,               /* max map size */
917                            1,                  /* num discontinuous */
918                            size,               /* max seg size */
919                            BUS_DMA_ALLOCNOW,   /* flags */
920                            NULL,               /* lock() */
921                            NULL,               /* lock() arg */
922                            &dma->tag);         /* returned dma tag */
923    if (rc != 0) {
924        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
925        memset(dma, 0, sizeof(*dma));
926        return (1);
927    }
928
929    rc = bus_dmamem_alloc(dma->tag,
930                          (void **)&dma->vaddr,
931                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
932                          &dma->map);
933    if (rc != 0) {
934        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
935        bus_dma_tag_destroy(dma->tag);
936        memset(dma, 0, sizeof(*dma));
937        return (1);
938    }
939
940    rc = bus_dmamap_load(dma->tag,
941                         dma->map,
942                         dma->vaddr,
943                         size,
944                         bxe_dma_map_addr, /* BLOGD in here */
945                         dma,
946                         BUS_DMA_NOWAIT);
947    if (rc != 0) {
948        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
949        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
950        bus_dma_tag_destroy(dma->tag);
951        memset(dma, 0, sizeof(*dma));
952        return (1);
953    }
954
955    return (0);
956}
957
958void
959bxe_dma_free(struct bxe_softc *sc,
960             struct bxe_dma   *dma)
961{
962    if (dma->size > 0) {
963        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
964
965        bus_dmamap_sync(dma->tag, dma->map,
966                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
967        bus_dmamap_unload(dma->tag, dma->map);
968        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
969        bus_dma_tag_destroy(dma->tag);
970    }
971
972    memset(dma, 0, sizeof(*dma));
973}
974
975/*
976 * These indirect read and write routines are only during init.
977 * The locking is handled by the MCP.
978 */
979
980void
981bxe_reg_wr_ind(struct bxe_softc *sc,
982               uint32_t         addr,
983               uint32_t         val)
984{
985    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
986    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
987    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
988}
989
990uint32_t
991bxe_reg_rd_ind(struct bxe_softc *sc,
992               uint32_t         addr)
993{
994    uint32_t val;
995
996    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
997    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
998    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
999
1000    return (val);
1001}
1002
1003static int
1004bxe_acquire_hw_lock(struct bxe_softc *sc,
1005                    uint32_t         resource)
1006{
1007    uint32_t lock_status;
1008    uint32_t resource_bit = (1 << resource);
1009    int func = SC_FUNC(sc);
1010    uint32_t hw_lock_control_reg;
1011    int cnt;
1012
1013    /* validate the resource is within range */
1014    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1015        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1016            " resource_bit 0x%x\n", resource, resource_bit);
1017        return (-1);
1018    }
1019
1020    if (func <= 5) {
1021        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1022    } else {
1023        hw_lock_control_reg =
1024                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1025    }
1026
1027    /* validate the resource is not already taken */
1028    lock_status = REG_RD(sc, hw_lock_control_reg);
1029    if (lock_status & resource_bit) {
1030        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1031              resource, lock_status, resource_bit);
1032        return (-1);
1033    }
1034
1035    /* try every 5ms for 5 seconds */
1036    for (cnt = 0; cnt < 1000; cnt++) {
1037        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1038        lock_status = REG_RD(sc, hw_lock_control_reg);
1039        if (lock_status & resource_bit) {
1040            return (0);
1041        }
1042        DELAY(5000);
1043    }
1044
1045    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1046        resource, resource_bit);
1047    return (-1);
1048}
1049
1050static int
1051bxe_release_hw_lock(struct bxe_softc *sc,
1052                    uint32_t         resource)
1053{
1054    uint32_t lock_status;
1055    uint32_t resource_bit = (1 << resource);
1056    int func = SC_FUNC(sc);
1057    uint32_t hw_lock_control_reg;
1058
1059    /* validate the resource is within range */
1060    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1061        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1062            " resource_bit 0x%x\n", resource, resource_bit);
1063        return (-1);
1064    }
1065
1066    if (func <= 5) {
1067        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1068    } else {
1069        hw_lock_control_reg =
1070                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1071    }
1072
1073    /* validate the resource is currently taken */
1074    lock_status = REG_RD(sc, hw_lock_control_reg);
1075    if (!(lock_status & resource_bit)) {
1076        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1077              resource, lock_status, resource_bit);
1078        return (-1);
1079    }
1080
1081    REG_WR(sc, hw_lock_control_reg, resource_bit);
1082    return (0);
1083}
1084static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1085{
1086	BXE_PHY_LOCK(sc);
1087	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1088}
1089
1090static void bxe_release_phy_lock(struct bxe_softc *sc)
1091{
1092	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1093	BXE_PHY_UNLOCK(sc);
1094}
1095/*
1096 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1097 * had we done things the other way around, if two pfs from the same port
1098 * would attempt to access nvram at the same time, we could run into a
1099 * scenario such as:
1100 * pf A takes the port lock.
1101 * pf B succeeds in taking the same lock since they are from the same port.
1102 * pf A takes the per pf misc lock. Performs eeprom access.
1103 * pf A finishes. Unlocks the per pf misc lock.
1104 * Pf B takes the lock and proceeds to perform it's own access.
1105 * pf A unlocks the per port lock, while pf B is still working (!).
1106 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1107 * access corrupted by pf B).*
1108 */
1109static int
1110bxe_acquire_nvram_lock(struct bxe_softc *sc)
1111{
1112    int port = SC_PORT(sc);
1113    int count, i;
1114    uint32_t val = 0;
1115
1116    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1117    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1118
1119    /* adjust timeout for emulation/FPGA */
1120    count = NVRAM_TIMEOUT_COUNT;
1121    if (CHIP_REV_IS_SLOW(sc)) {
1122        count *= 100;
1123    }
1124
1125    /* request access to nvram interface */
1126    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1127           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1128
1129    for (i = 0; i < count*10; i++) {
1130        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1131        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1132            break;
1133        }
1134
1135        DELAY(5);
1136    }
1137
1138    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1139        BLOGE(sc, "Cannot get access to nvram interface "
1140            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1141            port, val);
1142        return (-1);
1143    }
1144
1145    return (0);
1146}
1147
1148static int
1149bxe_release_nvram_lock(struct bxe_softc *sc)
1150{
1151    int port = SC_PORT(sc);
1152    int count, i;
1153    uint32_t val = 0;
1154
1155    /* adjust timeout for emulation/FPGA */
1156    count = NVRAM_TIMEOUT_COUNT;
1157    if (CHIP_REV_IS_SLOW(sc)) {
1158        count *= 100;
1159    }
1160
1161    /* relinquish nvram interface */
1162    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1163           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1164
1165    for (i = 0; i < count*10; i++) {
1166        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1167        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1168            break;
1169        }
1170
1171        DELAY(5);
1172    }
1173
1174    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1175        BLOGE(sc, "Cannot free access to nvram interface "
1176            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1177            port, val);
1178        return (-1);
1179    }
1180
1181    /* release HW lock: protect against other PFs in PF Direct Assignment */
1182    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1183
1184    return (0);
1185}
1186
1187static void
1188bxe_enable_nvram_access(struct bxe_softc *sc)
1189{
1190    uint32_t val;
1191
1192    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1193
1194    /* enable both bits, even on read */
1195    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1196           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1197}
1198
1199static void
1200bxe_disable_nvram_access(struct bxe_softc *sc)
1201{
1202    uint32_t val;
1203
1204    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1205
1206    /* disable both bits, even after read */
1207    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1208           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1209                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1210}
1211
1212static int
1213bxe_nvram_read_dword(struct bxe_softc *sc,
1214                     uint32_t         offset,
1215                     uint32_t         *ret_val,
1216                     uint32_t         cmd_flags)
1217{
1218    int count, i, rc;
1219    uint32_t val;
1220
1221    /* build the command word */
1222    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1223
1224    /* need to clear DONE bit separately */
1225    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1226
1227    /* address of the NVRAM to read from */
1228    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1229           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1230
1231    /* issue a read command */
1232    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1233
1234    /* adjust timeout for emulation/FPGA */
1235    count = NVRAM_TIMEOUT_COUNT;
1236    if (CHIP_REV_IS_SLOW(sc)) {
1237        count *= 100;
1238    }
1239
1240    /* wait for completion */
1241    *ret_val = 0;
1242    rc = -1;
1243    for (i = 0; i < count; i++) {
1244        DELAY(5);
1245        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1246
1247        if (val & MCPR_NVM_COMMAND_DONE) {
1248            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1249            /* we read nvram data in cpu order
1250             * but ethtool sees it as an array of bytes
1251             * converting to big-endian will do the work
1252             */
1253            *ret_val = htobe32(val);
1254            rc = 0;
1255            break;
1256        }
1257    }
1258
1259    if (rc == -1) {
1260        BLOGE(sc, "nvram read timeout expired "
1261            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1262            offset, cmd_flags, val);
1263    }
1264
1265    return (rc);
1266}
1267
1268static int
1269bxe_nvram_read(struct bxe_softc *sc,
1270               uint32_t         offset,
1271               uint8_t          *ret_buf,
1272               int              buf_size)
1273{
1274    uint32_t cmd_flags;
1275    uint32_t val;
1276    int rc;
1277
1278    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1279        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1280              offset, buf_size);
1281        return (-1);
1282    }
1283
1284    if ((offset + buf_size) > sc->devinfo.flash_size) {
1285        BLOGE(sc, "Invalid parameter, "
1286                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1287              offset, buf_size, sc->devinfo.flash_size);
1288        return (-1);
1289    }
1290
1291    /* request access to nvram interface */
1292    rc = bxe_acquire_nvram_lock(sc);
1293    if (rc) {
1294        return (rc);
1295    }
1296
1297    /* enable access to nvram interface */
1298    bxe_enable_nvram_access(sc);
1299
1300    /* read the first word(s) */
1301    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1302    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1303        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1304        memcpy(ret_buf, &val, 4);
1305
1306        /* advance to the next dword */
1307        offset += sizeof(uint32_t);
1308        ret_buf += sizeof(uint32_t);
1309        buf_size -= sizeof(uint32_t);
1310        cmd_flags = 0;
1311    }
1312
1313    if (rc == 0) {
1314        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1315        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1316        memcpy(ret_buf, &val, 4);
1317    }
1318
1319    /* disable access to nvram interface */
1320    bxe_disable_nvram_access(sc);
1321    bxe_release_nvram_lock(sc);
1322
1323    return (rc);
1324}
1325
1326static int
1327bxe_nvram_write_dword(struct bxe_softc *sc,
1328                      uint32_t         offset,
1329                      uint32_t         val,
1330                      uint32_t         cmd_flags)
1331{
1332    int count, i, rc;
1333
1334    /* build the command word */
1335    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1336
1337    /* need to clear DONE bit separately */
1338    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1339
1340    /* write the data */
1341    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1342
1343    /* address of the NVRAM to write to */
1344    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1345           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1346
1347    /* issue the write command */
1348    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1349
1350    /* adjust timeout for emulation/FPGA */
1351    count = NVRAM_TIMEOUT_COUNT;
1352    if (CHIP_REV_IS_SLOW(sc)) {
1353        count *= 100;
1354    }
1355
1356    /* wait for completion */
1357    rc = -1;
1358    for (i = 0; i < count; i++) {
1359        DELAY(5);
1360        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1361        if (val & MCPR_NVM_COMMAND_DONE) {
1362            rc = 0;
1363            break;
1364        }
1365    }
1366
1367    if (rc == -1) {
1368        BLOGE(sc, "nvram write timeout expired "
1369            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1370            offset, cmd_flags, val);
1371    }
1372
1373    return (rc);
1374}
1375
1376#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1377
1378static int
1379bxe_nvram_write1(struct bxe_softc *sc,
1380                 uint32_t         offset,
1381                 uint8_t          *data_buf,
1382                 int              buf_size)
1383{
1384    uint32_t cmd_flags;
1385    uint32_t align_offset;
1386    uint32_t val;
1387    int rc;
1388
1389    if ((offset + buf_size) > sc->devinfo.flash_size) {
1390        BLOGE(sc, "Invalid parameter, "
1391                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1392              offset, buf_size, sc->devinfo.flash_size);
1393        return (-1);
1394    }
1395
1396    /* request access to nvram interface */
1397    rc = bxe_acquire_nvram_lock(sc);
1398    if (rc) {
1399        return (rc);
1400    }
1401
1402    /* enable access to nvram interface */
1403    bxe_enable_nvram_access(sc);
1404
1405    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1406    align_offset = (offset & ~0x03);
1407    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1408
1409    if (rc == 0) {
1410        val &= ~(0xff << BYTE_OFFSET(offset));
1411        val |= (*data_buf << BYTE_OFFSET(offset));
1412
1413        /* nvram data is returned as an array of bytes
1414         * convert it back to cpu order
1415         */
1416        val = be32toh(val);
1417
1418        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1419    }
1420
1421    /* disable access to nvram interface */
1422    bxe_disable_nvram_access(sc);
1423    bxe_release_nvram_lock(sc);
1424
1425    return (rc);
1426}
1427
1428static int
1429bxe_nvram_write(struct bxe_softc *sc,
1430                uint32_t         offset,
1431                uint8_t          *data_buf,
1432                int              buf_size)
1433{
1434    uint32_t cmd_flags;
1435    uint32_t val;
1436    uint32_t written_so_far;
1437    int rc;
1438
1439    if (buf_size == 1) {
1440        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1441    }
1442
1443    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1444        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1445              offset, buf_size);
1446        return (-1);
1447    }
1448
1449    if (buf_size == 0) {
1450        return (0); /* nothing to do */
1451    }
1452
1453    if ((offset + buf_size) > sc->devinfo.flash_size) {
1454        BLOGE(sc, "Invalid parameter, "
1455                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1456              offset, buf_size, sc->devinfo.flash_size);
1457        return (-1);
1458    }
1459
1460    /* request access to nvram interface */
1461    rc = bxe_acquire_nvram_lock(sc);
1462    if (rc) {
1463        return (rc);
1464    }
1465
1466    /* enable access to nvram interface */
1467    bxe_enable_nvram_access(sc);
1468
1469    written_so_far = 0;
1470    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1471    while ((written_so_far < buf_size) && (rc == 0)) {
1472        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1473            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1474        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1475            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1476        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1477            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1478        }
1479
1480        memcpy(&val, data_buf, 4);
1481
1482        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1483
1484        /* advance to the next dword */
1485        offset += sizeof(uint32_t);
1486        data_buf += sizeof(uint32_t);
1487        written_so_far += sizeof(uint32_t);
1488        cmd_flags = 0;
1489    }
1490
1491    /* disable access to nvram interface */
1492    bxe_disable_nvram_access(sc);
1493    bxe_release_nvram_lock(sc);
1494
1495    return (rc);
1496}
1497
1498/* copy command into DMAE command memory and set DMAE command Go */
1499void
1500bxe_post_dmae(struct bxe_softc    *sc,
1501              struct dmae_cmd *dmae,
1502              int                 idx)
1503{
1504    uint32_t cmd_offset;
1505    int i;
1506
1507    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1508    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1509        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1510    }
1511
1512    REG_WR(sc, dmae_reg_go_c[idx], 1);
1513}
1514
1515uint32_t
1516bxe_dmae_opcode_add_comp(uint32_t opcode,
1517                         uint8_t  comp_type)
1518{
1519    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1520                      DMAE_CMD_C_TYPE_ENABLE));
1521}
1522
1523uint32_t
1524bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1525{
1526    return (opcode & ~DMAE_CMD_SRC_RESET);
1527}
1528
1529uint32_t
1530bxe_dmae_opcode(struct bxe_softc *sc,
1531                uint8_t          src_type,
1532                uint8_t          dst_type,
1533                uint8_t          with_comp,
1534                uint8_t          comp_type)
1535{
1536    uint32_t opcode = 0;
1537
1538    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1539               (dst_type << DMAE_CMD_DST_SHIFT));
1540
1541    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1542
1543    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1544
1545    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1546               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1547
1548    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1549
1550#ifdef __BIG_ENDIAN
1551    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1552#else
1553    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1554#endif
1555
1556    if (with_comp) {
1557        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1558    }
1559
1560    return (opcode);
1561}
1562
1563static void
1564bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1565                        struct dmae_cmd *dmae,
1566                        uint8_t             src_type,
1567                        uint8_t             dst_type)
1568{
1569    memset(dmae, 0, sizeof(struct dmae_cmd));
1570
1571    /* set the opcode */
1572    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1573                                   TRUE, DMAE_COMP_PCI);
1574
1575    /* fill in the completion parameters */
1576    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1577    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1578    dmae->comp_val     = DMAE_COMP_VAL;
1579}
1580
1581/* issue a DMAE command over the init channel and wait for completion */
1582static int
1583bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1584                         struct dmae_cmd *dmae)
1585{
1586    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1587    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1588
1589    BXE_DMAE_LOCK(sc);
1590
1591    /* reset completion */
1592    *wb_comp = 0;
1593
1594    /* post the command on the channel used for initializations */
1595    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1596
1597    /* wait for completion */
1598    DELAY(5);
1599
1600    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1601        if (!timeout ||
1602            (sc->recovery_state != BXE_RECOVERY_DONE &&
1603             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1604            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1605                *wb_comp, sc->recovery_state);
1606            BXE_DMAE_UNLOCK(sc);
1607            return (DMAE_TIMEOUT);
1608        }
1609
1610        timeout--;
1611        DELAY(50);
1612    }
1613
1614    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1615        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1616                *wb_comp, sc->recovery_state);
1617        BXE_DMAE_UNLOCK(sc);
1618        return (DMAE_PCI_ERROR);
1619    }
1620
1621    BXE_DMAE_UNLOCK(sc);
1622    return (0);
1623}
1624
1625void
1626bxe_read_dmae(struct bxe_softc *sc,
1627              uint32_t         src_addr,
1628              uint32_t         len32)
1629{
1630    struct dmae_cmd dmae;
1631    uint32_t *data;
1632    int i, rc;
1633
1634    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1635
1636    if (!sc->dmae_ready) {
1637        data = BXE_SP(sc, wb_data[0]);
1638
1639        for (i = 0; i < len32; i++) {
1640            data[i] = (CHIP_IS_E1(sc)) ?
1641                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1642                          REG_RD(sc, (src_addr + (i * 4)));
1643        }
1644
1645        return;
1646    }
1647
1648    /* set opcode and fixed command fields */
1649    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1650
1651    /* fill in addresses and len */
1652    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1653    dmae.src_addr_hi = 0;
1654    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1655    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1656    dmae.len         = len32;
1657
1658    /* issue the command and wait for completion */
1659    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1660        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1661    }
1662}
1663
1664void
1665bxe_write_dmae(struct bxe_softc *sc,
1666               bus_addr_t       dma_addr,
1667               uint32_t         dst_addr,
1668               uint32_t         len32)
1669{
1670    struct dmae_cmd dmae;
1671    int rc;
1672
1673    if (!sc->dmae_ready) {
1674        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1675
1676        if (CHIP_IS_E1(sc)) {
1677            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1678        } else {
1679            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1680        }
1681
1682        return;
1683    }
1684
1685    /* set opcode and fixed command fields */
1686    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1687
1688    /* fill in addresses and len */
1689    dmae.src_addr_lo = U64_LO(dma_addr);
1690    dmae.src_addr_hi = U64_HI(dma_addr);
1691    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1692    dmae.dst_addr_hi = 0;
1693    dmae.len         = len32;
1694
1695    /* issue the command and wait for completion */
1696    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1697        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1698    }
1699}
1700
1701void
1702bxe_write_dmae_phys_len(struct bxe_softc *sc,
1703                        bus_addr_t       phys_addr,
1704                        uint32_t         addr,
1705                        uint32_t         len)
1706{
1707    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1708    int offset = 0;
1709
1710    while (len > dmae_wr_max) {
1711        bxe_write_dmae(sc,
1712                       (phys_addr + offset), /* src DMA address */
1713                       (addr + offset),      /* dst GRC address */
1714                       dmae_wr_max);
1715        offset += (dmae_wr_max * 4);
1716        len -= dmae_wr_max;
1717    }
1718
1719    bxe_write_dmae(sc,
1720                   (phys_addr + offset), /* src DMA address */
1721                   (addr + offset),      /* dst GRC address */
1722                   len);
1723}
1724
1725void
1726bxe_set_ctx_validation(struct bxe_softc   *sc,
1727                       struct eth_context *cxt,
1728                       uint32_t           cid)
1729{
1730    /* ustorm cxt validation */
1731    cxt->ustorm_ag_context.cdu_usage =
1732        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1733            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1734    /* xcontext validation */
1735    cxt->xstorm_ag_context.cdu_reserved =
1736        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1737            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1738}
1739
1740static void
1741bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1742                            uint8_t          port,
1743                            uint8_t          fw_sb_id,
1744                            uint8_t          sb_index,
1745                            uint8_t          ticks)
1746{
1747    uint32_t addr =
1748        (BAR_CSTRORM_INTMEM +
1749         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1750
1751    REG_WR8(sc, addr, ticks);
1752
1753    BLOGD(sc, DBG_LOAD,
1754          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1755          port, fw_sb_id, sb_index, ticks);
1756}
1757
1758static void
1759bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1760                            uint8_t          port,
1761                            uint16_t         fw_sb_id,
1762                            uint8_t          sb_index,
1763                            uint8_t          disable)
1764{
1765    uint32_t enable_flag =
1766        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1767    uint32_t addr =
1768        (BAR_CSTRORM_INTMEM +
1769         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1770    uint8_t flags;
1771
1772    /* clear and set */
1773    flags = REG_RD8(sc, addr);
1774    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1775    flags |= enable_flag;
1776    REG_WR8(sc, addr, flags);
1777
1778    BLOGD(sc, DBG_LOAD,
1779          "port %d fw_sb_id %d sb_index %d disable %d\n",
1780          port, fw_sb_id, sb_index, disable);
1781}
1782
1783void
1784bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1785                             uint8_t          fw_sb_id,
1786                             uint8_t          sb_index,
1787                             uint8_t          disable,
1788                             uint16_t         usec)
1789{
1790    int port = SC_PORT(sc);
1791    uint8_t ticks = (usec / 4); /* XXX ??? */
1792
1793    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1794
1795    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1796    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1797}
1798
1799void
1800elink_cb_udelay(struct bxe_softc *sc,
1801                uint32_t         usecs)
1802{
1803    DELAY(usecs);
1804}
1805
1806uint32_t
1807elink_cb_reg_read(struct bxe_softc *sc,
1808                  uint32_t         reg_addr)
1809{
1810    return (REG_RD(sc, reg_addr));
1811}
1812
1813void
1814elink_cb_reg_write(struct bxe_softc *sc,
1815                   uint32_t         reg_addr,
1816                   uint32_t         val)
1817{
1818    REG_WR(sc, reg_addr, val);
1819}
1820
1821void
1822elink_cb_reg_wb_write(struct bxe_softc *sc,
1823                      uint32_t         offset,
1824                      uint32_t         *wb_write,
1825                      uint16_t         len)
1826{
1827    REG_WR_DMAE(sc, offset, wb_write, len);
1828}
1829
1830void
1831elink_cb_reg_wb_read(struct bxe_softc *sc,
1832                     uint32_t         offset,
1833                     uint32_t         *wb_write,
1834                     uint16_t         len)
1835{
1836    REG_RD_DMAE(sc, offset, wb_write, len);
1837}
1838
1839uint8_t
1840elink_cb_path_id(struct bxe_softc *sc)
1841{
1842    return (SC_PATH(sc));
1843}
1844
1845void
1846elink_cb_event_log(struct bxe_softc     *sc,
1847                   const elink_log_id_t elink_log_id,
1848                   ...)
1849{
1850    /* XXX */
1851    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1852}
1853
1854static int
1855bxe_set_spio(struct bxe_softc *sc,
1856             int              spio,
1857             uint32_t         mode)
1858{
1859    uint32_t spio_reg;
1860
1861    /* Only 2 SPIOs are configurable */
1862    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1863        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1864        return (-1);
1865    }
1866
1867    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1868
1869    /* read SPIO and mask except the float bits */
1870    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1871
1872    switch (mode) {
1873    case MISC_SPIO_OUTPUT_LOW:
1874        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1875        /* clear FLOAT and set CLR */
1876        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1877        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1878        break;
1879
1880    case MISC_SPIO_OUTPUT_HIGH:
1881        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1882        /* clear FLOAT and set SET */
1883        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1884        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1885        break;
1886
1887    case MISC_SPIO_INPUT_HI_Z:
1888        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1889        /* set FLOAT */
1890        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1891        break;
1892
1893    default:
1894        break;
1895    }
1896
1897    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1898    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1899
1900    return (0);
1901}
1902
1903static int
1904bxe_gpio_read(struct bxe_softc *sc,
1905              int              gpio_num,
1906              uint8_t          port)
1907{
1908    /* The GPIO should be swapped if swap register is set and active */
1909    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1910                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1911    int gpio_shift = (gpio_num +
1912                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1913    uint32_t gpio_mask = (1 << gpio_shift);
1914    uint32_t gpio_reg;
1915
1916    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1918            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1919            gpio_mask);
1920        return (-1);
1921    }
1922
1923    /* read GPIO value */
1924    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1925
1926    /* get the requested pin value */
1927    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1928}
1929
1930static int
1931bxe_gpio_write(struct bxe_softc *sc,
1932               int              gpio_num,
1933               uint32_t         mode,
1934               uint8_t          port)
1935{
1936    /* The GPIO should be swapped if swap register is set and active */
1937    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1938                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1939    int gpio_shift = (gpio_num +
1940                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1941    uint32_t gpio_mask = (1 << gpio_shift);
1942    uint32_t gpio_reg;
1943
1944    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1945        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1946            " gpio_shift %d gpio_mask 0x%x\n",
1947            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1948        return (-1);
1949    }
1950
1951    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1952
1953    /* read GPIO and mask except the float bits */
1954    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1955
1956    switch (mode) {
1957    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1958        BLOGD(sc, DBG_PHY,
1959              "Set GPIO %d (shift %d) -> output low\n",
1960              gpio_num, gpio_shift);
1961        /* clear FLOAT and set CLR */
1962        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1963        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1964        break;
1965
1966    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1967        BLOGD(sc, DBG_PHY,
1968              "Set GPIO %d (shift %d) -> output high\n",
1969              gpio_num, gpio_shift);
1970        /* clear FLOAT and set SET */
1971        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1972        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1973        break;
1974
1975    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1976        BLOGD(sc, DBG_PHY,
1977              "Set GPIO %d (shift %d) -> input\n",
1978              gpio_num, gpio_shift);
1979        /* set FLOAT */
1980        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1981        break;
1982
1983    default:
1984        break;
1985    }
1986
1987    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1988    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1989
1990    return (0);
1991}
1992
1993static int
1994bxe_gpio_mult_write(struct bxe_softc *sc,
1995                    uint8_t          pins,
1996                    uint32_t         mode)
1997{
1998    uint32_t gpio_reg;
1999
2000    /* any port swapping should be handled by caller */
2001
2002    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2003
2004    /* read GPIO and mask except the float bits */
2005    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2006    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2007    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2008    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2009
2010    switch (mode) {
2011    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2012        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2013        /* set CLR */
2014        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2015        break;
2016
2017    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2018        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2019        /* set SET */
2020        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2021        break;
2022
2023    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2024        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2025        /* set FLOAT */
2026        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2027        break;
2028
2029    default:
2030        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2031            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2032        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2033        return (-1);
2034    }
2035
2036    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2037    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2038
2039    return (0);
2040}
2041
2042static int
2043bxe_gpio_int_write(struct bxe_softc *sc,
2044                   int              gpio_num,
2045                   uint32_t         mode,
2046                   uint8_t          port)
2047{
2048    /* The GPIO should be swapped if swap register is set and active */
2049    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2050                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2051    int gpio_shift = (gpio_num +
2052                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2053    uint32_t gpio_mask = (1 << gpio_shift);
2054    uint32_t gpio_reg;
2055
2056    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2057        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2058            " gpio_shift %d gpio_mask 0x%x\n",
2059            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2060        return (-1);
2061    }
2062
2063    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2064
2065    /* read GPIO int */
2066    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2067
2068    switch (mode) {
2069    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2070        BLOGD(sc, DBG_PHY,
2071              "Clear GPIO INT %d (shift %d) -> output low\n",
2072              gpio_num, gpio_shift);
2073        /* clear SET and set CLR */
2074        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2075        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2076        break;
2077
2078    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2079        BLOGD(sc, DBG_PHY,
2080              "Set GPIO INT %d (shift %d) -> output high\n",
2081              gpio_num, gpio_shift);
2082        /* clear CLR and set SET */
2083        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2084        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2085        break;
2086
2087    default:
2088        break;
2089    }
2090
2091    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2092    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2093
2094    return (0);
2095}
2096
2097uint32_t
2098elink_cb_gpio_read(struct bxe_softc *sc,
2099                   uint16_t         gpio_num,
2100                   uint8_t          port)
2101{
2102    return (bxe_gpio_read(sc, gpio_num, port));
2103}
2104
2105uint8_t
2106elink_cb_gpio_write(struct bxe_softc *sc,
2107                    uint16_t         gpio_num,
2108                    uint8_t          mode, /* 0=low 1=high */
2109                    uint8_t          port)
2110{
2111    return (bxe_gpio_write(sc, gpio_num, mode, port));
2112}
2113
2114uint8_t
2115elink_cb_gpio_mult_write(struct bxe_softc *sc,
2116                         uint8_t          pins,
2117                         uint8_t          mode) /* 0=low 1=high */
2118{
2119    return (bxe_gpio_mult_write(sc, pins, mode));
2120}
2121
2122uint8_t
2123elink_cb_gpio_int_write(struct bxe_softc *sc,
2124                        uint16_t         gpio_num,
2125                        uint8_t          mode, /* 0=low 1=high */
2126                        uint8_t          port)
2127{
2128    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2129}
2130
2131void
2132elink_cb_notify_link_changed(struct bxe_softc *sc)
2133{
2134    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2135                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2136}
2137
2138/* send the MCP a request, block until there is a reply */
2139uint32_t
2140elink_cb_fw_command(struct bxe_softc *sc,
2141                    uint32_t         command,
2142                    uint32_t         param)
2143{
2144    int mb_idx = SC_FW_MB_IDX(sc);
2145    uint32_t seq;
2146    uint32_t rc = 0;
2147    uint32_t cnt = 1;
2148    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2149
2150    BXE_FWMB_LOCK(sc);
2151
2152    seq = ++sc->fw_seq;
2153    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2154    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2155
2156    BLOGD(sc, DBG_PHY,
2157          "wrote command 0x%08x to FW MB param 0x%08x\n",
2158          (command | seq), param);
2159
2160    /* Let the FW do it's magic. GIve it up to 5 seconds... */
2161    do {
2162        DELAY(delay * 1000);
2163        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2164    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2165
2166    BLOGD(sc, DBG_PHY,
2167          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2168          cnt*delay, rc, seq);
2169
2170    /* is this a reply to our command? */
2171    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2172        rc &= FW_MSG_CODE_MASK;
2173    } else {
2174        /* Ruh-roh! */
2175        BLOGE(sc, "FW failed to respond!\n");
2176        // XXX bxe_fw_dump(sc);
2177        rc = 0;
2178    }
2179
2180    BXE_FWMB_UNLOCK(sc);
2181    return (rc);
2182}
2183
2184static uint32_t
2185bxe_fw_command(struct bxe_softc *sc,
2186               uint32_t         command,
2187               uint32_t         param)
2188{
2189    return (elink_cb_fw_command(sc, command, param));
2190}
2191
2192static void
2193__storm_memset_dma_mapping(struct bxe_softc *sc,
2194                           uint32_t         addr,
2195                           bus_addr_t       mapping)
2196{
2197    REG_WR(sc, addr, U64_LO(mapping));
2198    REG_WR(sc, (addr + 4), U64_HI(mapping));
2199}
2200
2201static void
2202storm_memset_spq_addr(struct bxe_softc *sc,
2203                      bus_addr_t       mapping,
2204                      uint16_t         abs_fid)
2205{
2206    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2207                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2208    __storm_memset_dma_mapping(sc, addr, mapping);
2209}
2210
2211static void
2212storm_memset_vf_to_pf(struct bxe_softc *sc,
2213                      uint16_t         abs_fid,
2214                      uint16_t         pf_id)
2215{
2216    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2217    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2218    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2219    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2220}
2221
2222static void
2223storm_memset_func_en(struct bxe_softc *sc,
2224                     uint16_t         abs_fid,
2225                     uint8_t          enable)
2226{
2227    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2228    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2229    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2230    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2231}
2232
2233static void
2234storm_memset_eq_data(struct bxe_softc       *sc,
2235                     struct event_ring_data *eq_data,
2236                     uint16_t               pfid)
2237{
2238    uint32_t addr;
2239    size_t size;
2240
2241    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2242    size = sizeof(struct event_ring_data);
2243    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2244}
2245
2246static void
2247storm_memset_eq_prod(struct bxe_softc *sc,
2248                     uint16_t         eq_prod,
2249                     uint16_t         pfid)
2250{
2251    uint32_t addr = (BAR_CSTRORM_INTMEM +
2252                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2253    REG_WR16(sc, addr, eq_prod);
2254}
2255
2256/*
2257 * Post a slowpath command.
2258 *
2259 * A slowpath command is used to propagate a configuration change through
2260 * the controller in a controlled manner, allowing each STORM processor and
2261 * other H/W blocks to phase in the change.  The commands sent on the
2262 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2263 * completion of the ramrod will occur in different ways.  Here's a
2264 * breakdown of ramrods and how they complete:
2265 *
2266 * RAMROD_CMD_ID_ETH_PORT_SETUP
2267 *   Used to setup the leading connection on a port.  Completes on the
2268 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2269 *
2270 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2271 *   Used to setup an additional connection on a port.  Completes on the
2272 *   RCQ of the multi-queue/RSS connection being initialized.
2273 *
2274 * RAMROD_CMD_ID_ETH_STAT_QUERY
2275 *   Used to force the storm processors to update the statistics database
2276 *   in host memory.  This ramrod is send on the leading connection CID and
2277 *   completes as an index increment of the CSTORM on the default status
2278 *   block.
2279 *
2280 * RAMROD_CMD_ID_ETH_UPDATE
2281 *   Used to update the state of the leading connection, usually to udpate
2282 *   the RSS indirection table.  Completes on the RCQ of the leading
2283 *   connection. (Not currently used under FreeBSD until OS support becomes
2284 *   available.)
2285 *
2286 * RAMROD_CMD_ID_ETH_HALT
2287 *   Used when tearing down a connection prior to driver unload.  Completes
2288 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2289 *   use this on the leading connection.
2290 *
2291 * RAMROD_CMD_ID_ETH_SET_MAC
2292 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2293 *   the RCQ of the leading connection.
2294 *
2295 * RAMROD_CMD_ID_ETH_CFC_DEL
2296 *   Used when tearing down a conneciton prior to driver unload.  Completes
2297 *   on the RCQ of the leading connection (since the current connection
2298 *   has been completely removed from controller memory).
2299 *
2300 * RAMROD_CMD_ID_ETH_PORT_DEL
2301 *   Used to tear down the leading connection prior to driver unload,
2302 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2303 *   default status block.
2304 *
2305 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2306 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2307 *   RSS connection that is being offloaded.  (Not currently used under
2308 *   FreeBSD.)
2309 *
2310 * There can only be one command pending per function.
2311 *
2312 * Returns:
2313 *   0 = Success, !0 = Failure.
2314 */
2315
2316/* must be called under the spq lock */
2317static inline
2318struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2319{
2320    struct eth_spe *next_spe = sc->spq_prod_bd;
2321
2322    if (sc->spq_prod_bd == sc->spq_last_bd) {
2323        /* wrap back to the first eth_spq */
2324        sc->spq_prod_bd = sc->spq;
2325        sc->spq_prod_idx = 0;
2326    } else {
2327        sc->spq_prod_bd++;
2328        sc->spq_prod_idx++;
2329    }
2330
2331    return (next_spe);
2332}
2333
2334/* must be called under the spq lock */
2335static inline
2336void bxe_sp_prod_update(struct bxe_softc *sc)
2337{
2338    int func = SC_FUNC(sc);
2339
2340    /*
2341     * Make sure that BD data is updated before writing the producer.
2342     * BD data is written to the memory, the producer is read from the
2343     * memory, thus we need a full memory barrier to ensure the ordering.
2344     */
2345    mb();
2346
2347    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2348             sc->spq_prod_idx);
2349
2350    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2351                      BUS_SPACE_BARRIER_WRITE);
2352}
2353
2354/**
2355 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2356 *
2357 * @cmd:      command to check
2358 * @cmd_type: command type
2359 */
2360static inline
2361int bxe_is_contextless_ramrod(int cmd,
2362                              int cmd_type)
2363{
2364    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2365        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2366        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2367        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2368        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2369        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2370        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2371        return (TRUE);
2372    } else {
2373        return (FALSE);
2374    }
2375}
2376
2377/**
2378 * bxe_sp_post - place a single command on an SP ring
2379 *
2380 * @sc:         driver handle
2381 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2382 * @cid:        SW CID the command is related to
2383 * @data_hi:    command private data address (high 32 bits)
2384 * @data_lo:    command private data address (low 32 bits)
2385 * @cmd_type:   command type (e.g. NONE, ETH)
2386 *
2387 * SP data is handled as if it's always an address pair, thus data fields are
2388 * not swapped to little endian in upper functions. Instead this function swaps
2389 * data as if it's two uint32 fields.
2390 */
2391int
2392bxe_sp_post(struct bxe_softc *sc,
2393            int              command,
2394            int              cid,
2395            uint32_t         data_hi,
2396            uint32_t         data_lo,
2397            int              cmd_type)
2398{
2399    struct eth_spe *spe;
2400    uint16_t type;
2401    int common;
2402
2403    common = bxe_is_contextless_ramrod(command, cmd_type);
2404
2405    BXE_SP_LOCK(sc);
2406
2407    if (common) {
2408        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2409            BLOGE(sc, "EQ ring is full!\n");
2410            BXE_SP_UNLOCK(sc);
2411            return (-1);
2412        }
2413    } else {
2414        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2415            BLOGE(sc, "SPQ ring is full!\n");
2416            BXE_SP_UNLOCK(sc);
2417            return (-1);
2418        }
2419    }
2420
2421    spe = bxe_sp_get_next(sc);
2422
2423    /* CID needs port number to be encoded int it */
2424    spe->hdr.conn_and_cmd_data =
2425        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2426
2427    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2428
2429    /* TBD: Check if it works for VFs */
2430    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2431             SPE_HDR_T_FUNCTION_ID);
2432
2433    spe->hdr.type = htole16(type);
2434
2435    spe->data.update_data_addr.hi = htole32(data_hi);
2436    spe->data.update_data_addr.lo = htole32(data_lo);
2437
2438    /*
2439     * It's ok if the actual decrement is issued towards the memory
2440     * somewhere between the lock and unlock. Thus no more explict
2441     * memory barrier is needed.
2442     */
2443    if (common) {
2444        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2445    } else {
2446        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2447    }
2448
2449    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2450    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2451          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2452    BLOGD(sc, DBG_SP,
2453          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2454          sc->spq_prod_idx,
2455          (uint32_t)U64_HI(sc->spq_dma.paddr),
2456          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2457          command,
2458          common,
2459          HW_CID(sc, cid),
2460          data_hi,
2461          data_lo,
2462          type,
2463          atomic_load_acq_long(&sc->cq_spq_left),
2464          atomic_load_acq_long(&sc->eq_spq_left));
2465
2466    bxe_sp_prod_update(sc);
2467
2468    BXE_SP_UNLOCK(sc);
2469    return (0);
2470}
2471
2472/**
2473 * bxe_debug_print_ind_table - prints the indirection table configuration.
2474 *
2475 * @sc: driver hanlde
2476 * @p:  pointer to rss configuration
2477 */
2478
2479/*
2480 * FreeBSD Device probe function.
2481 *
2482 * Compares the device found to the driver's list of supported devices and
2483 * reports back to the bsd loader whether this is the right driver for the device.
2484 * This is the driver entry function called from the "kldload" command.
2485 *
2486 * Returns:
2487 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2488 */
2489static int
2490bxe_probe(device_t dev)
2491{
2492    struct bxe_device_type *t;
2493    char *descbuf;
2494    uint16_t did, sdid, svid, vid;
2495
2496    /* Find our device structure */
2497    t = bxe_devs;
2498
2499    /* Get the data for the device to be probed. */
2500    vid  = pci_get_vendor(dev);
2501    did  = pci_get_device(dev);
2502    svid = pci_get_subvendor(dev);
2503    sdid = pci_get_subdevice(dev);
2504
2505    /* Look through the list of known devices for a match. */
2506    while (t->bxe_name != NULL) {
2507        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2508            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2509            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2510            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2511            if (descbuf == NULL)
2512                return (ENOMEM);
2513
2514            /* Print out the device identity. */
2515            snprintf(descbuf, BXE_DEVDESC_MAX,
2516                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2517                     (((pci_read_config(dev, PCIR_REVID, 4) &
2518                        0xf0) >> 4) + 'A'),
2519                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2520                     BXE_DRIVER_VERSION);
2521
2522            device_set_desc_copy(dev, descbuf);
2523            free(descbuf, M_TEMP);
2524            return (BUS_PROBE_DEFAULT);
2525        }
2526        t++;
2527    }
2528
2529    return (ENXIO);
2530}
2531
2532static void
2533bxe_init_mutexes(struct bxe_softc *sc)
2534{
2535#ifdef BXE_CORE_LOCK_SX
2536    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2537             "bxe%d_core_lock", sc->unit);
2538    sx_init(&sc->core_sx, sc->core_sx_name);
2539#else
2540    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2541             "bxe%d_core_lock", sc->unit);
2542    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2543#endif
2544
2545    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2546             "bxe%d_sp_lock", sc->unit);
2547    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2548
2549    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2550             "bxe%d_dmae_lock", sc->unit);
2551    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2552
2553    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2554             "bxe%d_phy_lock", sc->unit);
2555    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2556
2557    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2558             "bxe%d_fwmb_lock", sc->unit);
2559    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2560
2561    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2562             "bxe%d_print_lock", sc->unit);
2563    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2564
2565    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2566             "bxe%d_stats_lock", sc->unit);
2567    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2568
2569    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2570             "bxe%d_mcast_lock", sc->unit);
2571    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2572}
2573
2574static void
2575bxe_release_mutexes(struct bxe_softc *sc)
2576{
2577#ifdef BXE_CORE_LOCK_SX
2578    sx_destroy(&sc->core_sx);
2579#else
2580    if (mtx_initialized(&sc->core_mtx)) {
2581        mtx_destroy(&sc->core_mtx);
2582    }
2583#endif
2584
2585    if (mtx_initialized(&sc->sp_mtx)) {
2586        mtx_destroy(&sc->sp_mtx);
2587    }
2588
2589    if (mtx_initialized(&sc->dmae_mtx)) {
2590        mtx_destroy(&sc->dmae_mtx);
2591    }
2592
2593    if (mtx_initialized(&sc->port.phy_mtx)) {
2594        mtx_destroy(&sc->port.phy_mtx);
2595    }
2596
2597    if (mtx_initialized(&sc->fwmb_mtx)) {
2598        mtx_destroy(&sc->fwmb_mtx);
2599    }
2600
2601    if (mtx_initialized(&sc->print_mtx)) {
2602        mtx_destroy(&sc->print_mtx);
2603    }
2604
2605    if (mtx_initialized(&sc->stats_mtx)) {
2606        mtx_destroy(&sc->stats_mtx);
2607    }
2608
2609    if (mtx_initialized(&sc->mcast_mtx)) {
2610        mtx_destroy(&sc->mcast_mtx);
2611    }
2612}
2613
2614static void
2615bxe_tx_disable(struct bxe_softc* sc)
2616{
2617    if_t ifp = sc->ifp;
2618
2619    /* tell the stack the driver is stopped and TX queue is full */
2620    if (ifp !=  NULL) {
2621        if_setdrvflags(ifp, 0);
2622    }
2623}
2624
2625static void
2626bxe_drv_pulse(struct bxe_softc *sc)
2627{
2628    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2629             sc->fw_drv_pulse_wr_seq);
2630}
2631
2632static inline uint16_t
2633bxe_tx_avail(struct bxe_softc *sc,
2634             struct bxe_fastpath *fp)
2635{
2636    int16_t  used;
2637    uint16_t prod;
2638    uint16_t cons;
2639
2640    prod = fp->tx_bd_prod;
2641    cons = fp->tx_bd_cons;
2642
2643    used = SUB_S16(prod, cons);
2644
2645    return (int16_t)(sc->tx_ring_size) - used;
2646}
2647
2648static inline int
2649bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2650{
2651    uint16_t hw_cons;
2652
2653    mb(); /* status block fields can change */
2654    hw_cons = le16toh(*fp->tx_cons_sb);
2655    return (hw_cons != fp->tx_pkt_cons);
2656}
2657
2658static inline uint8_t
2659bxe_has_tx_work(struct bxe_fastpath *fp)
2660{
2661    /* expand this for multi-cos if ever supported */
2662    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2663}
2664
2665static inline int
2666bxe_has_rx_work(struct bxe_fastpath *fp)
2667{
2668    uint16_t rx_cq_cons_sb;
2669
2670    mb(); /* status block fields can change */
2671    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2672    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2673        rx_cq_cons_sb++;
2674    return (fp->rx_cq_cons != rx_cq_cons_sb);
2675}
2676
2677static void
2678bxe_sp_event(struct bxe_softc    *sc,
2679             struct bxe_fastpath *fp,
2680             union eth_rx_cqe    *rr_cqe)
2681{
2682    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2683    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2684    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2685    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2686
2687    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2688          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2689
2690    switch (command) {
2691    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2692        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2693        drv_cmd = ECORE_Q_CMD_UPDATE;
2694        break;
2695
2696    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2697        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2698        drv_cmd = ECORE_Q_CMD_SETUP;
2699        break;
2700
2701    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2702        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2703        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2704        break;
2705
2706    case (RAMROD_CMD_ID_ETH_HALT):
2707        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2708        drv_cmd = ECORE_Q_CMD_HALT;
2709        break;
2710
2711    case (RAMROD_CMD_ID_ETH_TERMINATE):
2712        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2713        drv_cmd = ECORE_Q_CMD_TERMINATE;
2714        break;
2715
2716    case (RAMROD_CMD_ID_ETH_EMPTY):
2717        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2718        drv_cmd = ECORE_Q_CMD_EMPTY;
2719        break;
2720
2721    default:
2722        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2723              command, fp->index);
2724        return;
2725    }
2726
2727    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2728        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2729        /*
2730         * q_obj->complete_cmd() failure means that this was
2731         * an unexpected completion.
2732         *
2733         * In this case we don't want to increase the sc->spq_left
2734         * because apparently we haven't sent this command the first
2735         * place.
2736         */
2737        // bxe_panic(sc, ("Unexpected SP completion\n"));
2738        return;
2739    }
2740
2741    atomic_add_acq_long(&sc->cq_spq_left, 1);
2742
2743    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2744          atomic_load_acq_long(&sc->cq_spq_left));
2745}
2746
2747/*
2748 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2749 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2750 * the current aggregation queue as in-progress.
2751 */
2752static void
2753bxe_tpa_start(struct bxe_softc            *sc,
2754              struct bxe_fastpath         *fp,
2755              uint16_t                    queue,
2756              uint16_t                    cons,
2757              uint16_t                    prod,
2758              struct eth_fast_path_rx_cqe *cqe)
2759{
2760    struct bxe_sw_rx_bd tmp_bd;
2761    struct bxe_sw_rx_bd *rx_buf;
2762    struct eth_rx_bd *rx_bd;
2763    int max_agg_queues;
2764    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2765    uint16_t index;
2766
2767    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2768                       "cons=%d prod=%d\n",
2769          fp->index, queue, cons, prod);
2770
2771    max_agg_queues = MAX_AGG_QS(sc);
2772
2773    KASSERT((queue < max_agg_queues),
2774            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2775             fp->index, queue, max_agg_queues));
2776
2777    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2778            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2779             fp->index, queue));
2780
2781    /* copy the existing mbuf and mapping from the TPA pool */
2782    tmp_bd = tpa_info->bd;
2783
2784    if (tmp_bd.m == NULL) {
2785        uint32_t *tmp;
2786
2787        tmp = (uint32_t *)cqe;
2788
2789        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
2790              fp->index, queue, cons, prod);
2791        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2792            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2793
2794        /* XXX Error handling? */
2795        return;
2796    }
2797
2798    /* change the TPA queue to the start state */
2799    tpa_info->state            = BXE_TPA_STATE_START;
2800    tpa_info->placement_offset = cqe->placement_offset;
2801    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2802    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2803    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2804
2805    fp->rx_tpa_queue_used |= (1 << queue);
2806
2807    /*
2808     * If all the buffer descriptors are filled with mbufs then fill in
2809     * the current consumer index with a new BD. Else if a maximum Rx
2810     * buffer limit is imposed then fill in the next producer index.
2811     */
2812    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2813                prod : cons;
2814
2815    /* move the received mbuf and mapping to TPA pool */
2816    tpa_info->bd = fp->rx_mbuf_chain[cons];
2817
2818    /* release any existing RX BD mbuf mappings */
2819    if (cons != index) {
2820        rx_buf = &fp->rx_mbuf_chain[cons];
2821
2822        if (rx_buf->m_map != NULL) {
2823            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2824                            BUS_DMASYNC_POSTREAD);
2825            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2826        }
2827
2828        /*
2829         * We get here when the maximum number of rx buffers is less than
2830         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2831         * it out here without concern of a memory leak.
2832         */
2833        fp->rx_mbuf_chain[cons].m = NULL;
2834    }
2835
2836    /* update the Rx SW BD with the mbuf info from the TPA pool */
2837    fp->rx_mbuf_chain[index] = tmp_bd;
2838
2839    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2840    rx_bd = &fp->rx_chain[index];
2841    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2842    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2843}
2844
2845/*
2846 * When a TPA aggregation is completed, loop through the individual mbufs
2847 * of the aggregation, combining them into a single mbuf which will be sent
2848 * up the stack. Refill all freed SGEs with mbufs as we go along.
2849 */
2850static int
2851bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2852                   struct bxe_fastpath       *fp,
2853                   struct bxe_sw_tpa_info    *tpa_info,
2854                   uint16_t                  queue,
2855                   uint16_t                  pages,
2856                   struct mbuf               *m,
2857			       struct eth_end_agg_rx_cqe *cqe,
2858                   uint16_t                  cqe_idx)
2859{
2860    struct mbuf *m_frag;
2861    uint32_t frag_len, frag_size, i;
2862    uint16_t sge_idx;
2863    int rc = 0;
2864    int j;
2865
2866    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2867
2868    BLOGD(sc, DBG_LRO,
2869          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2870          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2871
2872    /* make sure the aggregated frame is not too big to handle */
2873    if (pages > 8 * PAGES_PER_SGE) {
2874
2875        uint32_t *tmp = (uint32_t *)cqe;
2876
2877        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2878                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2879              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2880              tpa_info->len_on_bd, frag_size);
2881
2882        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2883            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2884
2885        bxe_panic(sc, ("sge page count error\n"));
2886        return (EINVAL);
2887    }
2888
2889    /*
2890     * Scan through the scatter gather list pulling individual mbufs into a
2891     * single mbuf for the host stack.
2892     */
2893    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2894        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2895
2896        /*
2897         * Firmware gives the indices of the SGE as if the ring is an array
2898         * (meaning that the "next" element will consume 2 indices).
2899         */
2900        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2901
2902        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2903                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2904              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2905
2906        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2907
2908        /* allocate a new mbuf for the SGE */
2909        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2910        if (rc) {
2911            /* Leave all remaining SGEs in the ring! */
2912            return (rc);
2913        }
2914
2915        /* update the fragment length */
2916        m_frag->m_len = frag_len;
2917
2918        /* concatenate the fragment to the head mbuf */
2919        m_cat(m, m_frag);
2920        fp->eth_q_stats.mbuf_alloc_sge--;
2921
2922        /* update the TPA mbuf size and remaining fragment size */
2923        m->m_pkthdr.len += frag_len;
2924        frag_size -= frag_len;
2925    }
2926
2927    BLOGD(sc, DBG_LRO,
2928          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2929          fp->index, queue, frag_size);
2930
2931    return (rc);
2932}
2933
2934static inline void
2935bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2936{
2937    int i, j;
2938
2939    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2940        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2941
2942        for (j = 0; j < 2; j++) {
2943            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2944            idx--;
2945        }
2946    }
2947}
2948
2949static inline void
2950bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2951{
2952    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2953    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2954
2955    /*
2956     * Clear the two last indices in the page to 1. These are the indices that
2957     * correspond to the "next" element, hence will never be indicated and
2958     * should be removed from the calculations.
2959     */
2960    bxe_clear_sge_mask_next_elems(fp);
2961}
2962
2963static inline void
2964bxe_update_last_max_sge(struct bxe_fastpath *fp,
2965                        uint16_t            idx)
2966{
2967    uint16_t last_max = fp->last_max_sge;
2968
2969    if (SUB_S16(idx, last_max) > 0) {
2970        fp->last_max_sge = idx;
2971    }
2972}
2973
2974static inline void
2975bxe_update_sge_prod(struct bxe_softc          *sc,
2976                    struct bxe_fastpath       *fp,
2977                    uint16_t                  sge_len,
2978                    union eth_sgl_or_raw_data *cqe)
2979{
2980    uint16_t last_max, last_elem, first_elem;
2981    uint16_t delta = 0;
2982    uint16_t i;
2983
2984    if (!sge_len) {
2985        return;
2986    }
2987
2988    /* first mark all used pages */
2989    for (i = 0; i < sge_len; i++) {
2990        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2991                            RX_SGE(le16toh(cqe->sgl[i])));
2992    }
2993
2994    BLOGD(sc, DBG_LRO,
2995          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2996          fp->index, sge_len - 1,
2997          le16toh(cqe->sgl[sge_len - 1]));
2998
2999    /* assume that the last SGE index is the biggest */
3000    bxe_update_last_max_sge(fp,
3001                            le16toh(cqe->sgl[sge_len - 1]));
3002
3003    last_max = RX_SGE(fp->last_max_sge);
3004    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3005    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3006
3007    /* if ring is not full */
3008    if (last_elem + 1 != first_elem) {
3009        last_elem++;
3010    }
3011
3012    /* now update the prod */
3013    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3014        if (__predict_true(fp->sge_mask[i])) {
3015            break;
3016        }
3017
3018        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3019        delta += BIT_VEC64_ELEM_SZ;
3020    }
3021
3022    if (delta > 0) {
3023        fp->rx_sge_prod += delta;
3024        /* clear page-end entries */
3025        bxe_clear_sge_mask_next_elems(fp);
3026    }
3027
3028    BLOGD(sc, DBG_LRO,
3029          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3030          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3031}
3032
3033/*
3034 * The aggregation on the current TPA queue has completed. Pull the individual
3035 * mbuf fragments together into a single mbuf, perform all necessary checksum
3036 * calculations, and send the resuting mbuf to the stack.
3037 */
3038static void
3039bxe_tpa_stop(struct bxe_softc          *sc,
3040             struct bxe_fastpath       *fp,
3041             struct bxe_sw_tpa_info    *tpa_info,
3042             uint16_t                  queue,
3043             uint16_t                  pages,
3044			 struct eth_end_agg_rx_cqe *cqe,
3045             uint16_t                  cqe_idx)
3046{
3047    if_t ifp = sc->ifp;
3048    struct mbuf *m;
3049    int rc = 0;
3050
3051    BLOGD(sc, DBG_LRO,
3052          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3053          fp->index, queue, tpa_info->placement_offset,
3054          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3055
3056    m = tpa_info->bd.m;
3057
3058    /* allocate a replacement before modifying existing mbuf */
3059    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3060    if (rc) {
3061        /* drop the frame and log an error */
3062        fp->eth_q_stats.rx_soft_errors++;
3063        goto bxe_tpa_stop_exit;
3064    }
3065
3066    /* we have a replacement, fixup the current mbuf */
3067    m_adj(m, tpa_info->placement_offset);
3068    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3069
3070    /* mark the checksums valid (taken care of by the firmware) */
3071    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3072    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3073    m->m_pkthdr.csum_data = 0xffff;
3074    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3075                               CSUM_IP_VALID   |
3076                               CSUM_DATA_VALID |
3077                               CSUM_PSEUDO_HDR);
3078
3079    /* aggregate all of the SGEs into a single mbuf */
3080    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3081    if (rc) {
3082        /* drop the packet and log an error */
3083        fp->eth_q_stats.rx_soft_errors++;
3084        m_freem(m);
3085    } else {
3086        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3087            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3088            m->m_flags |= M_VLANTAG;
3089        }
3090
3091        /* assign packet to this interface interface */
3092        if_setrcvif(m, ifp);
3093
3094#if __FreeBSD_version >= 800000
3095        /* specify what RSS queue was used for this flow */
3096        m->m_pkthdr.flowid = fp->index;
3097        BXE_SET_FLOWID(m);
3098#endif
3099
3100        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3101        fp->eth_q_stats.rx_tpa_pkts++;
3102
3103        /* pass the frame to the stack */
3104        if_input(ifp, m);
3105    }
3106
3107    /* we passed an mbuf up the stack or dropped the frame */
3108    fp->eth_q_stats.mbuf_alloc_tpa--;
3109
3110bxe_tpa_stop_exit:
3111
3112    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3113    fp->rx_tpa_queue_used &= ~(1 << queue);
3114}
3115
3116static uint8_t
3117bxe_service_rxsgl(
3118                 struct bxe_fastpath *fp,
3119                 uint16_t len,
3120                 uint16_t lenonbd,
3121                 struct mbuf *m,
3122                 struct eth_fast_path_rx_cqe *cqe_fp)
3123{
3124    struct mbuf *m_frag;
3125    uint16_t frags, frag_len;
3126    uint16_t sge_idx = 0;
3127    uint16_t j;
3128    uint8_t i, rc = 0;
3129    uint32_t frag_size;
3130
3131    /* adjust the mbuf */
3132    m->m_len = lenonbd;
3133
3134    frag_size =  len - lenonbd;
3135    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3136
3137    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3138        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3139
3140        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3141        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3142        m_frag->m_len = frag_len;
3143
3144       /* allocate a new mbuf for the SGE */
3145        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3146        if (rc) {
3147            /* Leave all remaining SGEs in the ring! */
3148            return (rc);
3149        }
3150        fp->eth_q_stats.mbuf_alloc_sge--;
3151
3152        /* concatenate the fragment to the head mbuf */
3153        m_cat(m, m_frag);
3154
3155        frag_size -= frag_len;
3156    }
3157
3158    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3159
3160    return rc;
3161}
3162
3163static uint8_t
3164bxe_rxeof(struct bxe_softc    *sc,
3165          struct bxe_fastpath *fp)
3166{
3167    if_t ifp = sc->ifp;
3168    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3169    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3170    int rx_pkts = 0;
3171    int rc = 0;
3172
3173    BXE_FP_RX_LOCK(fp);
3174
3175    /* CQ "next element" is of the size of the regular element */
3176    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3177    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3178        hw_cq_cons++;
3179    }
3180
3181    bd_cons = fp->rx_bd_cons;
3182    bd_prod = fp->rx_bd_prod;
3183    bd_prod_fw = bd_prod;
3184    sw_cq_cons = fp->rx_cq_cons;
3185    sw_cq_prod = fp->rx_cq_prod;
3186
3187    /*
3188     * Memory barrier necessary as speculative reads of the rx
3189     * buffer can be ahead of the index in the status block
3190     */
3191    rmb();
3192
3193    BLOGD(sc, DBG_RX,
3194          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3195          fp->index, hw_cq_cons, sw_cq_cons);
3196
3197    while (sw_cq_cons != hw_cq_cons) {
3198        struct bxe_sw_rx_bd *rx_buf = NULL;
3199        union eth_rx_cqe *cqe;
3200        struct eth_fast_path_rx_cqe *cqe_fp;
3201        uint8_t cqe_fp_flags;
3202        enum eth_rx_cqe_type cqe_fp_type;
3203        uint16_t len, lenonbd,  pad;
3204        struct mbuf *m = NULL;
3205
3206        comp_ring_cons = RCQ(sw_cq_cons);
3207        bd_prod = RX_BD(bd_prod);
3208        bd_cons = RX_BD(bd_cons);
3209
3210        cqe          = &fp->rcq_chain[comp_ring_cons];
3211        cqe_fp       = &cqe->fast_path_cqe;
3212        cqe_fp_flags = cqe_fp->type_error_flags;
3213        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3214
3215        BLOGD(sc, DBG_RX,
3216              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3217              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3218              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3219              fp->index,
3220              hw_cq_cons,
3221              sw_cq_cons,
3222              bd_prod,
3223              bd_cons,
3224              CQE_TYPE(cqe_fp_flags),
3225              cqe_fp_flags,
3226              cqe_fp->status_flags,
3227              le32toh(cqe_fp->rss_hash_result),
3228              le16toh(cqe_fp->vlan_tag),
3229              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3230              le16toh(cqe_fp->len_on_bd));
3231
3232        /* is this a slowpath msg? */
3233        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3234            bxe_sp_event(sc, fp, cqe);
3235            goto next_cqe;
3236        }
3237
3238        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3239
3240        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3241            struct bxe_sw_tpa_info *tpa_info;
3242            uint16_t frag_size, pages;
3243            uint8_t queue;
3244
3245            if (CQE_TYPE_START(cqe_fp_type)) {
3246                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3247                              bd_cons, bd_prod, cqe_fp);
3248                m = NULL; /* packet not ready yet */
3249                goto next_rx;
3250            }
3251
3252            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3253                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3254
3255            queue = cqe->end_agg_cqe.queue_index;
3256            tpa_info = &fp->rx_tpa_info[queue];
3257
3258            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3259                  fp->index, queue);
3260
3261            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3262                         tpa_info->len_on_bd);
3263            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3264
3265            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3266                         &cqe->end_agg_cqe, comp_ring_cons);
3267
3268            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3269
3270            goto next_cqe;
3271        }
3272
3273        /* non TPA */
3274
3275        /* is this an error packet? */
3276        if (__predict_false(cqe_fp_flags &
3277                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3278            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3279            fp->eth_q_stats.rx_soft_errors++;
3280            goto next_rx;
3281        }
3282
3283        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3284        lenonbd = le16toh(cqe_fp->len_on_bd);
3285        pad = cqe_fp->placement_offset;
3286
3287        m = rx_buf->m;
3288
3289        if (__predict_false(m == NULL)) {
3290            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3291                  bd_cons, fp->index);
3292            goto next_rx;
3293        }
3294
3295        /* XXX double copy if packet length under a threshold */
3296
3297        /*
3298         * If all the buffer descriptors are filled with mbufs then fill in
3299         * the current consumer index with a new BD. Else if a maximum Rx
3300         * buffer limit is imposed then fill in the next producer index.
3301         */
3302        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3303                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3304                                      bd_prod : bd_cons);
3305        if (rc != 0) {
3306
3307            /* we simply reuse the received mbuf and don't post it to the stack */
3308            m = NULL;
3309
3310            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3311                  fp->index, rc);
3312            fp->eth_q_stats.rx_soft_errors++;
3313
3314            if (sc->max_rx_bufs != RX_BD_USABLE) {
3315                /* copy this consumer index to the producer index */
3316                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3317                       sizeof(struct bxe_sw_rx_bd));
3318                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3319            }
3320
3321            goto next_rx;
3322        }
3323
3324        /* current mbuf was detached from the bd */
3325        fp->eth_q_stats.mbuf_alloc_rx--;
3326
3327        /* we allocated a replacement mbuf, fixup the current one */
3328        m_adj(m, pad);
3329        m->m_pkthdr.len = m->m_len = len;
3330
3331        if ((len > 60) && (len > lenonbd)) {
3332            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3333            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3334            if (rc)
3335                break;
3336            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3337        } else if (lenonbd < len) {
3338            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3339        }
3340
3341        /* assign packet to this interface interface */
3342	if_setrcvif(m, ifp);
3343
3344        /* assume no hardware checksum has complated */
3345        m->m_pkthdr.csum_flags = 0;
3346
3347        /* validate checksum if offload enabled */
3348        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3349            /* check for a valid IP frame */
3350            if (!(cqe->fast_path_cqe.status_flags &
3351                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3352                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3353                if (__predict_false(cqe_fp_flags &
3354                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3355                    fp->eth_q_stats.rx_hw_csum_errors++;
3356                } else {
3357                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3358                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3359                }
3360            }
3361
3362            /* check for a valid TCP/UDP frame */
3363            if (!(cqe->fast_path_cqe.status_flags &
3364                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3365                if (__predict_false(cqe_fp_flags &
3366                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3367                    fp->eth_q_stats.rx_hw_csum_errors++;
3368                } else {
3369                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3370                    m->m_pkthdr.csum_data = 0xFFFF;
3371                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3372                                               CSUM_PSEUDO_HDR);
3373                }
3374            }
3375        }
3376
3377        /* if there is a VLAN tag then flag that info */
3378        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3379            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3380            m->m_flags |= M_VLANTAG;
3381        }
3382
3383#if __FreeBSD_version >= 800000
3384        /* specify what RSS queue was used for this flow */
3385        m->m_pkthdr.flowid = fp->index;
3386        BXE_SET_FLOWID(m);
3387#endif
3388
3389next_rx:
3390
3391        bd_cons    = RX_BD_NEXT(bd_cons);
3392        bd_prod    = RX_BD_NEXT(bd_prod);
3393        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3394
3395        /* pass the frame to the stack */
3396        if (__predict_true(m != NULL)) {
3397            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3398            rx_pkts++;
3399            if_input(ifp, m);
3400        }
3401
3402next_cqe:
3403
3404        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3405        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3406
3407        /* limit spinning on the queue */
3408        if (rc != 0)
3409            break;
3410
3411        if (rx_pkts == sc->rx_budget) {
3412            fp->eth_q_stats.rx_budget_reached++;
3413            break;
3414        }
3415    } /* while work to do */
3416
3417    fp->rx_bd_cons = bd_cons;
3418    fp->rx_bd_prod = bd_prod_fw;
3419    fp->rx_cq_cons = sw_cq_cons;
3420    fp->rx_cq_prod = sw_cq_prod;
3421
3422    /* Update producers */
3423    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3424
3425    fp->eth_q_stats.rx_pkts += rx_pkts;
3426    fp->eth_q_stats.rx_calls++;
3427
3428    BXE_FP_RX_UNLOCK(fp);
3429
3430    return (sw_cq_cons != hw_cq_cons);
3431}
3432
3433static uint16_t
3434bxe_free_tx_pkt(struct bxe_softc    *sc,
3435                struct bxe_fastpath *fp,
3436                uint16_t            idx)
3437{
3438    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3439    struct eth_tx_start_bd *tx_start_bd;
3440    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3441    uint16_t new_cons;
3442    int nbd;
3443
3444    /* unmap the mbuf from non-paged memory */
3445    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3446
3447    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3448    nbd = le16toh(tx_start_bd->nbd) - 1;
3449
3450    new_cons = (tx_buf->first_bd + nbd);
3451
3452    /* free the mbuf */
3453    if (__predict_true(tx_buf->m != NULL)) {
3454        m_freem(tx_buf->m);
3455        fp->eth_q_stats.mbuf_alloc_tx--;
3456    } else {
3457        fp->eth_q_stats.tx_chain_lost_mbuf++;
3458    }
3459
3460    tx_buf->m = NULL;
3461    tx_buf->first_bd = 0;
3462
3463    return (new_cons);
3464}
3465
3466/* transmit timeout watchdog */
3467static int
3468bxe_watchdog(struct bxe_softc    *sc,
3469             struct bxe_fastpath *fp)
3470{
3471    BXE_FP_TX_LOCK(fp);
3472
3473    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3474        BXE_FP_TX_UNLOCK(fp);
3475        return (0);
3476    }
3477
3478    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3479    if(sc->trigger_grcdump) {
3480         /* taking grcdump */
3481         bxe_grc_dump(sc);
3482    }
3483
3484    BXE_FP_TX_UNLOCK(fp);
3485
3486    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3487    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3488
3489    return (-1);
3490}
3491
3492/* processes transmit completions */
3493static uint8_t
3494bxe_txeof(struct bxe_softc    *sc,
3495          struct bxe_fastpath *fp)
3496{
3497    if_t ifp = sc->ifp;
3498    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3499    uint16_t tx_bd_avail;
3500
3501    BXE_FP_TX_LOCK_ASSERT(fp);
3502
3503    bd_cons = fp->tx_bd_cons;
3504    hw_cons = le16toh(*fp->tx_cons_sb);
3505    sw_cons = fp->tx_pkt_cons;
3506
3507    while (sw_cons != hw_cons) {
3508        pkt_cons = TX_BD(sw_cons);
3509
3510        BLOGD(sc, DBG_TX,
3511              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3512              fp->index, hw_cons, sw_cons, pkt_cons);
3513
3514        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3515
3516        sw_cons++;
3517    }
3518
3519    fp->tx_pkt_cons = sw_cons;
3520    fp->tx_bd_cons  = bd_cons;
3521
3522    BLOGD(sc, DBG_TX,
3523          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3524          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3525
3526    mb();
3527
3528    tx_bd_avail = bxe_tx_avail(sc, fp);
3529
3530    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3531        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3532    } else {
3533        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3534    }
3535
3536    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3537        /* reset the watchdog timer if there are pending transmits */
3538        fp->watchdog_timer = BXE_TX_TIMEOUT;
3539        return (TRUE);
3540    } else {
3541        /* clear watchdog when there are no pending transmits */
3542        fp->watchdog_timer = 0;
3543        return (FALSE);
3544    }
3545}
3546
3547static void
3548bxe_drain_tx_queues(struct bxe_softc *sc)
3549{
3550    struct bxe_fastpath *fp;
3551    int i, count;
3552
3553    /* wait until all TX fastpath tasks have completed */
3554    for (i = 0; i < sc->num_queues; i++) {
3555        fp = &sc->fp[i];
3556
3557        count = 1000;
3558
3559        while (bxe_has_tx_work(fp)) {
3560
3561            BXE_FP_TX_LOCK(fp);
3562            bxe_txeof(sc, fp);
3563            BXE_FP_TX_UNLOCK(fp);
3564
3565            if (count == 0) {
3566                BLOGE(sc, "Timeout waiting for fp[%d] "
3567                          "transmits to complete!\n", i);
3568                bxe_panic(sc, ("tx drain failure\n"));
3569                return;
3570            }
3571
3572            count--;
3573            DELAY(1000);
3574            rmb();
3575        }
3576    }
3577
3578    return;
3579}
3580
3581static int
3582bxe_del_all_macs(struct bxe_softc          *sc,
3583                 struct ecore_vlan_mac_obj *mac_obj,
3584                 int                       mac_type,
3585                 uint8_t                   wait_for_comp)
3586{
3587    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3588    int rc;
3589
3590    /* wait for completion of requested */
3591    if (wait_for_comp) {
3592        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3593    }
3594
3595    /* Set the mac type of addresses we want to clear */
3596    bxe_set_bit(mac_type, &vlan_mac_flags);
3597
3598    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3599    if (rc < 0) {
3600        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3601            rc, mac_type, wait_for_comp);
3602    }
3603
3604    return (rc);
3605}
3606
3607static int
3608bxe_fill_accept_flags(struct bxe_softc *sc,
3609                      uint32_t         rx_mode,
3610                      unsigned long    *rx_accept_flags,
3611                      unsigned long    *tx_accept_flags)
3612{
3613    /* Clear the flags first */
3614    *rx_accept_flags = 0;
3615    *tx_accept_flags = 0;
3616
3617    switch (rx_mode) {
3618    case BXE_RX_MODE_NONE:
3619        /*
3620         * 'drop all' supersedes any accept flags that may have been
3621         * passed to the function.
3622         */
3623        break;
3624
3625    case BXE_RX_MODE_NORMAL:
3626        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3627        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3628        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3629
3630        /* internal switching mode */
3631        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3632        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3633        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3634
3635        break;
3636
3637    case BXE_RX_MODE_ALLMULTI:
3638        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3640        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3641
3642        /* internal switching mode */
3643        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3644        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3646
3647        break;
3648
3649    case BXE_RX_MODE_PROMISC:
3650        /*
3651         * According to deffinition of SI mode, iface in promisc mode
3652         * should receive matched and unmatched (in resolution of port)
3653         * unicast packets.
3654         */
3655        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3656        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3657        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3658        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3659
3660        /* internal switching mode */
3661        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3662        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3663
3664        if (IS_MF_SI(sc)) {
3665            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3666        } else {
3667            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3668        }
3669
3670        break;
3671
3672    default:
3673        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3674        return (-1);
3675    }
3676
3677    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3678    if (rx_mode != BXE_RX_MODE_NONE) {
3679        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3680        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3681    }
3682
3683    return (0);
3684}
3685
3686static int
3687bxe_set_q_rx_mode(struct bxe_softc *sc,
3688                  uint8_t          cl_id,
3689                  unsigned long    rx_mode_flags,
3690                  unsigned long    rx_accept_flags,
3691                  unsigned long    tx_accept_flags,
3692                  unsigned long    ramrod_flags)
3693{
3694    struct ecore_rx_mode_ramrod_params ramrod_param;
3695    int rc;
3696
3697    memset(&ramrod_param, 0, sizeof(ramrod_param));
3698
3699    /* Prepare ramrod parameters */
3700    ramrod_param.cid = 0;
3701    ramrod_param.cl_id = cl_id;
3702    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3703    ramrod_param.func_id = SC_FUNC(sc);
3704
3705    ramrod_param.pstate = &sc->sp_state;
3706    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3707
3708    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3709    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3710
3711    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3712
3713    ramrod_param.ramrod_flags = ramrod_flags;
3714    ramrod_param.rx_mode_flags = rx_mode_flags;
3715
3716    ramrod_param.rx_accept_flags = rx_accept_flags;
3717    ramrod_param.tx_accept_flags = tx_accept_flags;
3718
3719    rc = ecore_config_rx_mode(sc, &ramrod_param);
3720    if (rc < 0) {
3721        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3722            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3723            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3724            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3725            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3726        return (rc);
3727    }
3728
3729    return (0);
3730}
3731
3732static int
3733bxe_set_storm_rx_mode(struct bxe_softc *sc)
3734{
3735    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3736    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3737    int rc;
3738
3739    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3740                               &tx_accept_flags);
3741    if (rc) {
3742        return (rc);
3743    }
3744
3745    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3746    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3747
3748    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3749    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3750                              rx_accept_flags, tx_accept_flags,
3751                              ramrod_flags));
3752}
3753
3754/* returns the "mcp load_code" according to global load_count array */
3755static int
3756bxe_nic_load_no_mcp(struct bxe_softc *sc)
3757{
3758    int path = SC_PATH(sc);
3759    int port = SC_PORT(sc);
3760
3761    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3762          path, load_count[path][0], load_count[path][1],
3763          load_count[path][2]);
3764    load_count[path][0]++;
3765    load_count[path][1 + port]++;
3766    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3767          path, load_count[path][0], load_count[path][1],
3768          load_count[path][2]);
3769    if (load_count[path][0] == 1) {
3770        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3771    } else if (load_count[path][1 + port] == 1) {
3772        return (FW_MSG_CODE_DRV_LOAD_PORT);
3773    } else {
3774        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3775    }
3776}
3777
3778/* returns the "mcp load_code" according to global load_count array */
3779static int
3780bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3781{
3782    int port = SC_PORT(sc);
3783    int path = SC_PATH(sc);
3784
3785    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3786          path, load_count[path][0], load_count[path][1],
3787          load_count[path][2]);
3788    load_count[path][0]--;
3789    load_count[path][1 + port]--;
3790    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3791          path, load_count[path][0], load_count[path][1],
3792          load_count[path][2]);
3793    if (load_count[path][0] == 0) {
3794        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3795    } else if (load_count[path][1 + port] == 0) {
3796        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3797    } else {
3798        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3799    }
3800}
3801
3802/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3803static uint32_t
3804bxe_send_unload_req(struct bxe_softc *sc,
3805                    int              unload_mode)
3806{
3807    uint32_t reset_code = 0;
3808
3809    /* Select the UNLOAD request mode */
3810    if (unload_mode == UNLOAD_NORMAL) {
3811        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3812    } else {
3813        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3814    }
3815
3816    /* Send the request to the MCP */
3817    if (!BXE_NOMCP(sc)) {
3818        reset_code = bxe_fw_command(sc, reset_code, 0);
3819    } else {
3820        reset_code = bxe_nic_unload_no_mcp(sc);
3821    }
3822
3823    return (reset_code);
3824}
3825
3826/* send UNLOAD_DONE command to the MCP */
3827static void
3828bxe_send_unload_done(struct bxe_softc *sc,
3829                     uint8_t          keep_link)
3830{
3831    uint32_t reset_param =
3832        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3833
3834    /* Report UNLOAD_DONE to MCP */
3835    if (!BXE_NOMCP(sc)) {
3836        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3837    }
3838}
3839
3840static int
3841bxe_func_wait_started(struct bxe_softc *sc)
3842{
3843    int tout = 50;
3844
3845    if (!sc->port.pmf) {
3846        return (0);
3847    }
3848
3849    /*
3850     * (assumption: No Attention from MCP at this stage)
3851     * PMF probably in the middle of TX disable/enable transaction
3852     * 1. Sync IRS for default SB
3853     * 2. Sync SP queue - this guarantees us that attention handling started
3854     * 3. Wait, that TX disable/enable transaction completes
3855     *
3856     * 1+2 guarantee that if DCBX attention was scheduled it already changed
3857     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3858     * received completion for the transaction the state is TX_STOPPED.
3859     * State will return to STARTED after completion of TX_STOPPED-->STARTED
3860     * transaction.
3861     */
3862
3863    /* XXX make sure default SB ISR is done */
3864    /* need a way to synchronize an irq (intr_mtx?) */
3865
3866    /* XXX flush any work queues */
3867
3868    while (ecore_func_get_state(sc, &sc->func_obj) !=
3869           ECORE_F_STATE_STARTED && tout--) {
3870        DELAY(20000);
3871    }
3872
3873    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3874        /*
3875         * Failed to complete the transaction in a "good way"
3876         * Force both transactions with CLR bit.
3877         */
3878        struct ecore_func_state_params func_params = { NULL };
3879
3880        BLOGE(sc, "Unexpected function state! "
3881                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3882
3883        func_params.f_obj = &sc->func_obj;
3884        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3885
3886        /* STARTED-->TX_STOPPED */
3887        func_params.cmd = ECORE_F_CMD_TX_STOP;
3888        ecore_func_state_change(sc, &func_params);
3889
3890        /* TX_STOPPED-->STARTED */
3891        func_params.cmd = ECORE_F_CMD_TX_START;
3892        return (ecore_func_state_change(sc, &func_params));
3893    }
3894
3895    return (0);
3896}
3897
3898static int
3899bxe_stop_queue(struct bxe_softc *sc,
3900               int              index)
3901{
3902    struct bxe_fastpath *fp = &sc->fp[index];
3903    struct ecore_queue_state_params q_params = { NULL };
3904    int rc;
3905
3906    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3907
3908    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3909    /* We want to wait for completion in this context */
3910    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3911
3912    /* Stop the primary connection: */
3913
3914    /* ...halt the connection */
3915    q_params.cmd = ECORE_Q_CMD_HALT;
3916    rc = ecore_queue_state_change(sc, &q_params);
3917    if (rc) {
3918        return (rc);
3919    }
3920
3921    /* ...terminate the connection */
3922    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3923    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3924    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3925    rc = ecore_queue_state_change(sc, &q_params);
3926    if (rc) {
3927        return (rc);
3928    }
3929
3930    /* ...delete cfc entry */
3931    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3932    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3933    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3934    return (ecore_queue_state_change(sc, &q_params));
3935}
3936
3937/* wait for the outstanding SP commands */
3938static inline uint8_t
3939bxe_wait_sp_comp(struct bxe_softc *sc,
3940                 unsigned long    mask)
3941{
3942    unsigned long tmp;
3943    int tout = 5000; /* wait for 5 secs tops */
3944
3945    while (tout--) {
3946        mb();
3947        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3948            return (TRUE);
3949        }
3950
3951        DELAY(1000);
3952    }
3953
3954    mb();
3955
3956    tmp = atomic_load_acq_long(&sc->sp_state);
3957    if (tmp & mask) {
3958        BLOGE(sc, "Filtering completion timed out: "
3959                  "sp_state 0x%lx, mask 0x%lx\n",
3960              tmp, mask);
3961        return (FALSE);
3962    }
3963
3964    return (FALSE);
3965}
3966
3967static int
3968bxe_func_stop(struct bxe_softc *sc)
3969{
3970    struct ecore_func_state_params func_params = { NULL };
3971    int rc;
3972
3973    /* prepare parameters for function state transitions */
3974    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3975    func_params.f_obj = &sc->func_obj;
3976    func_params.cmd = ECORE_F_CMD_STOP;
3977
3978    /*
3979     * Try to stop the function the 'good way'. If it fails (in case
3980     * of a parity error during bxe_chip_cleanup()) and we are
3981     * not in a debug mode, perform a state transaction in order to
3982     * enable further HW_RESET transaction.
3983     */
3984    rc = ecore_func_state_change(sc, &func_params);
3985    if (rc) {
3986        BLOGE(sc, "FUNC_STOP ramrod failed. "
3987                  "Running a dry transaction (%d)\n", rc);
3988        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3989        return (ecore_func_state_change(sc, &func_params));
3990    }
3991
3992    return (0);
3993}
3994
3995static int
3996bxe_reset_hw(struct bxe_softc *sc,
3997             uint32_t         load_code)
3998{
3999    struct ecore_func_state_params func_params = { NULL };
4000
4001    /* Prepare parameters for function state transitions */
4002    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4003
4004    func_params.f_obj = &sc->func_obj;
4005    func_params.cmd = ECORE_F_CMD_HW_RESET;
4006
4007    func_params.params.hw_init.load_phase = load_code;
4008
4009    return (ecore_func_state_change(sc, &func_params));
4010}
4011
4012static void
4013bxe_int_disable_sync(struct bxe_softc *sc,
4014                     int              disable_hw)
4015{
4016    if (disable_hw) {
4017        /* prevent the HW from sending interrupts */
4018        bxe_int_disable(sc);
4019    }
4020
4021    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4022    /* make sure all ISRs are done */
4023
4024    /* XXX make sure sp_task is not running */
4025    /* cancel and flush work queues */
4026}
4027
4028static void
4029bxe_chip_cleanup(struct bxe_softc *sc,
4030                 uint32_t         unload_mode,
4031                 uint8_t          keep_link)
4032{
4033    int port = SC_PORT(sc);
4034    struct ecore_mcast_ramrod_params rparam = { NULL };
4035    uint32_t reset_code;
4036    int i, rc = 0;
4037
4038    bxe_drain_tx_queues(sc);
4039
4040    /* give HW time to discard old tx messages */
4041    DELAY(1000);
4042
4043    /* Clean all ETH MACs */
4044    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4045    if (rc < 0) {
4046        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4047    }
4048
4049    /* Clean up UC list  */
4050    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4051    if (rc < 0) {
4052        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4053    }
4054
4055    /* Disable LLH */
4056    if (!CHIP_IS_E1(sc)) {
4057        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4058    }
4059
4060    /* Set "drop all" to stop Rx */
4061
4062    /*
4063     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4064     * a race between the completion code and this code.
4065     */
4066    BXE_MCAST_LOCK(sc);
4067
4068    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4069        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4070    } else {
4071        bxe_set_storm_rx_mode(sc);
4072    }
4073
4074    /* Clean up multicast configuration */
4075    rparam.mcast_obj = &sc->mcast_obj;
4076    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4077    if (rc < 0) {
4078        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4079    }
4080
4081    BXE_MCAST_UNLOCK(sc);
4082
4083    // XXX bxe_iov_chip_cleanup(sc);
4084
4085    /*
4086     * Send the UNLOAD_REQUEST to the MCP. This will return if
4087     * this function should perform FUNCTION, PORT, or COMMON HW
4088     * reset.
4089     */
4090    reset_code = bxe_send_unload_req(sc, unload_mode);
4091
4092    /*
4093     * (assumption: No Attention from MCP at this stage)
4094     * PMF probably in the middle of TX disable/enable transaction
4095     */
4096    rc = bxe_func_wait_started(sc);
4097    if (rc) {
4098        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4099    }
4100
4101    /*
4102     * Close multi and leading connections
4103     * Completions for ramrods are collected in a synchronous way
4104     */
4105    for (i = 0; i < sc->num_queues; i++) {
4106        if (bxe_stop_queue(sc, i)) {
4107            goto unload_error;
4108        }
4109    }
4110
4111    /*
4112     * If SP settings didn't get completed so far - something
4113     * very wrong has happen.
4114     */
4115    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4116        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4117    }
4118
4119unload_error:
4120
4121    rc = bxe_func_stop(sc);
4122    if (rc) {
4123        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4124    }
4125
4126    /* disable HW interrupts */
4127    bxe_int_disable_sync(sc, TRUE);
4128
4129    /* detach interrupts */
4130    bxe_interrupt_detach(sc);
4131
4132    /* Reset the chip */
4133    rc = bxe_reset_hw(sc, reset_code);
4134    if (rc) {
4135        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4136    }
4137
4138    /* Report UNLOAD_DONE to MCP */
4139    bxe_send_unload_done(sc, keep_link);
4140}
4141
4142static void
4143bxe_disable_close_the_gate(struct bxe_softc *sc)
4144{
4145    uint32_t val;
4146    int port = SC_PORT(sc);
4147
4148    BLOGD(sc, DBG_LOAD,
4149          "Disabling 'close the gates'\n");
4150
4151    if (CHIP_IS_E1(sc)) {
4152        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4153                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4154        val = REG_RD(sc, addr);
4155        val &= ~(0x300);
4156        REG_WR(sc, addr, val);
4157    } else {
4158        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4159        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4160                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4161        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4162    }
4163}
4164
4165/*
4166 * Cleans the object that have internal lists without sending
4167 * ramrods. Should be run when interrutps are disabled.
4168 */
4169static void
4170bxe_squeeze_objects(struct bxe_softc *sc)
4171{
4172    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4173    struct ecore_mcast_ramrod_params rparam = { NULL };
4174    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4175    int rc;
4176
4177    /* Cleanup MACs' object first... */
4178
4179    /* Wait for completion of requested */
4180    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4181    /* Perform a dry cleanup */
4182    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4183
4184    /* Clean ETH primary MAC */
4185    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4186    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4187                             &ramrod_flags);
4188    if (rc != 0) {
4189        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4190    }
4191
4192    /* Cleanup UC list */
4193    vlan_mac_flags = 0;
4194    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4195    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4196                             &ramrod_flags);
4197    if (rc != 0) {
4198        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4199    }
4200
4201    /* Now clean mcast object... */
4202
4203    rparam.mcast_obj = &sc->mcast_obj;
4204    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4205
4206    /* Add a DEL command... */
4207    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4208    if (rc < 0) {
4209        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4210    }
4211
4212    /* now wait until all pending commands are cleared */
4213
4214    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4215    while (rc != 0) {
4216        if (rc < 0) {
4217            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4218            return;
4219        }
4220
4221        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4222    }
4223}
4224
4225/* stop the controller */
4226static __noinline int
4227bxe_nic_unload(struct bxe_softc *sc,
4228               uint32_t         unload_mode,
4229               uint8_t          keep_link)
4230{
4231    uint8_t global = FALSE;
4232    uint32_t val;
4233    int i;
4234
4235    BXE_CORE_LOCK_ASSERT(sc);
4236
4237    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4238
4239    for (i = 0; i < sc->num_queues; i++) {
4240        struct bxe_fastpath *fp;
4241
4242        fp = &sc->fp[i];
4243        BXE_FP_TX_LOCK(fp);
4244        BXE_FP_TX_UNLOCK(fp);
4245    }
4246
4247    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4248
4249    /* mark driver as unloaded in shmem2 */
4250    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4251        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4252        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4253                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4254    }
4255
4256    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4257        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4258        /*
4259         * We can get here if the driver has been unloaded
4260         * during parity error recovery and is either waiting for a
4261         * leader to complete or for other functions to unload and
4262         * then ifconfig down has been issued. In this case we want to
4263         * unload and let other functions to complete a recovery
4264         * process.
4265         */
4266        sc->recovery_state = BXE_RECOVERY_DONE;
4267        sc->is_leader = 0;
4268        bxe_release_leader_lock(sc);
4269        mb();
4270
4271        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4272        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4273            " state = 0x%x\n", sc->recovery_state, sc->state);
4274        return (-1);
4275    }
4276
4277    /*
4278     * Nothing to do during unload if previous bxe_nic_load()
4279     * did not completed successfully - all resourses are released.
4280     */
4281    if ((sc->state == BXE_STATE_CLOSED) ||
4282        (sc->state == BXE_STATE_ERROR)) {
4283        return (0);
4284    }
4285
4286    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4287    mb();
4288
4289    /* stop tx */
4290    bxe_tx_disable(sc);
4291
4292    sc->rx_mode = BXE_RX_MODE_NONE;
4293    /* XXX set rx mode ??? */
4294
4295    if (IS_PF(sc) && !sc->grcdump_done) {
4296        /* set ALWAYS_ALIVE bit in shmem */
4297        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4298
4299        bxe_drv_pulse(sc);
4300
4301        bxe_stats_handle(sc, STATS_EVENT_STOP);
4302        bxe_save_statistics(sc);
4303    }
4304
4305    /* wait till consumers catch up with producers in all queues */
4306    bxe_drain_tx_queues(sc);
4307
4308    /* if VF indicate to PF this function is going down (PF will delete sp
4309     * elements and clear initializations
4310     */
4311    if (IS_VF(sc)) {
4312        ; /* bxe_vfpf_close_vf(sc); */
4313    } else if (unload_mode != UNLOAD_RECOVERY) {
4314        /* if this is a normal/close unload need to clean up chip */
4315        if (!sc->grcdump_done)
4316            bxe_chip_cleanup(sc, unload_mode, keep_link);
4317    } else {
4318        /* Send the UNLOAD_REQUEST to the MCP */
4319        bxe_send_unload_req(sc, unload_mode);
4320
4321        /*
4322         * Prevent transactions to host from the functions on the
4323         * engine that doesn't reset global blocks in case of global
4324         * attention once gloabl blocks are reset and gates are opened
4325         * (the engine which leader will perform the recovery
4326         * last).
4327         */
4328        if (!CHIP_IS_E1x(sc)) {
4329            bxe_pf_disable(sc);
4330        }
4331
4332        /* disable HW interrupts */
4333        bxe_int_disable_sync(sc, TRUE);
4334
4335        /* detach interrupts */
4336        bxe_interrupt_detach(sc);
4337
4338        /* Report UNLOAD_DONE to MCP */
4339        bxe_send_unload_done(sc, FALSE);
4340    }
4341
4342    /*
4343     * At this stage no more interrupts will arrive so we may safely clean
4344     * the queue'able objects here in case they failed to get cleaned so far.
4345     */
4346    if (IS_PF(sc)) {
4347        bxe_squeeze_objects(sc);
4348    }
4349
4350    /* There should be no more pending SP commands at this stage */
4351    sc->sp_state = 0;
4352
4353    sc->port.pmf = 0;
4354
4355    bxe_free_fp_buffers(sc);
4356
4357    if (IS_PF(sc)) {
4358        bxe_free_mem(sc);
4359    }
4360
4361    bxe_free_fw_stats_mem(sc);
4362
4363    sc->state = BXE_STATE_CLOSED;
4364
4365    /*
4366     * Check if there are pending parity attentions. If there are - set
4367     * RECOVERY_IN_PROGRESS.
4368     */
4369    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4370        bxe_set_reset_in_progress(sc);
4371
4372        /* Set RESET_IS_GLOBAL if needed */
4373        if (global) {
4374            bxe_set_reset_global(sc);
4375        }
4376    }
4377
4378    /*
4379     * The last driver must disable a "close the gate" if there is no
4380     * parity attention or "process kill" pending.
4381     */
4382    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4383        bxe_reset_is_done(sc, SC_PATH(sc))) {
4384        bxe_disable_close_the_gate(sc);
4385    }
4386
4387    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4388
4389    return (0);
4390}
4391
4392/*
4393 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4394 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4395 */
4396static int
4397bxe_ifmedia_update(struct ifnet  *ifp)
4398{
4399    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4400    struct ifmedia *ifm;
4401
4402    ifm = &sc->ifmedia;
4403
4404    /* We only support Ethernet media type. */
4405    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4406        return (EINVAL);
4407    }
4408
4409    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4410    case IFM_AUTO:
4411         break;
4412    case IFM_10G_CX4:
4413    case IFM_10G_SR:
4414    case IFM_10G_T:
4415    case IFM_10G_TWINAX:
4416    default:
4417        /* We don't support changing the media type. */
4418        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4419              IFM_SUBTYPE(ifm->ifm_media));
4420        return (EINVAL);
4421    }
4422
4423    return (0);
4424}
4425
4426/*
4427 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4428 */
4429static void
4430bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4431{
4432    struct bxe_softc *sc = if_getsoftc(ifp);
4433
4434    /* Report link down if the driver isn't running. */
4435    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4436        ifmr->ifm_active |= IFM_NONE;
4437        return;
4438    }
4439
4440    /* Setup the default interface info. */
4441    ifmr->ifm_status = IFM_AVALID;
4442    ifmr->ifm_active = IFM_ETHER;
4443
4444    if (sc->link_vars.link_up) {
4445        ifmr->ifm_status |= IFM_ACTIVE;
4446    } else {
4447        ifmr->ifm_active |= IFM_NONE;
4448        return;
4449    }
4450
4451    ifmr->ifm_active |= sc->media;
4452
4453    if (sc->link_vars.duplex == DUPLEX_FULL) {
4454        ifmr->ifm_active |= IFM_FDX;
4455    } else {
4456        ifmr->ifm_active |= IFM_HDX;
4457    }
4458}
4459
4460static void
4461bxe_handle_chip_tq(void *context,
4462                   int  pending)
4463{
4464    struct bxe_softc *sc = (struct bxe_softc *)context;
4465    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4466
4467    switch (work)
4468    {
4469
4470    case CHIP_TQ_REINIT:
4471        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4472            /* restart the interface */
4473            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4474            bxe_periodic_stop(sc);
4475            BXE_CORE_LOCK(sc);
4476            bxe_stop_locked(sc);
4477            bxe_init_locked(sc);
4478            BXE_CORE_UNLOCK(sc);
4479        }
4480        break;
4481
4482    default:
4483        break;
4484    }
4485}
4486
4487/*
4488 * Handles any IOCTL calls from the operating system.
4489 *
4490 * Returns:
4491 *   0 = Success, >0 Failure
4492 */
4493static int
4494bxe_ioctl(if_t ifp,
4495          u_long       command,
4496          caddr_t      data)
4497{
4498    struct bxe_softc *sc = if_getsoftc(ifp);
4499    struct ifreq *ifr = (struct ifreq *)data;
4500    int mask = 0;
4501    int reinit = 0;
4502    int error = 0;
4503
4504    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4505    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4506
4507    switch (command)
4508    {
4509    case SIOCSIFMTU:
4510        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4511              ifr->ifr_mtu);
4512
4513        if (sc->mtu == ifr->ifr_mtu) {
4514            /* nothing to change */
4515            break;
4516        }
4517
4518        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4519            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4520                  ifr->ifr_mtu, mtu_min, mtu_max);
4521            error = EINVAL;
4522            break;
4523        }
4524
4525        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4526                             (unsigned long)ifr->ifr_mtu);
4527	/*
4528        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4529                              (unsigned long)ifr->ifr_mtu);
4530	XXX - Not sure why it needs to be atomic
4531	*/
4532	if_setmtu(ifp, ifr->ifr_mtu);
4533        reinit = 1;
4534        break;
4535
4536    case SIOCSIFFLAGS:
4537        /* toggle the interface state up or down */
4538        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4539
4540	BXE_CORE_LOCK(sc);
4541        /* check if the interface is up */
4542        if (if_getflags(ifp) & IFF_UP) {
4543            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4544                /* set the receive mode flags */
4545                bxe_set_rx_mode(sc);
4546            } else if(sc->state != BXE_STATE_DISABLED) {
4547		bxe_init_locked(sc);
4548            }
4549        } else {
4550            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4551		bxe_periodic_stop(sc);
4552		bxe_stop_locked(sc);
4553            }
4554        }
4555	BXE_CORE_UNLOCK(sc);
4556
4557        break;
4558
4559    case SIOCADDMULTI:
4560    case SIOCDELMULTI:
4561        /* add/delete multicast addresses */
4562        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4563
4564        /* check if the interface is up */
4565        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4566            /* set the receive mode flags */
4567	    BXE_CORE_LOCK(sc);
4568            bxe_set_rx_mode(sc);
4569	    BXE_CORE_UNLOCK(sc);
4570        }
4571
4572        break;
4573
4574    case SIOCSIFCAP:
4575        /* find out which capabilities have changed */
4576        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4577
4578        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4579              mask);
4580
4581        /* toggle the LRO capabilites enable flag */
4582        if (mask & IFCAP_LRO) {
4583	    if_togglecapenable(ifp, IFCAP_LRO);
4584            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4585                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4586            reinit = 1;
4587        }
4588
4589        /* toggle the TXCSUM checksum capabilites enable flag */
4590        if (mask & IFCAP_TXCSUM) {
4591	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4592            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4593                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4594            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4595                if_sethwassistbits(ifp, (CSUM_IP      |
4596                                    CSUM_TCP      |
4597                                    CSUM_UDP      |
4598                                    CSUM_TSO      |
4599                                    CSUM_TCP_IPV6 |
4600                                    CSUM_UDP_IPV6), 0);
4601            } else {
4602		if_clearhwassist(ifp); /* XXX */
4603            }
4604        }
4605
4606        /* toggle the RXCSUM checksum capabilities enable flag */
4607        if (mask & IFCAP_RXCSUM) {
4608	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4609            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4610                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4611            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4612                if_sethwassistbits(ifp, (CSUM_IP      |
4613                                    CSUM_TCP      |
4614                                    CSUM_UDP      |
4615                                    CSUM_TSO      |
4616                                    CSUM_TCP_IPV6 |
4617                                    CSUM_UDP_IPV6), 0);
4618            } else {
4619		if_clearhwassist(ifp); /* XXX */
4620            }
4621        }
4622
4623        /* toggle TSO4 capabilities enabled flag */
4624        if (mask & IFCAP_TSO4) {
4625            if_togglecapenable(ifp, IFCAP_TSO4);
4626            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4627                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4628        }
4629
4630        /* toggle TSO6 capabilities enabled flag */
4631        if (mask & IFCAP_TSO6) {
4632	    if_togglecapenable(ifp, IFCAP_TSO6);
4633            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4634                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4635        }
4636
4637        /* toggle VLAN_HWTSO capabilities enabled flag */
4638        if (mask & IFCAP_VLAN_HWTSO) {
4639
4640	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4641            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4642                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4643        }
4644
4645        /* toggle VLAN_HWCSUM capabilities enabled flag */
4646        if (mask & IFCAP_VLAN_HWCSUM) {
4647            /* XXX investigate this... */
4648            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4649            error = EINVAL;
4650        }
4651
4652        /* toggle VLAN_MTU capabilities enable flag */
4653        if (mask & IFCAP_VLAN_MTU) {
4654            /* XXX investigate this... */
4655            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4656            error = EINVAL;
4657        }
4658
4659        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4660        if (mask & IFCAP_VLAN_HWTAGGING) {
4661            /* XXX investigate this... */
4662            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4663            error = EINVAL;
4664        }
4665
4666        /* toggle VLAN_HWFILTER capabilities enabled flag */
4667        if (mask & IFCAP_VLAN_HWFILTER) {
4668            /* XXX investigate this... */
4669            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4670            error = EINVAL;
4671        }
4672
4673        /* XXX not yet...
4674         * IFCAP_WOL_MAGIC
4675         */
4676
4677        break;
4678
4679    case SIOCSIFMEDIA:
4680    case SIOCGIFMEDIA:
4681        /* set/get interface media */
4682        BLOGD(sc, DBG_IOCTL,
4683              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4684              (command & 0xff));
4685        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4686        break;
4687
4688    default:
4689        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4690              (command & 0xff));
4691        error = ether_ioctl(ifp, command, data);
4692        break;
4693    }
4694
4695    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4696        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4697              "Re-initializing hardware from IOCTL change\n");
4698	bxe_periodic_stop(sc);
4699	BXE_CORE_LOCK(sc);
4700	bxe_stop_locked(sc);
4701	bxe_init_locked(sc);
4702	BXE_CORE_UNLOCK(sc);
4703    }
4704
4705    return (error);
4706}
4707
4708static __noinline void
4709bxe_dump_mbuf(struct bxe_softc *sc,
4710              struct mbuf      *m,
4711              uint8_t          contents)
4712{
4713    char * type;
4714    int i = 0;
4715
4716    if (!(sc->debug & DBG_MBUF)) {
4717        return;
4718    }
4719
4720    if (m == NULL) {
4721        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4722        return;
4723    }
4724
4725    while (m) {
4726
4727#if __FreeBSD_version >= 1000000
4728        BLOGD(sc, DBG_MBUF,
4729              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4730              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4731
4732        if (m->m_flags & M_PKTHDR) {
4733             BLOGD(sc, DBG_MBUF,
4734                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4735                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4736                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4737        }
4738#else
4739        BLOGD(sc, DBG_MBUF,
4740              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4741              i, m, m->m_len, m->m_flags,
4742              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4743
4744        if (m->m_flags & M_PKTHDR) {
4745             BLOGD(sc, DBG_MBUF,
4746                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4747                   i, m->m_pkthdr.len, m->m_flags,
4748                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4749                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4750                   "\22M_PROMISC\23M_NOFREE",
4751                   (int)m->m_pkthdr.csum_flags,
4752                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4753                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4754                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4755                   "\14CSUM_PSEUDO_HDR");
4756        }
4757#endif /* #if __FreeBSD_version >= 1000000 */
4758
4759        if (m->m_flags & M_EXT) {
4760            switch (m->m_ext.ext_type) {
4761            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4762            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4763            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4764            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4765            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4766            case EXT_PACKET:     type = "EXT_PACKET";     break;
4767            case EXT_MBUF:       type = "EXT_MBUF";       break;
4768            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4769            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4770            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4771            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4772            default:             type = "UNKNOWN";        break;
4773            }
4774
4775            BLOGD(sc, DBG_MBUF,
4776                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4777                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4778        }
4779
4780        if (contents) {
4781            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4782        }
4783
4784        m = m->m_next;
4785        i++;
4786    }
4787}
4788
4789/*
4790 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4791 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4792 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
4793 * The headers comes in a separate bd in FreeBSD so 13-3=10.
4794 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4795 */
4796static int
4797bxe_chktso_window(struct bxe_softc  *sc,
4798                  int               nsegs,
4799                  bus_dma_segment_t *segs,
4800                  struct mbuf       *m)
4801{
4802    uint32_t num_wnds, wnd_size, wnd_sum;
4803    int32_t frag_idx, wnd_idx;
4804    unsigned short lso_mss;
4805    int defrag;
4806
4807    defrag = 0;
4808    wnd_sum = 0;
4809    wnd_size = 10;
4810    num_wnds = nsegs - wnd_size;
4811    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4812
4813    /*
4814     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
4815     * first window sum of data while skipping the first assuming it is the
4816     * header in FreeBSD.
4817     */
4818    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4819        wnd_sum += htole16(segs[frag_idx].ds_len);
4820    }
4821
4822    /* check the first 10 bd window size */
4823    if (wnd_sum < lso_mss) {
4824        return (1);
4825    }
4826
4827    /* run through the windows */
4828    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4829        /* subtract the first mbuf->m_len of the last wndw(-header) */
4830        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4831        /* add the next mbuf len to the len of our new window */
4832        wnd_sum += htole16(segs[frag_idx].ds_len);
4833        if (wnd_sum < lso_mss) {
4834            return (1);
4835        }
4836    }
4837
4838    return (0);
4839}
4840
4841static uint8_t
4842bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4843                    struct mbuf         *m,
4844                    uint32_t            *parsing_data)
4845{
4846    struct ether_vlan_header *eh = NULL;
4847    struct ip *ip4 = NULL;
4848    struct ip6_hdr *ip6 = NULL;
4849    caddr_t ip = NULL;
4850    struct tcphdr *th = NULL;
4851    int e_hlen, ip_hlen, l4_off;
4852    uint16_t proto;
4853
4854    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4855        /* no L4 checksum offload needed */
4856        return (0);
4857    }
4858
4859    /* get the Ethernet header */
4860    eh = mtod(m, struct ether_vlan_header *);
4861
4862    /* handle VLAN encapsulation if present */
4863    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4864        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4865        proto  = ntohs(eh->evl_proto);
4866    } else {
4867        e_hlen = ETHER_HDR_LEN;
4868        proto  = ntohs(eh->evl_encap_proto);
4869    }
4870
4871    switch (proto) {
4872    case ETHERTYPE_IP:
4873        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4874        ip4 = (m->m_len < sizeof(struct ip)) ?
4875                  (struct ip *)m->m_next->m_data :
4876                  (struct ip *)(m->m_data + e_hlen);
4877        /* ip_hl is number of 32-bit words */
4878        ip_hlen = (ip4->ip_hl << 2);
4879        ip = (caddr_t)ip4;
4880        break;
4881    case ETHERTYPE_IPV6:
4882        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4883        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4884                  (struct ip6_hdr *)m->m_next->m_data :
4885                  (struct ip6_hdr *)(m->m_data + e_hlen);
4886        /* XXX cannot support offload with IPv6 extensions */
4887        ip_hlen = sizeof(struct ip6_hdr);
4888        ip = (caddr_t)ip6;
4889        break;
4890    default:
4891        /* We can't offload in this case... */
4892        /* XXX error stat ??? */
4893        return (0);
4894    }
4895
4896    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4897    l4_off = (e_hlen + ip_hlen);
4898
4899    *parsing_data |=
4900        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4901         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4902
4903    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4904                                  CSUM_TSO |
4905                                  CSUM_TCP_IPV6)) {
4906        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4907        th = (struct tcphdr *)(ip + ip_hlen);
4908        /* th_off is number of 32-bit words */
4909        *parsing_data |= ((th->th_off <<
4910                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4911                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4912        return (l4_off + (th->th_off << 2)); /* entire header length */
4913    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4914                                         CSUM_UDP_IPV6)) {
4915        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4916        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4917    } else {
4918        /* XXX error stat ??? */
4919        return (0);
4920    }
4921}
4922
4923static uint8_t
4924bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4925                 struct mbuf                *m,
4926                 struct eth_tx_parse_bd_e1x *pbd)
4927{
4928    struct ether_vlan_header *eh = NULL;
4929    struct ip *ip4 = NULL;
4930    struct ip6_hdr *ip6 = NULL;
4931    caddr_t ip = NULL;
4932    struct tcphdr *th = NULL;
4933    struct udphdr *uh = NULL;
4934    int e_hlen, ip_hlen;
4935    uint16_t proto;
4936    uint8_t hlen;
4937    uint16_t tmp_csum;
4938    uint32_t *tmp_uh;
4939
4940    /* get the Ethernet header */
4941    eh = mtod(m, struct ether_vlan_header *);
4942
4943    /* handle VLAN encapsulation if present */
4944    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4945        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4946        proto  = ntohs(eh->evl_proto);
4947    } else {
4948        e_hlen = ETHER_HDR_LEN;
4949        proto  = ntohs(eh->evl_encap_proto);
4950    }
4951
4952    switch (proto) {
4953    case ETHERTYPE_IP:
4954        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4955        ip4 = (m->m_len < sizeof(struct ip)) ?
4956                  (struct ip *)m->m_next->m_data :
4957                  (struct ip *)(m->m_data + e_hlen);
4958        /* ip_hl is number of 32-bit words */
4959        ip_hlen = (ip4->ip_hl << 1);
4960        ip = (caddr_t)ip4;
4961        break;
4962    case ETHERTYPE_IPV6:
4963        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4964        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4965                  (struct ip6_hdr *)m->m_next->m_data :
4966                  (struct ip6_hdr *)(m->m_data + e_hlen);
4967        /* XXX cannot support offload with IPv6 extensions */
4968        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4969        ip = (caddr_t)ip6;
4970        break;
4971    default:
4972        /* We can't offload in this case... */
4973        /* XXX error stat ??? */
4974        return (0);
4975    }
4976
4977    hlen = (e_hlen >> 1);
4978
4979    /* note that rest of global_data is indirectly zeroed here */
4980    if (m->m_flags & M_VLANTAG) {
4981        pbd->global_data =
4982            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4983    } else {
4984        pbd->global_data = htole16(hlen);
4985    }
4986
4987    pbd->ip_hlen_w = ip_hlen;
4988
4989    hlen += pbd->ip_hlen_w;
4990
4991    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4992
4993    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4994                                  CSUM_TSO |
4995                                  CSUM_TCP_IPV6)) {
4996        th = (struct tcphdr *)(ip + (ip_hlen << 1));
4997        /* th_off is number of 32-bit words */
4998        hlen += (uint16_t)(th->th_off << 1);
4999    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5000                                         CSUM_UDP_IPV6)) {
5001        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5002        hlen += (sizeof(struct udphdr) / 2);
5003    } else {
5004        /* valid case as only CSUM_IP was set */
5005        return (0);
5006    }
5007
5008    pbd->total_hlen_w = htole16(hlen);
5009
5010    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5011                                  CSUM_TSO |
5012                                  CSUM_TCP_IPV6)) {
5013        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5014        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5015    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5016                                         CSUM_UDP_IPV6)) {
5017        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5018
5019        /*
5020         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5021         * checksums and does not know anything about the UDP header and where
5022         * the checksum field is located. It only knows about TCP. Therefore
5023         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5024         * offload. Since the checksum field offset for TCP is 16 bytes and
5025         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5026         * bytes less than the start of the UDP header. This allows the
5027         * hardware to write the checksum in the correct spot. But the
5028         * hardware will compute a checksum which includes the last 10 bytes
5029         * of the IP header. To correct this we tweak the stack computed
5030         * pseudo checksum by folding in the calculation of the inverse
5031         * checksum for those final 10 bytes of the IP header. This allows
5032         * the correct checksum to be computed by the hardware.
5033         */
5034
5035        /* set pointer 10 bytes before UDP header */
5036        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5037
5038        /* calculate a pseudo header checksum over the first 10 bytes */
5039        tmp_csum = in_pseudo(*tmp_uh,
5040                             *(tmp_uh + 1),
5041                             *(uint16_t *)(tmp_uh + 2));
5042
5043        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5044    }
5045
5046    return (hlen * 2); /* entire header length, number of bytes */
5047}
5048
5049static void
5050bxe_set_pbd_lso_e2(struct mbuf *m,
5051                   uint32_t    *parsing_data)
5052{
5053    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5054                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5055                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5056
5057    /* XXX test for IPv6 with extension header... */
5058}
5059
5060static void
5061bxe_set_pbd_lso(struct mbuf                *m,
5062                struct eth_tx_parse_bd_e1x *pbd)
5063{
5064    struct ether_vlan_header *eh = NULL;
5065    struct ip *ip = NULL;
5066    struct tcphdr *th = NULL;
5067    int e_hlen;
5068
5069    /* get the Ethernet header */
5070    eh = mtod(m, struct ether_vlan_header *);
5071
5072    /* handle VLAN encapsulation if present */
5073    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5074                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5075
5076    /* get the IP and TCP header, with LSO entire header in first mbuf */
5077    /* XXX assuming IPv4 */
5078    ip = (struct ip *)(m->m_data + e_hlen);
5079    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5080
5081    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5082    pbd->tcp_send_seq = ntohl(th->th_seq);
5083    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5084
5085#if 1
5086        /* XXX IPv4 */
5087        pbd->ip_id = ntohs(ip->ip_id);
5088        pbd->tcp_pseudo_csum =
5089            ntohs(in_pseudo(ip->ip_src.s_addr,
5090                            ip->ip_dst.s_addr,
5091                            htons(IPPROTO_TCP)));
5092#else
5093        /* XXX IPv6 */
5094        pbd->tcp_pseudo_csum =
5095            ntohs(in_pseudo(&ip6->ip6_src,
5096                            &ip6->ip6_dst,
5097                            htons(IPPROTO_TCP)));
5098#endif
5099
5100    pbd->global_data |=
5101        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5102}
5103
5104/*
5105 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
5106 * visible to the controller.
5107 *
5108 * If an mbuf is submitted to this routine and cannot be given to the
5109 * controller (e.g. it has too many fragments) then the function may free
5110 * the mbuf and return to the caller.
5111 *
5112 * Returns:
5113 *   0 = Success, !0 = Failure
5114 *   Note the side effect that an mbuf may be freed if it causes a problem.
5115 */
5116static int
5117bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5118{
5119    bus_dma_segment_t segs[32];
5120    struct mbuf *m0;
5121    struct bxe_sw_tx_bd *tx_buf;
5122    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5123    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5124    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5125    struct eth_tx_bd *tx_data_bd;
5126    struct eth_tx_bd *tx_total_pkt_size_bd;
5127    struct eth_tx_start_bd *tx_start_bd;
5128    uint16_t bd_prod, pkt_prod, total_pkt_size;
5129    uint8_t mac_type;
5130    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5131    struct bxe_softc *sc;
5132    uint16_t tx_bd_avail;
5133    struct ether_vlan_header *eh;
5134    uint32_t pbd_e2_parsing_data = 0;
5135    uint8_t hlen = 0;
5136    int tmp_bd;
5137    int i;
5138
5139    sc = fp->sc;
5140
5141#if __FreeBSD_version >= 800000
5142    M_ASSERTPKTHDR(*m_head);
5143#endif /* #if __FreeBSD_version >= 800000 */
5144
5145    m0 = *m_head;
5146    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5147    tx_start_bd = NULL;
5148    tx_data_bd = NULL;
5149    tx_total_pkt_size_bd = NULL;
5150
5151    /* get the H/W pointer for packets and BDs */
5152    pkt_prod = fp->tx_pkt_prod;
5153    bd_prod = fp->tx_bd_prod;
5154
5155    mac_type = UNICAST_ADDRESS;
5156
5157    /* map the mbuf into the next open DMAable memory */
5158    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5159    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5160                                    tx_buf->m_map, m0,
5161                                    segs, &nsegs, BUS_DMA_NOWAIT);
5162
5163    /* mapping errors */
5164    if(__predict_false(error != 0)) {
5165        fp->eth_q_stats.tx_dma_mapping_failure++;
5166        if (error == ENOMEM) {
5167            /* resource issue, try again later */
5168            rc = ENOMEM;
5169        } else if (error == EFBIG) {
5170            /* possibly recoverable with defragmentation */
5171            fp->eth_q_stats.mbuf_defrag_attempts++;
5172            m0 = m_defrag(*m_head, M_NOWAIT);
5173            if (m0 == NULL) {
5174                fp->eth_q_stats.mbuf_defrag_failures++;
5175                rc = ENOBUFS;
5176            } else {
5177                /* defrag successful, try mapping again */
5178                *m_head = m0;
5179                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5180                                                tx_buf->m_map, m0,
5181                                                segs, &nsegs, BUS_DMA_NOWAIT);
5182                if (error) {
5183                    fp->eth_q_stats.tx_dma_mapping_failure++;
5184                    rc = error;
5185                }
5186            }
5187        } else {
5188            /* unknown, unrecoverable mapping error */
5189            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5190            bxe_dump_mbuf(sc, m0, FALSE);
5191            rc = error;
5192        }
5193
5194        goto bxe_tx_encap_continue;
5195    }
5196
5197    tx_bd_avail = bxe_tx_avail(sc, fp);
5198
5199    /* make sure there is enough room in the send queue */
5200    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5201        /* Recoverable, try again later. */
5202        fp->eth_q_stats.tx_hw_queue_full++;
5203        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5204        rc = ENOMEM;
5205        goto bxe_tx_encap_continue;
5206    }
5207
5208    /* capture the current H/W TX chain high watermark */
5209    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5210                        (TX_BD_USABLE - tx_bd_avail))) {
5211        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5212    }
5213
5214    /* make sure it fits in the packet window */
5215    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5216        /*
5217         * The mbuf may be to big for the controller to handle. If the frame
5218         * is a TSO frame we'll need to do an additional check.
5219         */
5220        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5221            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5222                goto bxe_tx_encap_continue; /* OK to send */
5223            } else {
5224                fp->eth_q_stats.tx_window_violation_tso++;
5225            }
5226        } else {
5227            fp->eth_q_stats.tx_window_violation_std++;
5228        }
5229
5230        /* lets try to defragment this mbuf and remap it */
5231        fp->eth_q_stats.mbuf_defrag_attempts++;
5232        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5233
5234        m0 = m_defrag(*m_head, M_NOWAIT);
5235        if (m0 == NULL) {
5236            fp->eth_q_stats.mbuf_defrag_failures++;
5237            /* Ugh, just drop the frame... :( */
5238            rc = ENOBUFS;
5239        } else {
5240            /* defrag successful, try mapping again */
5241            *m_head = m0;
5242            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5243                                            tx_buf->m_map, m0,
5244                                            segs, &nsegs, BUS_DMA_NOWAIT);
5245            if (error) {
5246                fp->eth_q_stats.tx_dma_mapping_failure++;
5247                /* No sense in trying to defrag/copy chain, drop it. :( */
5248                rc = error;
5249            } else {
5250               /* if the chain is still too long then drop it */
5251                if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5252                    /*
5253                     * in case TSO is enabled nsegs should be checked against
5254                     * BXE_TSO_MAX_SEGMENTS
5255                     */
5256                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5257                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5258                        fp->eth_q_stats.nsegs_path1_errors++;
5259                        rc = ENODEV;
5260                    }
5261                } else {
5262                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5263                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5264                        fp->eth_q_stats.nsegs_path2_errors++;
5265                        rc = ENODEV;
5266                    }
5267                }
5268            }
5269        }
5270    }
5271
5272bxe_tx_encap_continue:
5273
5274    /* Check for errors */
5275    if (rc) {
5276        if (rc == ENOMEM) {
5277            /* recoverable try again later  */
5278        } else {
5279            fp->eth_q_stats.tx_soft_errors++;
5280            fp->eth_q_stats.mbuf_alloc_tx--;
5281            m_freem(*m_head);
5282            *m_head = NULL;
5283        }
5284
5285        return (rc);
5286    }
5287
5288    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5289    if (m0->m_flags & M_BCAST) {
5290        mac_type = BROADCAST_ADDRESS;
5291    } else if (m0->m_flags & M_MCAST) {
5292        mac_type = MULTICAST_ADDRESS;
5293    }
5294
5295    /* store the mbuf into the mbuf ring */
5296    tx_buf->m        = m0;
5297    tx_buf->first_bd = fp->tx_bd_prod;
5298    tx_buf->flags    = 0;
5299
5300    /* prepare the first transmit (start) BD for the mbuf */
5301    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5302
5303    BLOGD(sc, DBG_TX,
5304          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5305          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5306
5307    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5308    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5309    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5310    total_pkt_size += tx_start_bd->nbytes;
5311    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5312
5313    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5314
5315    /* all frames have at least Start BD + Parsing BD */
5316    nbds = nsegs + 1;
5317    tx_start_bd->nbd = htole16(nbds);
5318
5319    if (m0->m_flags & M_VLANTAG) {
5320        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5321        tx_start_bd->bd_flags.as_bitfield |=
5322            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5323    } else {
5324        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5325        if (IS_VF(sc)) {
5326            /* map ethernet header to find type and header length */
5327            eh = mtod(m0, struct ether_vlan_header *);
5328            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5329        } else {
5330            /* used by FW for packet accounting */
5331            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5332        }
5333    }
5334
5335    /*
5336     * add a parsing BD from the chain. The parsing BD is always added
5337     * though it is only used for TSO and chksum
5338     */
5339    bd_prod = TX_BD_NEXT(bd_prod);
5340
5341    if (m0->m_pkthdr.csum_flags) {
5342        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5343            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5344            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5345        }
5346
5347        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5348            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5349                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5350        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5351            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5352                                                  ETH_TX_BD_FLAGS_IS_UDP |
5353                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5354        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5355                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5356            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5357        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5358            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5359                                                  ETH_TX_BD_FLAGS_IS_UDP);
5360        }
5361    }
5362
5363    if (!CHIP_IS_E1x(sc)) {
5364        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5365        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5366
5367        if (m0->m_pkthdr.csum_flags) {
5368            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5369        }
5370
5371        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5372                 mac_type);
5373    } else {
5374        uint16_t global_data = 0;
5375
5376        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5377        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5378
5379        if (m0->m_pkthdr.csum_flags) {
5380            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5381        }
5382
5383        SET_FLAG(global_data,
5384                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5385        pbd_e1x->global_data |= htole16(global_data);
5386    }
5387
5388    /* setup the parsing BD with TSO specific info */
5389    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5390        fp->eth_q_stats.tx_ofld_frames_lso++;
5391        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5392
5393        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5394            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5395
5396            /* split the first BD into header/data making the fw job easy */
5397            nbds++;
5398            tx_start_bd->nbd = htole16(nbds);
5399            tx_start_bd->nbytes = htole16(hlen);
5400
5401            bd_prod = TX_BD_NEXT(bd_prod);
5402
5403            /* new transmit BD after the tx_parse_bd */
5404            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5405            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5406            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5407            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5408            if (tx_total_pkt_size_bd == NULL) {
5409                tx_total_pkt_size_bd = tx_data_bd;
5410            }
5411
5412            BLOGD(sc, DBG_TX,
5413                  "TSO split header size is %d (%x:%x) nbds %d\n",
5414                  le16toh(tx_start_bd->nbytes),
5415                  le32toh(tx_start_bd->addr_hi),
5416                  le32toh(tx_start_bd->addr_lo),
5417                  nbds);
5418        }
5419
5420        if (!CHIP_IS_E1x(sc)) {
5421            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5422        } else {
5423            bxe_set_pbd_lso(m0, pbd_e1x);
5424        }
5425    }
5426
5427    if (pbd_e2_parsing_data) {
5428        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5429    }
5430
5431    /* prepare remaining BDs, start tx bd contains first seg/frag */
5432    for (i = 1; i < nsegs ; i++) {
5433        bd_prod = TX_BD_NEXT(bd_prod);
5434        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5435        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5436        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5437        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5438        if (tx_total_pkt_size_bd == NULL) {
5439            tx_total_pkt_size_bd = tx_data_bd;
5440        }
5441        total_pkt_size += tx_data_bd->nbytes;
5442    }
5443
5444    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5445
5446    if (tx_total_pkt_size_bd != NULL) {
5447        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5448    }
5449
5450    if (__predict_false(sc->debug & DBG_TX)) {
5451        tmp_bd = tx_buf->first_bd;
5452        for (i = 0; i < nbds; i++)
5453        {
5454            if (i == 0) {
5455                BLOGD(sc, DBG_TX,
5456                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5457                      "bd_flags=0x%x hdr_nbds=%d\n",
5458                      tx_start_bd,
5459                      tmp_bd,
5460                      le16toh(tx_start_bd->nbd),
5461                      le16toh(tx_start_bd->vlan_or_ethertype),
5462                      tx_start_bd->bd_flags.as_bitfield,
5463                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5464            } else if (i == 1) {
5465                if (pbd_e1x) {
5466                    BLOGD(sc, DBG_TX,
5467                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5468                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5469                          "tcp_seq=%u total_hlen_w=%u\n",
5470                          pbd_e1x,
5471                          tmp_bd,
5472                          pbd_e1x->global_data,
5473                          pbd_e1x->ip_hlen_w,
5474                          pbd_e1x->ip_id,
5475                          pbd_e1x->lso_mss,
5476                          pbd_e1x->tcp_flags,
5477                          pbd_e1x->tcp_pseudo_csum,
5478                          pbd_e1x->tcp_send_seq,
5479                          le16toh(pbd_e1x->total_hlen_w));
5480                } else { /* if (pbd_e2) */
5481                    BLOGD(sc, DBG_TX,
5482                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5483                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5484                          pbd_e2,
5485                          tmp_bd,
5486                          pbd_e2->data.mac_addr.dst_hi,
5487                          pbd_e2->data.mac_addr.dst_mid,
5488                          pbd_e2->data.mac_addr.dst_lo,
5489                          pbd_e2->data.mac_addr.src_hi,
5490                          pbd_e2->data.mac_addr.src_mid,
5491                          pbd_e2->data.mac_addr.src_lo,
5492                          pbd_e2->parsing_data);
5493                }
5494            }
5495
5496            if (i != 1) { /* skip parse db as it doesn't hold data */
5497                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5498                BLOGD(sc, DBG_TX,
5499                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5500                      tx_data_bd,
5501                      tmp_bd,
5502                      le16toh(tx_data_bd->nbytes),
5503                      le32toh(tx_data_bd->addr_hi),
5504                      le32toh(tx_data_bd->addr_lo));
5505            }
5506
5507            tmp_bd = TX_BD_NEXT(tmp_bd);
5508        }
5509    }
5510
5511    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5512
5513    /* update TX BD producer index value for next TX */
5514    bd_prod = TX_BD_NEXT(bd_prod);
5515
5516    /*
5517     * If the chain of tx_bd's describing this frame is adjacent to or spans
5518     * an eth_tx_next_bd element then we need to increment the nbds value.
5519     */
5520    if (TX_BD_IDX(bd_prod) < nbds) {
5521        nbds++;
5522    }
5523
5524    /* don't allow reordering of writes for nbd and packets */
5525    mb();
5526
5527    fp->tx_db.data.prod += nbds;
5528
5529    /* producer points to the next free tx_bd at this point */
5530    fp->tx_pkt_prod++;
5531    fp->tx_bd_prod = bd_prod;
5532
5533    DOORBELL(sc, fp->index, fp->tx_db.raw);
5534
5535    fp->eth_q_stats.tx_pkts++;
5536
5537    /* Prevent speculative reads from getting ahead of the status block. */
5538    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5539                      0, 0, BUS_SPACE_BARRIER_READ);
5540
5541    /* Prevent speculative reads from getting ahead of the doorbell. */
5542    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5543                      0, 0, BUS_SPACE_BARRIER_READ);
5544
5545    return (0);
5546}
5547
5548static void
5549bxe_tx_start_locked(struct bxe_softc *sc,
5550                    if_t ifp,
5551                    struct bxe_fastpath *fp)
5552{
5553    struct mbuf *m = NULL;
5554    int tx_count = 0;
5555    uint16_t tx_bd_avail;
5556
5557    BXE_FP_TX_LOCK_ASSERT(fp);
5558
5559    /* keep adding entries while there are frames to send */
5560    while (!if_sendq_empty(ifp)) {
5561
5562        /*
5563         * check for any frames to send
5564         * dequeue can still be NULL even if queue is not empty
5565         */
5566        m = if_dequeue(ifp);
5567        if (__predict_false(m == NULL)) {
5568            break;
5569        }
5570
5571        /* the mbuf now belongs to us */
5572        fp->eth_q_stats.mbuf_alloc_tx++;
5573
5574        /*
5575         * Put the frame into the transmit ring. If we don't have room,
5576         * place the mbuf back at the head of the TX queue, set the
5577         * OACTIVE flag, and wait for the NIC to drain the chain.
5578         */
5579        if (__predict_false(bxe_tx_encap(fp, &m))) {
5580            fp->eth_q_stats.tx_encap_failures++;
5581            if (m != NULL) {
5582                /* mark the TX queue as full and return the frame */
5583                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5584		if_sendq_prepend(ifp, m);
5585                fp->eth_q_stats.mbuf_alloc_tx--;
5586                fp->eth_q_stats.tx_queue_xoff++;
5587            }
5588
5589            /* stop looking for more work */
5590            break;
5591        }
5592
5593        /* the frame was enqueued successfully */
5594        tx_count++;
5595
5596        /* send a copy of the frame to any BPF listeners. */
5597        if_etherbpfmtap(ifp, m);
5598
5599        tx_bd_avail = bxe_tx_avail(sc, fp);
5600
5601        /* handle any completions if we're running low */
5602        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5603            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5604            bxe_txeof(sc, fp);
5605            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5606                break;
5607            }
5608        }
5609    }
5610
5611    /* all TX packets were dequeued and/or the tx ring is full */
5612    if (tx_count > 0) {
5613        /* reset the TX watchdog timeout timer */
5614        fp->watchdog_timer = BXE_TX_TIMEOUT;
5615    }
5616}
5617
5618/* Legacy (non-RSS) dispatch routine */
5619static void
5620bxe_tx_start(if_t ifp)
5621{
5622    struct bxe_softc *sc;
5623    struct bxe_fastpath *fp;
5624
5625    sc = if_getsoftc(ifp);
5626
5627    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5628        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5629        return;
5630    }
5631
5632    if (!sc->link_vars.link_up) {
5633        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5634        return;
5635    }
5636
5637    fp = &sc->fp[0];
5638
5639    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5640        fp->eth_q_stats.tx_queue_full_return++;
5641        return;
5642    }
5643
5644    BXE_FP_TX_LOCK(fp);
5645    bxe_tx_start_locked(sc, ifp, fp);
5646    BXE_FP_TX_UNLOCK(fp);
5647}
5648
5649#if __FreeBSD_version >= 901504
5650
5651static int
5652bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5653                       if_t                ifp,
5654                       struct bxe_fastpath *fp,
5655                       struct mbuf         *m)
5656{
5657    struct buf_ring *tx_br = fp->tx_br;
5658    struct mbuf *next;
5659    int depth, rc, tx_count;
5660    uint16_t tx_bd_avail;
5661
5662    rc = tx_count = 0;
5663
5664    BXE_FP_TX_LOCK_ASSERT(fp);
5665
5666    if (sc->state != BXE_STATE_OPEN)  {
5667        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5668        return ENETDOWN;
5669    }
5670
5671    if (!tx_br) {
5672        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5673        return (EINVAL);
5674    }
5675
5676    if (m != NULL) {
5677        rc = drbr_enqueue(ifp, tx_br, m);
5678        if (rc != 0) {
5679            fp->eth_q_stats.tx_soft_errors++;
5680            goto bxe_tx_mq_start_locked_exit;
5681        }
5682    }
5683
5684    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5685        fp->eth_q_stats.tx_request_link_down_failures++;
5686        goto bxe_tx_mq_start_locked_exit;
5687    }
5688
5689    /* fetch the depth of the driver queue */
5690    depth = drbr_inuse_drv(ifp, tx_br);
5691    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5692        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5693    }
5694
5695    /* keep adding entries while there are frames to send */
5696    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5697        /* handle any completions if we're running low */
5698        tx_bd_avail = bxe_tx_avail(sc, fp);
5699        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5700            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5701            bxe_txeof(sc, fp);
5702            tx_bd_avail = bxe_tx_avail(sc, fp);
5703            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5704                fp->eth_q_stats.bd_avail_too_less_failures++;
5705                m_freem(next);
5706                drbr_advance(ifp, tx_br);
5707                rc = ENOBUFS;
5708                break;
5709            }
5710        }
5711
5712        /* the mbuf now belongs to us */
5713        fp->eth_q_stats.mbuf_alloc_tx++;
5714
5715        /*
5716         * Put the frame into the transmit ring. If we don't have room,
5717         * place the mbuf back at the head of the TX queue, set the
5718         * OACTIVE flag, and wait for the NIC to drain the chain.
5719         */
5720        rc = bxe_tx_encap(fp, &next);
5721        if (__predict_false(rc != 0)) {
5722            fp->eth_q_stats.tx_encap_failures++;
5723            if (next != NULL) {
5724                /* mark the TX queue as full and save the frame */
5725                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5726                drbr_putback(ifp, tx_br, next);
5727                fp->eth_q_stats.mbuf_alloc_tx--;
5728                fp->eth_q_stats.tx_frames_deferred++;
5729            } else
5730                drbr_advance(ifp, tx_br);
5731
5732            /* stop looking for more work */
5733            break;
5734        }
5735
5736        /* the transmit frame was enqueued successfully */
5737        tx_count++;
5738
5739        /* send a copy of the frame to any BPF listeners */
5740	if_etherbpfmtap(ifp, next);
5741
5742        drbr_advance(ifp, tx_br);
5743    }
5744
5745    /* all TX packets were dequeued and/or the tx ring is full */
5746    if (tx_count > 0) {
5747        /* reset the TX watchdog timeout timer */
5748        fp->watchdog_timer = BXE_TX_TIMEOUT;
5749    }
5750
5751bxe_tx_mq_start_locked_exit:
5752    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5753    if (!drbr_empty(ifp, tx_br)) {
5754        fp->eth_q_stats.tx_mq_not_empty++;
5755        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5756    }
5757
5758    return (rc);
5759}
5760
5761static void
5762bxe_tx_mq_start_deferred(void *arg,
5763                         int pending)
5764{
5765    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5766    struct bxe_softc *sc = fp->sc;
5767    if_t ifp = sc->ifp;
5768
5769    BXE_FP_TX_LOCK(fp);
5770    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5771    BXE_FP_TX_UNLOCK(fp);
5772}
5773
5774/* Multiqueue (TSS) dispatch routine. */
5775static int
5776bxe_tx_mq_start(struct ifnet *ifp,
5777                struct mbuf  *m)
5778{
5779    struct bxe_softc *sc = if_getsoftc(ifp);
5780    struct bxe_fastpath *fp;
5781    int fp_index, rc;
5782
5783    fp_index = 0; /* default is the first queue */
5784
5785    /* check if flowid is set */
5786
5787    if (BXE_VALID_FLOWID(m))
5788        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5789
5790    fp = &sc->fp[fp_index];
5791
5792    if (sc->state != BXE_STATE_OPEN)  {
5793        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5794        return ENETDOWN;
5795    }
5796
5797    if (BXE_FP_TX_TRYLOCK(fp)) {
5798        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5799        BXE_FP_TX_UNLOCK(fp);
5800    } else {
5801        rc = drbr_enqueue(ifp, fp->tx_br, m);
5802        taskqueue_enqueue(fp->tq, &fp->tx_task);
5803    }
5804
5805    return (rc);
5806}
5807
5808static void
5809bxe_mq_flush(struct ifnet *ifp)
5810{
5811    struct bxe_softc *sc = if_getsoftc(ifp);
5812    struct bxe_fastpath *fp;
5813    struct mbuf *m;
5814    int i;
5815
5816    for (i = 0; i < sc->num_queues; i++) {
5817        fp = &sc->fp[i];
5818
5819        if (fp->state != BXE_FP_STATE_IRQ) {
5820            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5821                  fp->index, fp->state);
5822            continue;
5823        }
5824
5825        if (fp->tx_br != NULL) {
5826            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5827            BXE_FP_TX_LOCK(fp);
5828            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5829                m_freem(m);
5830            }
5831            BXE_FP_TX_UNLOCK(fp);
5832        }
5833    }
5834
5835    if_qflush(ifp);
5836}
5837
5838#endif /* FreeBSD_version >= 901504 */
5839
5840static uint16_t
5841bxe_cid_ilt_lines(struct bxe_softc *sc)
5842{
5843    if (IS_SRIOV(sc)) {
5844        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5845    }
5846    return (L2_ILT_LINES(sc));
5847}
5848
5849static void
5850bxe_ilt_set_info(struct bxe_softc *sc)
5851{
5852    struct ilt_client_info *ilt_client;
5853    struct ecore_ilt *ilt = sc->ilt;
5854    uint16_t line = 0;
5855
5856    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5857    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5858
5859    /* CDU */
5860    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5861    ilt_client->client_num = ILT_CLIENT_CDU;
5862    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5863    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5864    ilt_client->start = line;
5865    line += bxe_cid_ilt_lines(sc);
5866
5867    if (CNIC_SUPPORT(sc)) {
5868        line += CNIC_ILT_LINES;
5869    }
5870
5871    ilt_client->end = (line - 1);
5872
5873    BLOGD(sc, DBG_LOAD,
5874          "ilt client[CDU]: start %d, end %d, "
5875          "psz 0x%x, flags 0x%x, hw psz %d\n",
5876          ilt_client->start, ilt_client->end,
5877          ilt_client->page_size,
5878          ilt_client->flags,
5879          ilog2(ilt_client->page_size >> 12));
5880
5881    /* QM */
5882    if (QM_INIT(sc->qm_cid_count)) {
5883        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5884        ilt_client->client_num = ILT_CLIENT_QM;
5885        ilt_client->page_size = QM_ILT_PAGE_SZ;
5886        ilt_client->flags = 0;
5887        ilt_client->start = line;
5888
5889        /* 4 bytes for each cid */
5890        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5891                             QM_ILT_PAGE_SZ);
5892
5893        ilt_client->end = (line - 1);
5894
5895        BLOGD(sc, DBG_LOAD,
5896              "ilt client[QM]: start %d, end %d, "
5897              "psz 0x%x, flags 0x%x, hw psz %d\n",
5898              ilt_client->start, ilt_client->end,
5899              ilt_client->page_size, ilt_client->flags,
5900              ilog2(ilt_client->page_size >> 12));
5901    }
5902
5903    if (CNIC_SUPPORT(sc)) {
5904        /* SRC */
5905        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5906        ilt_client->client_num = ILT_CLIENT_SRC;
5907        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5908        ilt_client->flags = 0;
5909        ilt_client->start = line;
5910        line += SRC_ILT_LINES;
5911        ilt_client->end = (line - 1);
5912
5913        BLOGD(sc, DBG_LOAD,
5914              "ilt client[SRC]: start %d, end %d, "
5915              "psz 0x%x, flags 0x%x, hw psz %d\n",
5916              ilt_client->start, ilt_client->end,
5917              ilt_client->page_size, ilt_client->flags,
5918              ilog2(ilt_client->page_size >> 12));
5919
5920        /* TM */
5921        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5922        ilt_client->client_num = ILT_CLIENT_TM;
5923        ilt_client->page_size = TM_ILT_PAGE_SZ;
5924        ilt_client->flags = 0;
5925        ilt_client->start = line;
5926        line += TM_ILT_LINES;
5927        ilt_client->end = (line - 1);
5928
5929        BLOGD(sc, DBG_LOAD,
5930              "ilt client[TM]: start %d, end %d, "
5931              "psz 0x%x, flags 0x%x, hw psz %d\n",
5932              ilt_client->start, ilt_client->end,
5933              ilt_client->page_size, ilt_client->flags,
5934              ilog2(ilt_client->page_size >> 12));
5935    }
5936
5937    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5938}
5939
5940static void
5941bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5942{
5943    int i;
5944    uint32_t rx_buf_size;
5945
5946    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5947
5948    for (i = 0; i < sc->num_queues; i++) {
5949        if(rx_buf_size <= MCLBYTES){
5950            sc->fp[i].rx_buf_size = rx_buf_size;
5951            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5952        }else if (rx_buf_size <= MJUMPAGESIZE){
5953            sc->fp[i].rx_buf_size = rx_buf_size;
5954            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5955        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
5956            sc->fp[i].rx_buf_size = MCLBYTES;
5957            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5958        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
5959            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5960            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5961        }else {
5962            sc->fp[i].rx_buf_size = MCLBYTES;
5963            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5964        }
5965    }
5966}
5967
5968static int
5969bxe_alloc_ilt_mem(struct bxe_softc *sc)
5970{
5971    int rc = 0;
5972
5973    if ((sc->ilt =
5974         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5975                                    M_BXE_ILT,
5976                                    (M_NOWAIT | M_ZERO))) == NULL) {
5977        rc = 1;
5978    }
5979
5980    return (rc);
5981}
5982
5983static int
5984bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5985{
5986    int rc = 0;
5987
5988    if ((sc->ilt->lines =
5989         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5990                                    M_BXE_ILT,
5991                                    (M_NOWAIT | M_ZERO))) == NULL) {
5992        rc = 1;
5993    }
5994
5995    return (rc);
5996}
5997
5998static void
5999bxe_free_ilt_mem(struct bxe_softc *sc)
6000{
6001    if (sc->ilt != NULL) {
6002        free(sc->ilt, M_BXE_ILT);
6003        sc->ilt = NULL;
6004    }
6005}
6006
6007static void
6008bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6009{
6010    if (sc->ilt->lines != NULL) {
6011        free(sc->ilt->lines, M_BXE_ILT);
6012        sc->ilt->lines = NULL;
6013    }
6014}
6015
6016static void
6017bxe_free_mem(struct bxe_softc *sc)
6018{
6019    int i;
6020
6021    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6022        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6023        sc->context[i].vcxt = NULL;
6024        sc->context[i].size = 0;
6025    }
6026
6027    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6028
6029    bxe_free_ilt_lines_mem(sc);
6030
6031}
6032
6033static int
6034bxe_alloc_mem(struct bxe_softc *sc)
6035{
6036
6037    int context_size;
6038    int allocated;
6039    int i;
6040
6041    /*
6042     * Allocate memory for CDU context:
6043     * This memory is allocated separately and not in the generic ILT
6044     * functions because CDU differs in few aspects:
6045     * 1. There can be multiple entities allocating memory for context -
6046     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6047     * its own ILT lines.
6048     * 2. Since CDU page-size is not a single 4KB page (which is the case
6049     * for the other ILT clients), to be efficient we want to support
6050     * allocation of sub-page-size in the last entry.
6051     * 3. Context pointers are used by the driver to pass to FW / update
6052     * the context (for the other ILT clients the pointers are used just to
6053     * free the memory during unload).
6054     */
6055    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6056    for (i = 0, allocated = 0; allocated < context_size; i++) {
6057        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6058                                  (context_size - allocated));
6059
6060        if (bxe_dma_alloc(sc, sc->context[i].size,
6061                          &sc->context[i].vcxt_dma,
6062                          "cdu context") != 0) {
6063            bxe_free_mem(sc);
6064            return (-1);
6065        }
6066
6067        sc->context[i].vcxt =
6068            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6069
6070        allocated += sc->context[i].size;
6071    }
6072
6073    bxe_alloc_ilt_lines_mem(sc);
6074
6075    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6076          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6077    {
6078        for (i = 0; i < 4; i++) {
6079            BLOGD(sc, DBG_LOAD,
6080                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6081                  i,
6082                  sc->ilt->clients[i].page_size,
6083                  sc->ilt->clients[i].start,
6084                  sc->ilt->clients[i].end,
6085                  sc->ilt->clients[i].client_num,
6086                  sc->ilt->clients[i].flags);
6087        }
6088    }
6089    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6090        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6091        bxe_free_mem(sc);
6092        return (-1);
6093    }
6094
6095    return (0);
6096}
6097
6098static void
6099bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6100{
6101    struct bxe_softc *sc;
6102    int i;
6103
6104    sc = fp->sc;
6105
6106    if (fp->rx_mbuf_tag == NULL) {
6107        return;
6108    }
6109
6110    /* free all mbufs and unload all maps */
6111    for (i = 0; i < RX_BD_TOTAL; i++) {
6112        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6113            bus_dmamap_sync(fp->rx_mbuf_tag,
6114                            fp->rx_mbuf_chain[i].m_map,
6115                            BUS_DMASYNC_POSTREAD);
6116            bus_dmamap_unload(fp->rx_mbuf_tag,
6117                              fp->rx_mbuf_chain[i].m_map);
6118        }
6119
6120        if (fp->rx_mbuf_chain[i].m != NULL) {
6121            m_freem(fp->rx_mbuf_chain[i].m);
6122            fp->rx_mbuf_chain[i].m = NULL;
6123            fp->eth_q_stats.mbuf_alloc_rx--;
6124        }
6125    }
6126}
6127
6128static void
6129bxe_free_tpa_pool(struct bxe_fastpath *fp)
6130{
6131    struct bxe_softc *sc;
6132    int i, max_agg_queues;
6133
6134    sc = fp->sc;
6135
6136    if (fp->rx_mbuf_tag == NULL) {
6137        return;
6138    }
6139
6140    max_agg_queues = MAX_AGG_QS(sc);
6141
6142    /* release all mbufs and unload all DMA maps in the TPA pool */
6143    for (i = 0; i < max_agg_queues; i++) {
6144        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6145            bus_dmamap_sync(fp->rx_mbuf_tag,
6146                            fp->rx_tpa_info[i].bd.m_map,
6147                            BUS_DMASYNC_POSTREAD);
6148            bus_dmamap_unload(fp->rx_mbuf_tag,
6149                              fp->rx_tpa_info[i].bd.m_map);
6150        }
6151
6152        if (fp->rx_tpa_info[i].bd.m != NULL) {
6153            m_freem(fp->rx_tpa_info[i].bd.m);
6154            fp->rx_tpa_info[i].bd.m = NULL;
6155            fp->eth_q_stats.mbuf_alloc_tpa--;
6156        }
6157    }
6158}
6159
6160static void
6161bxe_free_sge_chain(struct bxe_fastpath *fp)
6162{
6163    struct bxe_softc *sc;
6164    int i;
6165
6166    sc = fp->sc;
6167
6168    if (fp->rx_sge_mbuf_tag == NULL) {
6169        return;
6170    }
6171
6172    /* rree all mbufs and unload all maps */
6173    for (i = 0; i < RX_SGE_TOTAL; i++) {
6174        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6175            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6176                            fp->rx_sge_mbuf_chain[i].m_map,
6177                            BUS_DMASYNC_POSTREAD);
6178            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6179                              fp->rx_sge_mbuf_chain[i].m_map);
6180        }
6181
6182        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6183            m_freem(fp->rx_sge_mbuf_chain[i].m);
6184            fp->rx_sge_mbuf_chain[i].m = NULL;
6185            fp->eth_q_stats.mbuf_alloc_sge--;
6186        }
6187    }
6188}
6189
6190static void
6191bxe_free_fp_buffers(struct bxe_softc *sc)
6192{
6193    struct bxe_fastpath *fp;
6194    int i;
6195
6196    for (i = 0; i < sc->num_queues; i++) {
6197        fp = &sc->fp[i];
6198
6199#if __FreeBSD_version >= 901504
6200        if (fp->tx_br != NULL) {
6201            /* just in case bxe_mq_flush() wasn't called */
6202            if (mtx_initialized(&fp->tx_mtx)) {
6203                struct mbuf *m;
6204
6205                BXE_FP_TX_LOCK(fp);
6206                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6207                    m_freem(m);
6208                BXE_FP_TX_UNLOCK(fp);
6209            }
6210        }
6211#endif
6212
6213        /* free all RX buffers */
6214        bxe_free_rx_bd_chain(fp);
6215        bxe_free_tpa_pool(fp);
6216        bxe_free_sge_chain(fp);
6217
6218        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6219            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6220                  fp->eth_q_stats.mbuf_alloc_rx);
6221        }
6222
6223        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6224            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6225                  fp->eth_q_stats.mbuf_alloc_sge);
6226        }
6227
6228        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6229            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6230                  fp->eth_q_stats.mbuf_alloc_tpa);
6231        }
6232
6233        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6234            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6235                  fp->eth_q_stats.mbuf_alloc_tx);
6236        }
6237
6238        /* XXX verify all mbufs were reclaimed */
6239    }
6240}
6241
6242static int
6243bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6244                     uint16_t            prev_index,
6245                     uint16_t            index)
6246{
6247    struct bxe_sw_rx_bd *rx_buf;
6248    struct eth_rx_bd *rx_bd;
6249    bus_dma_segment_t segs[1];
6250    bus_dmamap_t map;
6251    struct mbuf *m;
6252    int nsegs, rc;
6253
6254    rc = 0;
6255
6256    /* allocate the new RX BD mbuf */
6257    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6258    if (__predict_false(m == NULL)) {
6259        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6260        return (ENOBUFS);
6261    }
6262
6263    fp->eth_q_stats.mbuf_alloc_rx++;
6264
6265    /* initialize the mbuf buffer length */
6266    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6267
6268    /* map the mbuf into non-paged pool */
6269    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6270                                 fp->rx_mbuf_spare_map,
6271                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6272    if (__predict_false(rc != 0)) {
6273        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6274        m_freem(m);
6275        fp->eth_q_stats.mbuf_alloc_rx--;
6276        return (rc);
6277    }
6278
6279    /* all mbufs must map to a single segment */
6280    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6281
6282    /* release any existing RX BD mbuf mappings */
6283
6284    if (prev_index != index) {
6285        rx_buf = &fp->rx_mbuf_chain[prev_index];
6286
6287        if (rx_buf->m_map != NULL) {
6288            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6289                            BUS_DMASYNC_POSTREAD);
6290            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6291        }
6292
6293        /*
6294         * We only get here from bxe_rxeof() when the maximum number
6295         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6296         * holds the mbuf in the prev_index so it's OK to NULL it out
6297         * here without concern of a memory leak.
6298         */
6299        fp->rx_mbuf_chain[prev_index].m = NULL;
6300    }
6301
6302    rx_buf = &fp->rx_mbuf_chain[index];
6303
6304    if (rx_buf->m_map != NULL) {
6305        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6306                        BUS_DMASYNC_POSTREAD);
6307        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6308    }
6309
6310    /* save the mbuf and mapping info for a future packet */
6311    map = (prev_index != index) ?
6312              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6313    rx_buf->m_map = fp->rx_mbuf_spare_map;
6314    fp->rx_mbuf_spare_map = map;
6315    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6316                    BUS_DMASYNC_PREREAD);
6317    rx_buf->m = m;
6318
6319    rx_bd = &fp->rx_chain[index];
6320    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6321    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6322
6323    return (rc);
6324}
6325
6326static int
6327bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6328                      int                 queue)
6329{
6330    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6331    bus_dma_segment_t segs[1];
6332    bus_dmamap_t map;
6333    struct mbuf *m;
6334    int nsegs;
6335    int rc = 0;
6336
6337    /* allocate the new TPA mbuf */
6338    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6339    if (__predict_false(m == NULL)) {
6340        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6341        return (ENOBUFS);
6342    }
6343
6344    fp->eth_q_stats.mbuf_alloc_tpa++;
6345
6346    /* initialize the mbuf buffer length */
6347    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6348
6349    /* map the mbuf into non-paged pool */
6350    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6351                                 fp->rx_tpa_info_mbuf_spare_map,
6352                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6353    if (__predict_false(rc != 0)) {
6354        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6355        m_free(m);
6356        fp->eth_q_stats.mbuf_alloc_tpa--;
6357        return (rc);
6358    }
6359
6360    /* all mbufs must map to a single segment */
6361    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6362
6363    /* release any existing TPA mbuf mapping */
6364    if (tpa_info->bd.m_map != NULL) {
6365        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6366                        BUS_DMASYNC_POSTREAD);
6367        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6368    }
6369
6370    /* save the mbuf and mapping info for the TPA mbuf */
6371    map = tpa_info->bd.m_map;
6372    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6373    fp->rx_tpa_info_mbuf_spare_map = map;
6374    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6375                    BUS_DMASYNC_PREREAD);
6376    tpa_info->bd.m = m;
6377    tpa_info->seg = segs[0];
6378
6379    return (rc);
6380}
6381
6382/*
6383 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6384 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6385 * chain.
6386 */
6387static int
6388bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6389                      uint16_t            index)
6390{
6391    struct bxe_sw_rx_bd *sge_buf;
6392    struct eth_rx_sge *sge;
6393    bus_dma_segment_t segs[1];
6394    bus_dmamap_t map;
6395    struct mbuf *m;
6396    int nsegs;
6397    int rc = 0;
6398
6399    /* allocate a new SGE mbuf */
6400    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6401    if (__predict_false(m == NULL)) {
6402        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6403        return (ENOMEM);
6404    }
6405
6406    fp->eth_q_stats.mbuf_alloc_sge++;
6407
6408    /* initialize the mbuf buffer length */
6409    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6410
6411    /* map the SGE mbuf into non-paged pool */
6412    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6413                                 fp->rx_sge_mbuf_spare_map,
6414                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6415    if (__predict_false(rc != 0)) {
6416        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6417        m_freem(m);
6418        fp->eth_q_stats.mbuf_alloc_sge--;
6419        return (rc);
6420    }
6421
6422    /* all mbufs must map to a single segment */
6423    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6424
6425    sge_buf = &fp->rx_sge_mbuf_chain[index];
6426
6427    /* release any existing SGE mbuf mapping */
6428    if (sge_buf->m_map != NULL) {
6429        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6430                        BUS_DMASYNC_POSTREAD);
6431        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6432    }
6433
6434    /* save the mbuf and mapping info for a future packet */
6435    map = sge_buf->m_map;
6436    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6437    fp->rx_sge_mbuf_spare_map = map;
6438    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6439                    BUS_DMASYNC_PREREAD);
6440    sge_buf->m = m;
6441
6442    sge = &fp->rx_sge_chain[index];
6443    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6444    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6445
6446    return (rc);
6447}
6448
6449static __noinline int
6450bxe_alloc_fp_buffers(struct bxe_softc *sc)
6451{
6452    struct bxe_fastpath *fp;
6453    int i, j, rc = 0;
6454    int ring_prod, cqe_ring_prod;
6455    int max_agg_queues;
6456
6457    for (i = 0; i < sc->num_queues; i++) {
6458        fp = &sc->fp[i];
6459
6460        ring_prod = cqe_ring_prod = 0;
6461        fp->rx_bd_cons = 0;
6462        fp->rx_cq_cons = 0;
6463
6464        /* allocate buffers for the RX BDs in RX BD chain */
6465        for (j = 0; j < sc->max_rx_bufs; j++) {
6466            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6467            if (rc != 0) {
6468                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6469                      i, rc);
6470                goto bxe_alloc_fp_buffers_error;
6471            }
6472
6473            ring_prod     = RX_BD_NEXT(ring_prod);
6474            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6475        }
6476
6477        fp->rx_bd_prod = ring_prod;
6478        fp->rx_cq_prod = cqe_ring_prod;
6479        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6480
6481        max_agg_queues = MAX_AGG_QS(sc);
6482
6483        fp->tpa_enable = TRUE;
6484
6485        /* fill the TPA pool */
6486        for (j = 0; j < max_agg_queues; j++) {
6487            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6488            if (rc != 0) {
6489                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6490                          i, j);
6491                fp->tpa_enable = FALSE;
6492                goto bxe_alloc_fp_buffers_error;
6493            }
6494
6495            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6496        }
6497
6498        if (fp->tpa_enable) {
6499            /* fill the RX SGE chain */
6500            ring_prod = 0;
6501            for (j = 0; j < RX_SGE_USABLE; j++) {
6502                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6503                if (rc != 0) {
6504                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6505                              i, ring_prod);
6506                    fp->tpa_enable = FALSE;
6507                    ring_prod = 0;
6508                    goto bxe_alloc_fp_buffers_error;
6509                }
6510
6511                ring_prod = RX_SGE_NEXT(ring_prod);
6512            }
6513
6514            fp->rx_sge_prod = ring_prod;
6515        }
6516    }
6517
6518    return (0);
6519
6520bxe_alloc_fp_buffers_error:
6521
6522    /* unwind what was already allocated */
6523    bxe_free_rx_bd_chain(fp);
6524    bxe_free_tpa_pool(fp);
6525    bxe_free_sge_chain(fp);
6526
6527    return (ENOBUFS);
6528}
6529
6530static void
6531bxe_free_fw_stats_mem(struct bxe_softc *sc)
6532{
6533    bxe_dma_free(sc, &sc->fw_stats_dma);
6534
6535    sc->fw_stats_num = 0;
6536
6537    sc->fw_stats_req_size = 0;
6538    sc->fw_stats_req = NULL;
6539    sc->fw_stats_req_mapping = 0;
6540
6541    sc->fw_stats_data_size = 0;
6542    sc->fw_stats_data = NULL;
6543    sc->fw_stats_data_mapping = 0;
6544}
6545
6546static int
6547bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6548{
6549    uint8_t num_queue_stats;
6550    int num_groups;
6551
6552    /* number of queues for statistics is number of eth queues */
6553    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6554
6555    /*
6556     * Total number of FW statistics requests =
6557     *   1 for port stats + 1 for PF stats + num of queues
6558     */
6559    sc->fw_stats_num = (2 + num_queue_stats);
6560
6561    /*
6562     * Request is built from stats_query_header and an array of
6563     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6564     * rules. The real number or requests is configured in the
6565     * stats_query_header.
6566     */
6567    num_groups =
6568        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6569         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6570
6571    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6572          sc->fw_stats_num, num_groups);
6573
6574    sc->fw_stats_req_size =
6575        (sizeof(struct stats_query_header) +
6576         (num_groups * sizeof(struct stats_query_cmd_group)));
6577
6578    /*
6579     * Data for statistics requests + stats_counter.
6580     * stats_counter holds per-STORM counters that are incremented when
6581     * STORM has finished with the current request. Memory for FCoE
6582     * offloaded statistics are counted anyway, even if they will not be sent.
6583     * VF stats are not accounted for here as the data of VF stats is stored
6584     * in memory allocated by the VF, not here.
6585     */
6586    sc->fw_stats_data_size =
6587        (sizeof(struct stats_counter) +
6588         sizeof(struct per_port_stats) +
6589         sizeof(struct per_pf_stats) +
6590         /* sizeof(struct fcoe_statistics_params) + */
6591         (sizeof(struct per_queue_stats) * num_queue_stats));
6592
6593    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6594                      &sc->fw_stats_dma, "fw stats") != 0) {
6595        bxe_free_fw_stats_mem(sc);
6596        return (-1);
6597    }
6598
6599    /* set up the shortcuts */
6600
6601    sc->fw_stats_req =
6602        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6603    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6604
6605    sc->fw_stats_data =
6606        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6607                                     sc->fw_stats_req_size);
6608    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6609                                 sc->fw_stats_req_size);
6610
6611    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6612          (uintmax_t)sc->fw_stats_req_mapping);
6613
6614    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6615          (uintmax_t)sc->fw_stats_data_mapping);
6616
6617    return (0);
6618}
6619
6620/*
6621 * Bits map:
6622 * 0-7  - Engine0 load counter.
6623 * 8-15 - Engine1 load counter.
6624 * 16   - Engine0 RESET_IN_PROGRESS bit.
6625 * 17   - Engine1 RESET_IN_PROGRESS bit.
6626 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6627 *        function on the engine
6628 * 19   - Engine1 ONE_IS_LOADED.
6629 * 20   - Chip reset flow bit. When set none-leader must wait for both engines
6630 *        leader to complete (check for both RESET_IN_PROGRESS bits and not
6631 *        for just the one belonging to its engine).
6632 */
6633#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6634#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6635#define BXE_PATH0_LOAD_CNT_SHIFT  0
6636#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6637#define BXE_PATH1_LOAD_CNT_SHIFT  8
6638#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6639#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6640#define BXE_GLOBAL_RESET_BIT      0x00040000
6641
6642/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6643static void
6644bxe_set_reset_global(struct bxe_softc *sc)
6645{
6646    uint32_t val;
6647    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6648    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6649    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6650    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6651}
6652
6653/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6654static void
6655bxe_clear_reset_global(struct bxe_softc *sc)
6656{
6657    uint32_t val;
6658    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6659    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6660    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6661    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6662}
6663
6664/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6665static uint8_t
6666bxe_reset_is_global(struct bxe_softc *sc)
6667{
6668    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6669    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6670    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6671}
6672
6673/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6674static void
6675bxe_set_reset_done(struct bxe_softc *sc)
6676{
6677    uint32_t val;
6678    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6679                                 BXE_PATH0_RST_IN_PROG_BIT;
6680
6681    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6682
6683    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6684    /* Clear the bit */
6685    val &= ~bit;
6686    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6687
6688    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6689}
6690
6691/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6692static void
6693bxe_set_reset_in_progress(struct bxe_softc *sc)
6694{
6695    uint32_t val;
6696    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6697                                 BXE_PATH0_RST_IN_PROG_BIT;
6698
6699    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6700
6701    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6702    /* Set the bit */
6703    val |= bit;
6704    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6705
6706    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6707}
6708
6709/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6710static uint8_t
6711bxe_reset_is_done(struct bxe_softc *sc,
6712                  int              engine)
6713{
6714    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6715    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6716                            BXE_PATH0_RST_IN_PROG_BIT;
6717
6718    /* return false if bit is set */
6719    return (val & bit) ? FALSE : TRUE;
6720}
6721
6722/* get the load status for an engine, should be run under rtnl lock */
6723static uint8_t
6724bxe_get_load_status(struct bxe_softc *sc,
6725                    int              engine)
6726{
6727    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6728                             BXE_PATH0_LOAD_CNT_MASK;
6729    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6730                              BXE_PATH0_LOAD_CNT_SHIFT;
6731    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6732
6733    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6734
6735    val = ((val & mask) >> shift);
6736
6737    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6738
6739    return (val != 0);
6740}
6741
6742/* set pf load mark */
6743/* XXX needs to be under rtnl lock */
6744static void
6745bxe_set_pf_load(struct bxe_softc *sc)
6746{
6747    uint32_t val;
6748    uint32_t val1;
6749    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6750                                  BXE_PATH0_LOAD_CNT_MASK;
6751    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6752                                   BXE_PATH0_LOAD_CNT_SHIFT;
6753
6754    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6755
6756    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6757    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6758
6759    /* get the current counter value */
6760    val1 = ((val & mask) >> shift);
6761
6762    /* set bit of this PF */
6763    val1 |= (1 << SC_ABS_FUNC(sc));
6764
6765    /* clear the old value */
6766    val &= ~mask;
6767
6768    /* set the new one */
6769    val |= ((val1 << shift) & mask);
6770
6771    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6772
6773    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6774}
6775
6776/* clear pf load mark */
6777/* XXX needs to be under rtnl lock */
6778static uint8_t
6779bxe_clear_pf_load(struct bxe_softc *sc)
6780{
6781    uint32_t val1, val;
6782    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6783                                  BXE_PATH0_LOAD_CNT_MASK;
6784    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6785                                   BXE_PATH0_LOAD_CNT_SHIFT;
6786
6787    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6788    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6789    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6790
6791    /* get the current counter value */
6792    val1 = (val & mask) >> shift;
6793
6794    /* clear bit of that PF */
6795    val1 &= ~(1 << SC_ABS_FUNC(sc));
6796
6797    /* clear the old value */
6798    val &= ~mask;
6799
6800    /* set the new one */
6801    val |= ((val1 << shift) & mask);
6802
6803    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6804    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6805    return (val1 != 0);
6806}
6807
6808/* send load requrest to mcp and analyze response */
6809static int
6810bxe_nic_load_request(struct bxe_softc *sc,
6811                     uint32_t         *load_code)
6812{
6813    /* init fw_seq */
6814    sc->fw_seq =
6815        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6816         DRV_MSG_SEQ_NUMBER_MASK);
6817
6818    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6819
6820    /* get the current FW pulse sequence */
6821    sc->fw_drv_pulse_wr_seq =
6822        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6823         DRV_PULSE_SEQ_MASK);
6824
6825    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6826          sc->fw_drv_pulse_wr_seq);
6827
6828    /* load request */
6829    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6830                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6831
6832    /* if the MCP fails to respond we must abort */
6833    if (!(*load_code)) {
6834        BLOGE(sc, "MCP response failure!\n");
6835        return (-1);
6836    }
6837
6838    /* if MCP refused then must abort */
6839    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6840        BLOGE(sc, "MCP refused load request\n");
6841        return (-1);
6842    }
6843
6844    return (0);
6845}
6846
6847/*
6848 * Check whether another PF has already loaded FW to chip. In virtualized
6849 * environments a pf from anoth VM may have already initialized the device
6850 * including loading FW.
6851 */
6852static int
6853bxe_nic_load_analyze_req(struct bxe_softc *sc,
6854                         uint32_t         load_code)
6855{
6856    uint32_t my_fw, loaded_fw;
6857
6858    /* is another pf loaded on this engine? */
6859    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6860        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6861        /* build my FW version dword */
6862        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6863                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6864                 (BCM_5710_FW_REVISION_VERSION << 16) +
6865                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6866
6867        /* read loaded FW from chip */
6868        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6869        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6870              loaded_fw, my_fw);
6871
6872        /* abort nic load if version mismatch */
6873        if (my_fw != loaded_fw) {
6874            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6875                  loaded_fw, my_fw);
6876            return (-1);
6877        }
6878    }
6879
6880    return (0);
6881}
6882
6883/* mark PMF if applicable */
6884static void
6885bxe_nic_load_pmf(struct bxe_softc *sc,
6886                 uint32_t         load_code)
6887{
6888    uint32_t ncsi_oem_data_addr;
6889
6890    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6891        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6892        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6893        /*
6894         * Barrier here for ordering between the writing to sc->port.pmf here
6895         * and reading it from the periodic task.
6896         */
6897        sc->port.pmf = 1;
6898        mb();
6899    } else {
6900        sc->port.pmf = 0;
6901    }
6902
6903    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6904
6905    /* XXX needed? */
6906    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6907        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6908            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6909            if (ncsi_oem_data_addr) {
6910                REG_WR(sc,
6911                       (ncsi_oem_data_addr +
6912                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6913                       0);
6914            }
6915        }
6916    }
6917}
6918
6919static void
6920bxe_read_mf_cfg(struct bxe_softc *sc)
6921{
6922    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6923    int abs_func;
6924    int vn;
6925
6926    if (BXE_NOMCP(sc)) {
6927        return; /* what should be the default bvalue in this case */
6928    }
6929
6930    /*
6931     * The formula for computing the absolute function number is...
6932     * For 2 port configuration (4 functions per port):
6933     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6934     * For 4 port configuration (2 functions per port):
6935     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6936     */
6937    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6938        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6939        if (abs_func >= E1H_FUNC_MAX) {
6940            break;
6941        }
6942        sc->devinfo.mf_info.mf_config[vn] =
6943            MFCFG_RD(sc, func_mf_config[abs_func].config);
6944    }
6945
6946    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6947        FUNC_MF_CFG_FUNC_DISABLED) {
6948        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6949        sc->flags |= BXE_MF_FUNC_DIS;
6950    } else {
6951        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6952        sc->flags &= ~BXE_MF_FUNC_DIS;
6953    }
6954}
6955
6956/* acquire split MCP access lock register */
6957static int bxe_acquire_alr(struct bxe_softc *sc)
6958{
6959    uint32_t j, val;
6960
6961    for (j = 0; j < 1000; j++) {
6962        val = (1UL << 31);
6963        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6964        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6965        if (val & (1L << 31))
6966            break;
6967
6968        DELAY(5000);
6969    }
6970
6971    if (!(val & (1L << 31))) {
6972        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6973        return (-1);
6974    }
6975
6976    return (0);
6977}
6978
6979/* release split MCP access lock register */
6980static void bxe_release_alr(struct bxe_softc *sc)
6981{
6982    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6983}
6984
6985static void
6986bxe_fan_failure(struct bxe_softc *sc)
6987{
6988    int port = SC_PORT(sc);
6989    uint32_t ext_phy_config;
6990
6991    /* mark the failure */
6992    ext_phy_config =
6993        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6994
6995    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6996    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6997    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6998             ext_phy_config);
6999
7000    /* log the failure */
7001    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7002              "the card to prevent permanent damage. "
7003              "Please contact OEM Support for assistance\n");
7004
7005    /* XXX */
7006#if 1
7007    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7008#else
7009    /*
7010     * Schedule device reset (unload)
7011     * This is due to some boards consuming sufficient power when driver is
7012     * up to overheat if fan fails.
7013     */
7014    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7015    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7016#endif
7017}
7018
7019/* this function is called upon a link interrupt */
7020static void
7021bxe_link_attn(struct bxe_softc *sc)
7022{
7023    uint32_t pause_enabled = 0;
7024    struct host_port_stats *pstats;
7025    int cmng_fns;
7026    struct bxe_fastpath *fp;
7027    int i;
7028
7029    /* Make sure that we are synced with the current statistics */
7030    bxe_stats_handle(sc, STATS_EVENT_STOP);
7031	BLOGI(sc, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7032    elink_link_update(&sc->link_params, &sc->link_vars);
7033
7034    if (sc->link_vars.link_up) {
7035
7036        /* dropless flow control */
7037        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7038            pause_enabled = 0;
7039
7040            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7041                pause_enabled = 1;
7042            }
7043
7044            REG_WR(sc,
7045                   (BAR_USTRORM_INTMEM +
7046                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7047                   pause_enabled);
7048        }
7049
7050        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7051            pstats = BXE_SP(sc, port_stats);
7052            /* reset old mac stats */
7053            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7054        }
7055
7056        if (sc->state == BXE_STATE_OPEN) {
7057            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7058        }
7059
7060	/* Restart tx when the link comes back. */
7061        FOR_EACH_ETH_QUEUE(sc, i) {
7062            fp = &sc->fp[i];
7063            taskqueue_enqueue(fp->tq, &fp->tx_task);
7064	}
7065    }
7066
7067    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7068        cmng_fns = bxe_get_cmng_fns_mode(sc);
7069
7070        if (cmng_fns != CMNG_FNS_NONE) {
7071            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7072            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7073        } else {
7074            /* rate shaping and fairness are disabled */
7075            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7076        }
7077    }
7078
7079    bxe_link_report_locked(sc);
7080
7081    if (IS_MF(sc)) {
7082        ; // XXX bxe_link_sync_notify(sc);
7083    }
7084}
7085
7086static void
7087bxe_attn_int_asserted(struct bxe_softc *sc,
7088                      uint32_t         asserted)
7089{
7090    int port = SC_PORT(sc);
7091    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7092                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7093    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7094                                        NIG_REG_MASK_INTERRUPT_PORT0;
7095    uint32_t aeu_mask;
7096    uint32_t nig_mask = 0;
7097    uint32_t reg_addr;
7098    uint32_t igu_acked;
7099    uint32_t cnt;
7100
7101    if (sc->attn_state & asserted) {
7102        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7103    }
7104
7105    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7106
7107    aeu_mask = REG_RD(sc, aeu_addr);
7108
7109    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7110          aeu_mask, asserted);
7111
7112    aeu_mask &= ~(asserted & 0x3ff);
7113
7114    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7115
7116    REG_WR(sc, aeu_addr, aeu_mask);
7117
7118    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7119
7120    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7121    sc->attn_state |= asserted;
7122    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7123
7124    if (asserted & ATTN_HARD_WIRED_MASK) {
7125        if (asserted & ATTN_NIG_FOR_FUNC) {
7126
7127	    bxe_acquire_phy_lock(sc);
7128            /* save nig interrupt mask */
7129            nig_mask = REG_RD(sc, nig_int_mask_addr);
7130
7131            /* If nig_mask is not set, no need to call the update function */
7132            if (nig_mask) {
7133                REG_WR(sc, nig_int_mask_addr, 0);
7134
7135                bxe_link_attn(sc);
7136            }
7137
7138            /* handle unicore attn? */
7139        }
7140
7141        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7142            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7143        }
7144
7145        if (asserted & GPIO_2_FUNC) {
7146            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7147        }
7148
7149        if (asserted & GPIO_3_FUNC) {
7150            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7151        }
7152
7153        if (asserted & GPIO_4_FUNC) {
7154            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7155        }
7156
7157        if (port == 0) {
7158            if (asserted & ATTN_GENERAL_ATTN_1) {
7159                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7160                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7161            }
7162            if (asserted & ATTN_GENERAL_ATTN_2) {
7163                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7164                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7165            }
7166            if (asserted & ATTN_GENERAL_ATTN_3) {
7167                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7168                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7169            }
7170        } else {
7171            if (asserted & ATTN_GENERAL_ATTN_4) {
7172                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7173                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7174            }
7175            if (asserted & ATTN_GENERAL_ATTN_5) {
7176                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7177                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7178            }
7179            if (asserted & ATTN_GENERAL_ATTN_6) {
7180                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7181                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7182            }
7183        }
7184    } /* hardwired */
7185
7186    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7187        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7188    } else {
7189        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7190    }
7191
7192    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7193          asserted,
7194          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7195    REG_WR(sc, reg_addr, asserted);
7196
7197    /* now set back the mask */
7198    if (asserted & ATTN_NIG_FOR_FUNC) {
7199        /*
7200         * Verify that IGU ack through BAR was written before restoring
7201         * NIG mask. This loop should exit after 2-3 iterations max.
7202         */
7203        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7204            cnt = 0;
7205
7206            do {
7207                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7208            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7209                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7210
7211            if (!igu_acked) {
7212                BLOGE(sc, "Failed to verify IGU ack on time\n");
7213            }
7214
7215            mb();
7216        }
7217
7218        REG_WR(sc, nig_int_mask_addr, nig_mask);
7219
7220	bxe_release_phy_lock(sc);
7221    }
7222}
7223
7224static void
7225bxe_print_next_block(struct bxe_softc *sc,
7226                     int              idx,
7227                     const char       *blk)
7228{
7229    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7230}
7231
7232static int
7233bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7234                              uint32_t         sig,
7235                              int              par_num,
7236                              uint8_t          print)
7237{
7238    uint32_t cur_bit = 0;
7239    int i = 0;
7240
7241    for (i = 0; sig; i++) {
7242        cur_bit = ((uint32_t)0x1 << i);
7243        if (sig & cur_bit) {
7244            switch (cur_bit) {
7245            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7246                if (print)
7247                    bxe_print_next_block(sc, par_num++, "BRB");
7248                break;
7249            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7250                if (print)
7251                    bxe_print_next_block(sc, par_num++, "PARSER");
7252                break;
7253            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7254                if (print)
7255                    bxe_print_next_block(sc, par_num++, "TSDM");
7256                break;
7257            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7258                if (print)
7259                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7260                break;
7261            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7262                if (print)
7263                    bxe_print_next_block(sc, par_num++, "TCM");
7264                break;
7265            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7266                if (print)
7267                    bxe_print_next_block(sc, par_num++, "TSEMI");
7268                break;
7269            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7270                if (print)
7271                    bxe_print_next_block(sc, par_num++, "XPB");
7272                break;
7273            }
7274
7275            /* Clear the bit */
7276            sig &= ~cur_bit;
7277        }
7278    }
7279
7280    return (par_num);
7281}
7282
7283static int
7284bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7285                              uint32_t         sig,
7286                              int              par_num,
7287                              uint8_t          *global,
7288                              uint8_t          print)
7289{
7290    int i = 0;
7291    uint32_t cur_bit = 0;
7292    for (i = 0; sig; i++) {
7293        cur_bit = ((uint32_t)0x1 << i);
7294        if (sig & cur_bit) {
7295            switch (cur_bit) {
7296            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7297                if (print)
7298                    bxe_print_next_block(sc, par_num++, "PBF");
7299                break;
7300            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7301                if (print)
7302                    bxe_print_next_block(sc, par_num++, "QM");
7303                break;
7304            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7305                if (print)
7306                    bxe_print_next_block(sc, par_num++, "TM");
7307                break;
7308            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7309                if (print)
7310                    bxe_print_next_block(sc, par_num++, "XSDM");
7311                break;
7312            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7313                if (print)
7314                    bxe_print_next_block(sc, par_num++, "XCM");
7315                break;
7316            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7317                if (print)
7318                    bxe_print_next_block(sc, par_num++, "XSEMI");
7319                break;
7320            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7321                if (print)
7322                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7323                break;
7324            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7325                if (print)
7326                    bxe_print_next_block(sc, par_num++, "NIG");
7327                break;
7328            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7329                if (print)
7330                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7331                *global = TRUE;
7332                break;
7333            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7334                if (print)
7335                    bxe_print_next_block(sc, par_num++, "DEBUG");
7336                break;
7337            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7338                if (print)
7339                    bxe_print_next_block(sc, par_num++, "USDM");
7340                break;
7341            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7342                if (print)
7343                    bxe_print_next_block(sc, par_num++, "UCM");
7344                break;
7345            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7346                if (print)
7347                    bxe_print_next_block(sc, par_num++, "USEMI");
7348                break;
7349            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7350                if (print)
7351                    bxe_print_next_block(sc, par_num++, "UPB");
7352                break;
7353            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7354                if (print)
7355                    bxe_print_next_block(sc, par_num++, "CSDM");
7356                break;
7357            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7358                if (print)
7359                    bxe_print_next_block(sc, par_num++, "CCM");
7360                break;
7361            }
7362
7363            /* Clear the bit */
7364            sig &= ~cur_bit;
7365        }
7366    }
7367
7368    return (par_num);
7369}
7370
7371static int
7372bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7373                              uint32_t         sig,
7374                              int              par_num,
7375                              uint8_t          print)
7376{
7377    uint32_t cur_bit = 0;
7378    int i = 0;
7379
7380    for (i = 0; sig; i++) {
7381        cur_bit = ((uint32_t)0x1 << i);
7382        if (sig & cur_bit) {
7383            switch (cur_bit) {
7384            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7385                if (print)
7386                    bxe_print_next_block(sc, par_num++, "CSEMI");
7387                break;
7388            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7389                if (print)
7390                    bxe_print_next_block(sc, par_num++, "PXP");
7391                break;
7392            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7393                if (print)
7394                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7395                break;
7396            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7397                if (print)
7398                    bxe_print_next_block(sc, par_num++, "CFC");
7399                break;
7400            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7401                if (print)
7402                    bxe_print_next_block(sc, par_num++, "CDU");
7403                break;
7404            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7405                if (print)
7406                    bxe_print_next_block(sc, par_num++, "DMAE");
7407                break;
7408            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7409                if (print)
7410                    bxe_print_next_block(sc, par_num++, "IGU");
7411                break;
7412            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7413                if (print)
7414                    bxe_print_next_block(sc, par_num++, "MISC");
7415                break;
7416            }
7417
7418            /* Clear the bit */
7419            sig &= ~cur_bit;
7420        }
7421    }
7422
7423    return (par_num);
7424}
7425
7426static int
7427bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7428                              uint32_t         sig,
7429                              int              par_num,
7430                              uint8_t          *global,
7431                              uint8_t          print)
7432{
7433    uint32_t cur_bit = 0;
7434    int i = 0;
7435
7436    for (i = 0; sig; i++) {
7437        cur_bit = ((uint32_t)0x1 << i);
7438        if (sig & cur_bit) {
7439            switch (cur_bit) {
7440            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7441                if (print)
7442                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7443                *global = TRUE;
7444                break;
7445            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7446                if (print)
7447                    bxe_print_next_block(sc, par_num++,
7448                              "MCP UMP RX");
7449                *global = TRUE;
7450                break;
7451            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7452                if (print)
7453                    bxe_print_next_block(sc, par_num++,
7454                              "MCP UMP TX");
7455                *global = TRUE;
7456                break;
7457            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7458                if (print)
7459                    bxe_print_next_block(sc, par_num++,
7460                              "MCP SCPAD");
7461                *global = TRUE;
7462                break;
7463            }
7464
7465            /* Clear the bit */
7466            sig &= ~cur_bit;
7467        }
7468    }
7469
7470    return (par_num);
7471}
7472
7473static int
7474bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7475                              uint32_t         sig,
7476                              int              par_num,
7477                              uint8_t          print)
7478{
7479    uint32_t cur_bit = 0;
7480    int i = 0;
7481
7482    for (i = 0; sig; i++) {
7483        cur_bit = ((uint32_t)0x1 << i);
7484        if (sig & cur_bit) {
7485            switch (cur_bit) {
7486            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7487                if (print)
7488                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7489                break;
7490            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7491                if (print)
7492                    bxe_print_next_block(sc, par_num++, "ATC");
7493                break;
7494            }
7495
7496            /* Clear the bit */
7497            sig &= ~cur_bit;
7498        }
7499    }
7500
7501    return (par_num);
7502}
7503
7504static uint8_t
7505bxe_parity_attn(struct bxe_softc *sc,
7506                uint8_t          *global,
7507                uint8_t          print,
7508                uint32_t         *sig)
7509{
7510    int par_num = 0;
7511
7512    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7513        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7514        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7515        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7516        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7517        BLOGE(sc, "Parity error: HW block parity attention:\n"
7518                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7519              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7520              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7521              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7522              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7523              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7524
7525        if (print)
7526            BLOGI(sc, "Parity errors detected in blocks: ");
7527
7528        par_num =
7529            bxe_check_blocks_with_parity0(sc, sig[0] &
7530                                          HW_PRTY_ASSERT_SET_0,
7531                                          par_num, print);
7532        par_num =
7533            bxe_check_blocks_with_parity1(sc, sig[1] &
7534                                          HW_PRTY_ASSERT_SET_1,
7535                                          par_num, global, print);
7536        par_num =
7537            bxe_check_blocks_with_parity2(sc, sig[2] &
7538                                          HW_PRTY_ASSERT_SET_2,
7539                                          par_num, print);
7540        par_num =
7541            bxe_check_blocks_with_parity3(sc, sig[3] &
7542                                          HW_PRTY_ASSERT_SET_3,
7543                                          par_num, global, print);
7544        par_num =
7545            bxe_check_blocks_with_parity4(sc, sig[4] &
7546                                          HW_PRTY_ASSERT_SET_4,
7547                                          par_num, print);
7548
7549        if (print)
7550            BLOGI(sc, "\n");
7551
7552        return (TRUE);
7553    }
7554
7555    return (FALSE);
7556}
7557
7558static uint8_t
7559bxe_chk_parity_attn(struct bxe_softc *sc,
7560                    uint8_t          *global,
7561                    uint8_t          print)
7562{
7563    struct attn_route attn = { {0} };
7564    int port = SC_PORT(sc);
7565
7566    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7567    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7568    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7569    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7570
7571    /*
7572     * Since MCP attentions can't be disabled inside the block, we need to
7573     * read AEU registers to see whether they're currently disabled
7574     */
7575    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7576                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7577                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7578                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7579
7580
7581    if (!CHIP_IS_E1x(sc))
7582        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7583
7584    return (bxe_parity_attn(sc, global, print, attn.sig));
7585}
7586
7587static void
7588bxe_attn_int_deasserted4(struct bxe_softc *sc,
7589                         uint32_t         attn)
7590{
7591    uint32_t val;
7592
7593    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7594        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7595        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7596        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7597            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7598        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7599            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7600        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7601            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7602        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7603            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7604        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7605            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7606        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7607            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7608        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7609            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7610        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7611            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7612        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7613            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7614    }
7615
7616    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7617        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7618        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7619        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7620            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7621        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7622            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7623        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7624            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7625        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7626            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7627        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7628            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7629        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7630            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7631    }
7632
7633    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7634                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7635        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7636              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7637                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7638    }
7639}
7640
7641static void
7642bxe_e1h_disable(struct bxe_softc *sc)
7643{
7644    int port = SC_PORT(sc);
7645
7646    bxe_tx_disable(sc);
7647
7648    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7649}
7650
7651static void
7652bxe_e1h_enable(struct bxe_softc *sc)
7653{
7654    int port = SC_PORT(sc);
7655
7656    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7657
7658    // XXX bxe_tx_enable(sc);
7659}
7660
7661/*
7662 * called due to MCP event (on pmf):
7663 *   reread new bandwidth configuration
7664 *   configure FW
7665 *   notify others function about the change
7666 */
7667static void
7668bxe_config_mf_bw(struct bxe_softc *sc)
7669{
7670    if (sc->link_vars.link_up) {
7671        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7672        // XXX bxe_link_sync_notify(sc);
7673    }
7674
7675    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7676}
7677
7678static void
7679bxe_set_mf_bw(struct bxe_softc *sc)
7680{
7681    bxe_config_mf_bw(sc);
7682    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7683}
7684
7685static void
7686bxe_handle_eee_event(struct bxe_softc *sc)
7687{
7688    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7689    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7690}
7691
7692#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7693
7694static void
7695bxe_drv_info_ether_stat(struct bxe_softc *sc)
7696{
7697    struct eth_stats_info *ether_stat =
7698        &sc->sp->drv_info_to_mcp.ether_stat;
7699
7700    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7701            ETH_STAT_INFO_VERSION_LEN);
7702
7703    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7704    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7705                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7706                                          ether_stat->mac_local + MAC_PAD,
7707                                          MAC_PAD, ETH_ALEN);
7708
7709    ether_stat->mtu_size = sc->mtu;
7710
7711    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7712    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7713        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7714    }
7715
7716    // XXX ether_stat->feature_flags |= ???;
7717
7718    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7719
7720    ether_stat->txq_size = sc->tx_ring_size;
7721    ether_stat->rxq_size = sc->rx_ring_size;
7722}
7723
7724static void
7725bxe_handle_drv_info_req(struct bxe_softc *sc)
7726{
7727    enum drv_info_opcode op_code;
7728    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7729
7730    /* if drv_info version supported by MFW doesn't match - send NACK */
7731    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7732        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7733        return;
7734    }
7735
7736    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7737               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7738
7739    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7740
7741    switch (op_code) {
7742    case ETH_STATS_OPCODE:
7743        bxe_drv_info_ether_stat(sc);
7744        break;
7745    case FCOE_STATS_OPCODE:
7746    case ISCSI_STATS_OPCODE:
7747    default:
7748        /* if op code isn't supported - send NACK */
7749        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7750        return;
7751    }
7752
7753    /*
7754     * If we got drv_info attn from MFW then these fields are defined in
7755     * shmem2 for sure
7756     */
7757    SHMEM2_WR(sc, drv_info_host_addr_lo,
7758              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7759    SHMEM2_WR(sc, drv_info_host_addr_hi,
7760              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7761
7762    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7763}
7764
7765static void
7766bxe_dcc_event(struct bxe_softc *sc,
7767              uint32_t         dcc_event)
7768{
7769    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7770
7771    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7772        /*
7773         * This is the only place besides the function initialization
7774         * where the sc->flags can change so it is done without any
7775         * locks
7776         */
7777        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7778            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7779            sc->flags |= BXE_MF_FUNC_DIS;
7780            bxe_e1h_disable(sc);
7781        } else {
7782            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7783            sc->flags &= ~BXE_MF_FUNC_DIS;
7784            bxe_e1h_enable(sc);
7785        }
7786        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7787    }
7788
7789    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7790        bxe_config_mf_bw(sc);
7791        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7792    }
7793
7794    /* Report results to MCP */
7795    if (dcc_event)
7796        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7797    else
7798        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7799}
7800
7801static void
7802bxe_pmf_update(struct bxe_softc *sc)
7803{
7804    int port = SC_PORT(sc);
7805    uint32_t val;
7806
7807    sc->port.pmf = 1;
7808    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7809
7810    /*
7811     * We need the mb() to ensure the ordering between the writing to
7812     * sc->port.pmf here and reading it from the bxe_periodic_task().
7813     */
7814    mb();
7815
7816    /* queue a periodic task */
7817    // XXX schedule task...
7818
7819    // XXX bxe_dcbx_pmf_update(sc);
7820
7821    /* enable nig attention */
7822    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7823    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7824        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7825        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7826    } else if (!CHIP_IS_E1x(sc)) {
7827        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7828        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7829    }
7830
7831    bxe_stats_handle(sc, STATS_EVENT_PMF);
7832}
7833
7834static int
7835bxe_mc_assert(struct bxe_softc *sc)
7836{
7837    char last_idx;
7838    int i, rc = 0;
7839    uint32_t row0, row1, row2, row3;
7840
7841    /* XSTORM */
7842    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7843    if (last_idx)
7844        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7845
7846    /* print the asserts */
7847    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7848
7849        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7850        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7851        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7852        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7853
7854        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7855            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7856                  i, row3, row2, row1, row0);
7857            rc++;
7858        } else {
7859            break;
7860        }
7861    }
7862
7863    /* TSTORM */
7864    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7865    if (last_idx) {
7866        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7867    }
7868
7869    /* print the asserts */
7870    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7871
7872        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7873        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7874        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7875        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7876
7877        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7878            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7879                  i, row3, row2, row1, row0);
7880            rc++;
7881        } else {
7882            break;
7883        }
7884    }
7885
7886    /* CSTORM */
7887    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7888    if (last_idx) {
7889        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7890    }
7891
7892    /* print the asserts */
7893    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7894
7895        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7896        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7897        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7898        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7899
7900        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7901            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7902                  i, row3, row2, row1, row0);
7903            rc++;
7904        } else {
7905            break;
7906        }
7907    }
7908
7909    /* USTORM */
7910    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7911    if (last_idx) {
7912        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7913    }
7914
7915    /* print the asserts */
7916    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7917
7918        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7919        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7920        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7921        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7922
7923        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7924            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7925                  i, row3, row2, row1, row0);
7926            rc++;
7927        } else {
7928            break;
7929        }
7930    }
7931
7932    return (rc);
7933}
7934
7935static void
7936bxe_attn_int_deasserted3(struct bxe_softc *sc,
7937                         uint32_t         attn)
7938{
7939    int func = SC_FUNC(sc);
7940    uint32_t val;
7941
7942    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7943
7944        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7945
7946            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7947            bxe_read_mf_cfg(sc);
7948            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7949                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7950            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7951
7952            if (val & DRV_STATUS_DCC_EVENT_MASK)
7953                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7954
7955            if (val & DRV_STATUS_SET_MF_BW)
7956                bxe_set_mf_bw(sc);
7957
7958            if (val & DRV_STATUS_DRV_INFO_REQ)
7959                bxe_handle_drv_info_req(sc);
7960
7961            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7962                bxe_pmf_update(sc);
7963
7964            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7965                bxe_handle_eee_event(sc);
7966
7967            if (sc->link_vars.periodic_flags &
7968                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7969                /* sync with link */
7970		bxe_acquire_phy_lock(sc);
7971                sc->link_vars.periodic_flags &=
7972                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7973		bxe_release_phy_lock(sc);
7974                if (IS_MF(sc))
7975                    ; // XXX bxe_link_sync_notify(sc);
7976                bxe_link_report(sc);
7977            }
7978
7979            /*
7980             * Always call it here: bxe_link_report() will
7981             * prevent the link indication duplication.
7982             */
7983            bxe_link_status_update(sc);
7984
7985        } else if (attn & BXE_MC_ASSERT_BITS) {
7986
7987            BLOGE(sc, "MC assert!\n");
7988            bxe_mc_assert(sc);
7989            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7990            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7991            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7992            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7993            bxe_panic(sc, ("MC assert!\n"));
7994
7995        } else if (attn & BXE_MCP_ASSERT) {
7996
7997            BLOGE(sc, "MCP assert!\n");
7998            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7999            // XXX bxe_fw_dump(sc);
8000
8001        } else {
8002            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8003        }
8004    }
8005
8006    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8007        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8008        if (attn & BXE_GRC_TIMEOUT) {
8009            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8010            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8011        }
8012        if (attn & BXE_GRC_RSV) {
8013            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8014            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8015        }
8016        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8017    }
8018}
8019
8020static void
8021bxe_attn_int_deasserted2(struct bxe_softc *sc,
8022                         uint32_t         attn)
8023{
8024    int port = SC_PORT(sc);
8025    int reg_offset;
8026    uint32_t val0, mask0, val1, mask1;
8027    uint32_t val;
8028
8029    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8030        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8031        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8032        /* CFC error attention */
8033        if (val & 0x2) {
8034            BLOGE(sc, "FATAL error from CFC\n");
8035        }
8036    }
8037
8038    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8039        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8040        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8041        /* RQ_USDMDP_FIFO_OVERFLOW */
8042        if (val & 0x18000) {
8043            BLOGE(sc, "FATAL error from PXP\n");
8044        }
8045
8046        if (!CHIP_IS_E1x(sc)) {
8047            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8048            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8049        }
8050    }
8051
8052#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8053#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8054
8055    if (attn & AEU_PXP2_HW_INT_BIT) {
8056        /*  CQ47854 workaround do not panic on
8057         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8058         */
8059        if (!CHIP_IS_E1x(sc)) {
8060            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8061            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8062            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8063            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8064            /*
8065             * If the only PXP2_EOP_ERROR_BIT is set in
8066             * STS0 and STS1 - clear it
8067             *
8068             * probably we lose additional attentions between
8069             * STS0 and STS_CLR0, in this case user will not
8070             * be notified about them
8071             */
8072            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8073                !(val1 & mask1))
8074                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8075
8076            /* print the register, since no one can restore it */
8077            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8078
8079            /*
8080             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8081             * then notify
8082             */
8083            if (val0 & PXP2_EOP_ERROR_BIT) {
8084                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8085
8086                /*
8087                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8088                 * set then clear attention from PXP2 block without panic
8089                 */
8090                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8091                    ((val1 & mask1) == 0))
8092                    attn &= ~AEU_PXP2_HW_INT_BIT;
8093            }
8094        }
8095    }
8096
8097    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8098        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8099                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8100
8101        val = REG_RD(sc, reg_offset);
8102        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8103        REG_WR(sc, reg_offset, val);
8104
8105        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8106              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8107        bxe_panic(sc, ("HW block attention set2\n"));
8108    }
8109}
8110
8111static void
8112bxe_attn_int_deasserted1(struct bxe_softc *sc,
8113                         uint32_t         attn)
8114{
8115    int port = SC_PORT(sc);
8116    int reg_offset;
8117    uint32_t val;
8118
8119    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8120        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8121        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8122        /* DORQ discard attention */
8123        if (val & 0x2) {
8124            BLOGE(sc, "FATAL error from DORQ\n");
8125        }
8126    }
8127
8128    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8129        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8130                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8131
8132        val = REG_RD(sc, reg_offset);
8133        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8134        REG_WR(sc, reg_offset, val);
8135
8136        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8137              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8138        bxe_panic(sc, ("HW block attention set1\n"));
8139    }
8140}
8141
8142static void
8143bxe_attn_int_deasserted0(struct bxe_softc *sc,
8144                         uint32_t         attn)
8145{
8146    int port = SC_PORT(sc);
8147    int reg_offset;
8148    uint32_t val;
8149
8150    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8151                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8152
8153    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8154        val = REG_RD(sc, reg_offset);
8155        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8156        REG_WR(sc, reg_offset, val);
8157
8158        BLOGW(sc, "SPIO5 hw attention\n");
8159
8160        /* Fan failure attention */
8161        elink_hw_reset_phy(&sc->link_params);
8162        bxe_fan_failure(sc);
8163    }
8164
8165    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8166	bxe_acquire_phy_lock(sc);
8167        elink_handle_module_detect_int(&sc->link_params);
8168	bxe_release_phy_lock(sc);
8169    }
8170
8171    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8172        val = REG_RD(sc, reg_offset);
8173        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8174        REG_WR(sc, reg_offset, val);
8175
8176        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8177                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8178    }
8179}
8180
8181static void
8182bxe_attn_int_deasserted(struct bxe_softc *sc,
8183                        uint32_t         deasserted)
8184{
8185    struct attn_route attn;
8186    struct attn_route *group_mask;
8187    int port = SC_PORT(sc);
8188    int index;
8189    uint32_t reg_addr;
8190    uint32_t val;
8191    uint32_t aeu_mask;
8192    uint8_t global = FALSE;
8193
8194    /*
8195     * Need to take HW lock because MCP or other port might also
8196     * try to handle this event.
8197     */
8198    bxe_acquire_alr(sc);
8199
8200    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8201        /* XXX
8202         * In case of parity errors don't handle attentions so that
8203         * other function would "see" parity errors.
8204         */
8205        sc->recovery_state = BXE_RECOVERY_INIT;
8206        // XXX schedule a recovery task...
8207        /* disable HW interrupts */
8208        bxe_int_disable(sc);
8209        bxe_release_alr(sc);
8210        return;
8211    }
8212
8213    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8214    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8215    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8216    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8217    if (!CHIP_IS_E1x(sc)) {
8218        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8219    } else {
8220        attn.sig[4] = 0;
8221    }
8222
8223    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8224          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8225
8226    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8227        if (deasserted & (1 << index)) {
8228            group_mask = &sc->attn_group[index];
8229
8230            BLOGD(sc, DBG_INTR,
8231                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8232                  group_mask->sig[0], group_mask->sig[1],
8233                  group_mask->sig[2], group_mask->sig[3],
8234                  group_mask->sig[4]);
8235
8236            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8237            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8238            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8239            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8240            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8241        }
8242    }
8243
8244    bxe_release_alr(sc);
8245
8246    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8247        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8248                    COMMAND_REG_ATTN_BITS_CLR);
8249    } else {
8250        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8251    }
8252
8253    val = ~deasserted;
8254    BLOGD(sc, DBG_INTR,
8255          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8256          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8257    REG_WR(sc, reg_addr, val);
8258
8259    if (~sc->attn_state & deasserted) {
8260        BLOGE(sc, "IGU error\n");
8261    }
8262
8263    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8264                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8265
8266    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8267
8268    aeu_mask = REG_RD(sc, reg_addr);
8269
8270    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8271          aeu_mask, deasserted);
8272    aeu_mask |= (deasserted & 0x3ff);
8273    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8274
8275    REG_WR(sc, reg_addr, aeu_mask);
8276    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8277
8278    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8279    sc->attn_state &= ~deasserted;
8280    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8281}
8282
8283static void
8284bxe_attn_int(struct bxe_softc *sc)
8285{
8286    /* read local copy of bits */
8287    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8288    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8289    uint32_t attn_state = sc->attn_state;
8290
8291    /* look for changed bits */
8292    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8293    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8294
8295    BLOGD(sc, DBG_INTR,
8296          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8297          attn_bits, attn_ack, asserted, deasserted);
8298
8299    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8300        BLOGE(sc, "BAD attention state\n");
8301    }
8302
8303    /* handle bits that were raised */
8304    if (asserted) {
8305        bxe_attn_int_asserted(sc, asserted);
8306    }
8307
8308    if (deasserted) {
8309        bxe_attn_int_deasserted(sc, deasserted);
8310    }
8311}
8312
8313static uint16_t
8314bxe_update_dsb_idx(struct bxe_softc *sc)
8315{
8316    struct host_sp_status_block *def_sb = sc->def_sb;
8317    uint16_t rc = 0;
8318
8319    mb(); /* status block is written to by the chip */
8320
8321    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8322        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8323        rc |= BXE_DEF_SB_ATT_IDX;
8324    }
8325
8326    if (sc->def_idx != def_sb->sp_sb.running_index) {
8327        sc->def_idx = def_sb->sp_sb.running_index;
8328        rc |= BXE_DEF_SB_IDX;
8329    }
8330
8331    mb();
8332
8333    return (rc);
8334}
8335
8336static inline struct ecore_queue_sp_obj *
8337bxe_cid_to_q_obj(struct bxe_softc *sc,
8338                 uint32_t         cid)
8339{
8340    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8341    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8342}
8343
8344static void
8345bxe_handle_mcast_eqe(struct bxe_softc *sc)
8346{
8347    struct ecore_mcast_ramrod_params rparam;
8348    int rc;
8349
8350    memset(&rparam, 0, sizeof(rparam));
8351
8352    rparam.mcast_obj = &sc->mcast_obj;
8353
8354    BXE_MCAST_LOCK(sc);
8355
8356    /* clear pending state for the last command */
8357    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8358
8359    /* if there are pending mcast commands - send them */
8360    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8361        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8362        if (rc < 0) {
8363            BLOGD(sc, DBG_SP,
8364                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8365        }
8366    }
8367
8368    BXE_MCAST_UNLOCK(sc);
8369}
8370
8371static void
8372bxe_handle_classification_eqe(struct bxe_softc      *sc,
8373                              union event_ring_elem *elem)
8374{
8375    unsigned long ramrod_flags = 0;
8376    int rc = 0;
8377    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8378    struct ecore_vlan_mac_obj *vlan_mac_obj;
8379
8380    /* always push next commands out, don't wait here */
8381    bit_set(&ramrod_flags, RAMROD_CONT);
8382
8383    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8384    case ECORE_FILTER_MAC_PENDING:
8385        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8386        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8387        break;
8388
8389    case ECORE_FILTER_MCAST_PENDING:
8390        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8391        /*
8392         * This is only relevant for 57710 where multicast MACs are
8393         * configured as unicast MACs using the same ramrod.
8394         */
8395        bxe_handle_mcast_eqe(sc);
8396        return;
8397
8398    default:
8399        BLOGE(sc, "Unsupported classification command: %d\n",
8400              elem->message.data.eth_event.echo);
8401        return;
8402    }
8403
8404    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8405
8406    if (rc < 0) {
8407        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8408    } else if (rc > 0) {
8409        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8410    }
8411}
8412
8413static void
8414bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8415                       union event_ring_elem *elem)
8416{
8417    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8418
8419    /* send rx_mode command again if was requested */
8420    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8421                               &sc->sp_state)) {
8422        bxe_set_storm_rx_mode(sc);
8423    }
8424}
8425
8426static void
8427bxe_update_eq_prod(struct bxe_softc *sc,
8428                   uint16_t         prod)
8429{
8430    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8431    wmb(); /* keep prod updates ordered */
8432}
8433
8434static void
8435bxe_eq_int(struct bxe_softc *sc)
8436{
8437    uint16_t hw_cons, sw_cons, sw_prod;
8438    union event_ring_elem *elem;
8439    uint8_t echo;
8440    uint32_t cid;
8441    uint8_t opcode;
8442    int spqe_cnt = 0;
8443    struct ecore_queue_sp_obj *q_obj;
8444    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8445    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8446
8447    hw_cons = le16toh(*sc->eq_cons_sb);
8448
8449    /*
8450     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8451     * when we get to the next-page we need to adjust so the loop
8452     * condition below will be met. The next element is the size of a
8453     * regular element and hence incrementing by 1
8454     */
8455    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8456        hw_cons++;
8457    }
8458
8459    /*
8460     * This function may never run in parallel with itself for a
8461     * specific sc and no need for a read memory barrier here.
8462     */
8463    sw_cons = sc->eq_cons;
8464    sw_prod = sc->eq_prod;
8465
8466    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8467          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8468
8469    for (;
8470         sw_cons != hw_cons;
8471         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8472
8473        elem = &sc->eq[EQ_DESC(sw_cons)];
8474
8475        /* elem CID originates from FW, actually LE */
8476        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8477        opcode = elem->message.opcode;
8478
8479        /* handle eq element */
8480        switch (opcode) {
8481
8482        case EVENT_RING_OPCODE_STAT_QUERY:
8483            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8484                  sc->stats_comp++);
8485            /* nothing to do with stats comp */
8486            goto next_spqe;
8487
8488        case EVENT_RING_OPCODE_CFC_DEL:
8489            /* handle according to cid range */
8490            /* we may want to verify here that the sc state is HALTING */
8491            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8492            q_obj = bxe_cid_to_q_obj(sc, cid);
8493            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8494                break;
8495            }
8496            goto next_spqe;
8497
8498        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8499            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8500            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8501                break;
8502            }
8503            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8504            goto next_spqe;
8505
8506        case EVENT_RING_OPCODE_START_TRAFFIC:
8507            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8508            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8509                break;
8510            }
8511            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8512            goto next_spqe;
8513
8514        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8515            echo = elem->message.data.function_update_event.echo;
8516            if (echo == SWITCH_UPDATE) {
8517                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8518                if (f_obj->complete_cmd(sc, f_obj,
8519                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8520                    break;
8521                }
8522            }
8523            else {
8524                BLOGD(sc, DBG_SP,
8525                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8526            }
8527            goto next_spqe;
8528
8529        case EVENT_RING_OPCODE_FORWARD_SETUP:
8530            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8531            if (q_obj->complete_cmd(sc, q_obj,
8532                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8533                break;
8534            }
8535            goto next_spqe;
8536
8537        case EVENT_RING_OPCODE_FUNCTION_START:
8538            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8539            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8540                break;
8541            }
8542            goto next_spqe;
8543
8544        case EVENT_RING_OPCODE_FUNCTION_STOP:
8545            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8546            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8547                break;
8548            }
8549            goto next_spqe;
8550        }
8551
8552        switch (opcode | sc->state) {
8553        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8554        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8555            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8556            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8557            rss_raw->clear_pending(rss_raw);
8558            break;
8559
8560        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8561        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8562        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8563        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8564        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8565        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8566            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8567            bxe_handle_classification_eqe(sc, elem);
8568            break;
8569
8570        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8571        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8572        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8573            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8574            bxe_handle_mcast_eqe(sc);
8575            break;
8576
8577        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8578        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8579        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8580            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8581            bxe_handle_rx_mode_eqe(sc, elem);
8582            break;
8583
8584        default:
8585            /* unknown event log error and continue */
8586            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8587                  elem->message.opcode, sc->state);
8588        }
8589
8590next_spqe:
8591        spqe_cnt++;
8592    } /* for */
8593
8594    mb();
8595    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8596
8597    sc->eq_cons = sw_cons;
8598    sc->eq_prod = sw_prod;
8599
8600    /* make sure that above mem writes were issued towards the memory */
8601    wmb();
8602
8603    /* update producer */
8604    bxe_update_eq_prod(sc, sc->eq_prod);
8605}
8606
8607static void
8608bxe_handle_sp_tq(void *context,
8609                 int  pending)
8610{
8611    struct bxe_softc *sc = (struct bxe_softc *)context;
8612    uint16_t status;
8613
8614    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8615
8616    /* what work needs to be performed? */
8617    status = bxe_update_dsb_idx(sc);
8618
8619    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8620
8621    /* HW attentions */
8622    if (status & BXE_DEF_SB_ATT_IDX) {
8623        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8624        bxe_attn_int(sc);
8625        status &= ~BXE_DEF_SB_ATT_IDX;
8626    }
8627
8628    /* SP events: STAT_QUERY and others */
8629    if (status & BXE_DEF_SB_IDX) {
8630        /* handle EQ completions */
8631        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8632        bxe_eq_int(sc);
8633        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8634                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8635        status &= ~BXE_DEF_SB_IDX;
8636    }
8637
8638    /* if status is non zero then something went wrong */
8639    if (__predict_false(status)) {
8640        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8641    }
8642
8643    /* ack status block only if something was actually handled */
8644    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8645               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8646
8647    /*
8648     * Must be called after the EQ processing (since eq leads to sriov
8649     * ramrod completion flows).
8650     * This flow may have been scheduled by the arrival of a ramrod
8651     * completion, or by the sriov code rescheduling itself.
8652     */
8653    // XXX bxe_iov_sp_task(sc);
8654
8655}
8656
8657static void
8658bxe_handle_fp_tq(void *context,
8659                 int  pending)
8660{
8661    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8662    struct bxe_softc *sc = fp->sc;
8663    uint8_t more_tx = FALSE;
8664    uint8_t more_rx = FALSE;
8665
8666    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8667
8668    /* XXX
8669     * IFF_DRV_RUNNING state can't be checked here since we process
8670     * slowpath events on a client queue during setup. Instead
8671     * we need to add a "process/continue" flag here that the driver
8672     * can use to tell the task here not to do anything.
8673     */
8674#if 0
8675    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8676        return;
8677    }
8678#endif
8679
8680    /* update the fastpath index */
8681    bxe_update_fp_sb_idx(fp);
8682
8683    /* XXX add loop here if ever support multiple tx CoS */
8684    /* fp->txdata[cos] */
8685    if (bxe_has_tx_work(fp)) {
8686        BXE_FP_TX_LOCK(fp);
8687        more_tx = bxe_txeof(sc, fp);
8688        BXE_FP_TX_UNLOCK(fp);
8689    }
8690
8691    if (bxe_has_rx_work(fp)) {
8692        more_rx = bxe_rxeof(sc, fp);
8693    }
8694
8695    if (more_rx /*|| more_tx*/) {
8696        /* still more work to do */
8697        taskqueue_enqueue(fp->tq, &fp->tq_task);
8698        return;
8699    }
8700
8701    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8702               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8703}
8704
8705static void
8706bxe_task_fp(struct bxe_fastpath *fp)
8707{
8708    struct bxe_softc *sc = fp->sc;
8709    uint8_t more_tx = FALSE;
8710    uint8_t more_rx = FALSE;
8711
8712    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8713
8714    /* update the fastpath index */
8715    bxe_update_fp_sb_idx(fp);
8716
8717    /* XXX add loop here if ever support multiple tx CoS */
8718    /* fp->txdata[cos] */
8719    if (bxe_has_tx_work(fp)) {
8720        BXE_FP_TX_LOCK(fp);
8721        more_tx = bxe_txeof(sc, fp);
8722        BXE_FP_TX_UNLOCK(fp);
8723    }
8724
8725    if (bxe_has_rx_work(fp)) {
8726        more_rx = bxe_rxeof(sc, fp);
8727    }
8728
8729    if (more_rx /*|| more_tx*/) {
8730        /* still more work to do, bail out if this ISR and process later */
8731        taskqueue_enqueue(fp->tq, &fp->tq_task);
8732        return;
8733    }
8734
8735    /*
8736     * Here we write the fastpath index taken before doing any tx or rx work.
8737     * It is very well possible other hw events occurred up to this point and
8738     * they were actually processed accordingly above. Since we're going to
8739     * write an older fastpath index, an interrupt is coming which we might
8740     * not do any work in.
8741     */
8742    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8743               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8744}
8745
8746/*
8747 * Legacy interrupt entry point.
8748 *
8749 * Verifies that the controller generated the interrupt and
8750 * then calls a separate routine to handle the various
8751 * interrupt causes: link, RX, and TX.
8752 */
8753static void
8754bxe_intr_legacy(void *xsc)
8755{
8756    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8757    struct bxe_fastpath *fp;
8758    uint16_t status, mask;
8759    int i;
8760
8761    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8762
8763    /*
8764     * 0 for ustorm, 1 for cstorm
8765     * the bits returned from ack_int() are 0-15
8766     * bit 0 = attention status block
8767     * bit 1 = fast path status block
8768     * a mask of 0x2 or more = tx/rx event
8769     * a mask of 1 = slow path event
8770     */
8771
8772    status = bxe_ack_int(sc);
8773
8774    /* the interrupt is not for us */
8775    if (__predict_false(status == 0)) {
8776        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8777        return;
8778    }
8779
8780    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8781
8782    FOR_EACH_ETH_QUEUE(sc, i) {
8783        fp = &sc->fp[i];
8784        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8785        if (status & mask) {
8786            /* acknowledge and disable further fastpath interrupts */
8787            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8788            bxe_task_fp(fp);
8789            status &= ~mask;
8790        }
8791    }
8792
8793    if (__predict_false(status & 0x1)) {
8794        /* acknowledge and disable further slowpath interrupts */
8795        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8796
8797        /* schedule slowpath handler */
8798        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8799
8800        status &= ~0x1;
8801    }
8802
8803    if (__predict_false(status)) {
8804        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8805    }
8806}
8807
8808/* slowpath interrupt entry point */
8809static void
8810bxe_intr_sp(void *xsc)
8811{
8812    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8813
8814    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8815
8816    /* acknowledge and disable further slowpath interrupts */
8817    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8818
8819    /* schedule slowpath handler */
8820    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8821}
8822
8823/* fastpath interrupt entry point */
8824static void
8825bxe_intr_fp(void *xfp)
8826{
8827    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8828    struct bxe_softc *sc = fp->sc;
8829
8830    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8831
8832    BLOGD(sc, DBG_INTR,
8833          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8834          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8835
8836    /* acknowledge and disable further fastpath interrupts */
8837    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8838
8839    bxe_task_fp(fp);
8840}
8841
8842/* Release all interrupts allocated by the driver. */
8843static void
8844bxe_interrupt_free(struct bxe_softc *sc)
8845{
8846    int i;
8847
8848    switch (sc->interrupt_mode) {
8849    case INTR_MODE_INTX:
8850        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8851        if (sc->intr[0].resource != NULL) {
8852            bus_release_resource(sc->dev,
8853                                 SYS_RES_IRQ,
8854                                 sc->intr[0].rid,
8855                                 sc->intr[0].resource);
8856        }
8857        break;
8858    case INTR_MODE_MSI:
8859        for (i = 0; i < sc->intr_count; i++) {
8860            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8861            if (sc->intr[i].resource && sc->intr[i].rid) {
8862                bus_release_resource(sc->dev,
8863                                     SYS_RES_IRQ,
8864                                     sc->intr[i].rid,
8865                                     sc->intr[i].resource);
8866            }
8867        }
8868        pci_release_msi(sc->dev);
8869        break;
8870    case INTR_MODE_MSIX:
8871        for (i = 0; i < sc->intr_count; i++) {
8872            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8873            if (sc->intr[i].resource && sc->intr[i].rid) {
8874                bus_release_resource(sc->dev,
8875                                     SYS_RES_IRQ,
8876                                     sc->intr[i].rid,
8877                                     sc->intr[i].resource);
8878            }
8879        }
8880        pci_release_msi(sc->dev);
8881        break;
8882    default:
8883        /* nothing to do as initial allocation failed */
8884        break;
8885    }
8886}
8887
8888/*
8889 * This function determines and allocates the appropriate
8890 * interrupt based on system capabilites and user request.
8891 *
8892 * The user may force a particular interrupt mode, specify
8893 * the number of receive queues, specify the method for
8894 * distribuitng received frames to receive queues, or use
8895 * the default settings which will automatically select the
8896 * best supported combination.  In addition, the OS may or
8897 * may not support certain combinations of these settings.
8898 * This routine attempts to reconcile the settings requested
8899 * by the user with the capabilites available from the system
8900 * to select the optimal combination of features.
8901 *
8902 * Returns:
8903 *   0 = Success, !0 = Failure.
8904 */
8905static int
8906bxe_interrupt_alloc(struct bxe_softc *sc)
8907{
8908    int msix_count = 0;
8909    int msi_count = 0;
8910    int num_requested = 0;
8911    int num_allocated = 0;
8912    int rid, i, j;
8913    int rc;
8914
8915    /* get the number of available MSI/MSI-X interrupts from the OS */
8916    if (sc->interrupt_mode > 0) {
8917        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8918            msix_count = pci_msix_count(sc->dev);
8919        }
8920
8921        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8922            msi_count = pci_msi_count(sc->dev);
8923        }
8924
8925        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8926              msi_count, msix_count);
8927    }
8928
8929    do { /* try allocating MSI-X interrupt resources (at least 2) */
8930        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8931            break;
8932        }
8933
8934        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8935            (msix_count < 2)) {
8936            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8937            break;
8938        }
8939
8940        /* ask for the necessary number of MSI-X vectors */
8941        num_requested = min((sc->num_queues + 1), msix_count);
8942
8943        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8944
8945        num_allocated = num_requested;
8946        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8947            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8948            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8949            break;
8950        }
8951
8952        if (num_allocated < 2) { /* possible? */
8953            BLOGE(sc, "MSI-X allocation less than 2!\n");
8954            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8955            pci_release_msi(sc->dev);
8956            break;
8957        }
8958
8959        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8960              num_requested, num_allocated);
8961
8962        /* best effort so use the number of vectors allocated to us */
8963        sc->intr_count = num_allocated;
8964        sc->num_queues = num_allocated - 1;
8965
8966        rid = 1; /* initial resource identifier */
8967
8968        /* allocate the MSI-X vectors */
8969        for (i = 0; i < num_allocated; i++) {
8970            sc->intr[i].rid = (rid + i);
8971
8972            if ((sc->intr[i].resource =
8973                 bus_alloc_resource_any(sc->dev,
8974                                        SYS_RES_IRQ,
8975                                        &sc->intr[i].rid,
8976                                        RF_ACTIVE)) == NULL) {
8977                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8978                      i, (rid + i));
8979
8980                for (j = (i - 1); j >= 0; j--) {
8981                    bus_release_resource(sc->dev,
8982                                         SYS_RES_IRQ,
8983                                         sc->intr[j].rid,
8984                                         sc->intr[j].resource);
8985                }
8986
8987                sc->intr_count = 0;
8988                sc->num_queues = 0;
8989                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8990                pci_release_msi(sc->dev);
8991                break;
8992            }
8993
8994            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
8995        }
8996    } while (0);
8997
8998    do { /* try allocating MSI vector resources (at least 2) */
8999        if (sc->interrupt_mode != INTR_MODE_MSI) {
9000            break;
9001        }
9002
9003        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9004            (msi_count < 1)) {
9005            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9006            break;
9007        }
9008
9009        /* ask for a single MSI vector */
9010        num_requested = 1;
9011
9012        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9013
9014        num_allocated = num_requested;
9015        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9016            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9017            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9018            break;
9019        }
9020
9021        if (num_allocated != 1) { /* possible? */
9022            BLOGE(sc, "MSI allocation is not 1!\n");
9023            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9024            pci_release_msi(sc->dev);
9025            break;
9026        }
9027
9028        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9029              num_requested, num_allocated);
9030
9031        /* best effort so use the number of vectors allocated to us */
9032        sc->intr_count = num_allocated;
9033        sc->num_queues = num_allocated;
9034
9035        rid = 1; /* initial resource identifier */
9036
9037        sc->intr[0].rid = rid;
9038
9039        if ((sc->intr[0].resource =
9040             bus_alloc_resource_any(sc->dev,
9041                                    SYS_RES_IRQ,
9042                                    &sc->intr[0].rid,
9043                                    RF_ACTIVE)) == NULL) {
9044            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9045            sc->intr_count = 0;
9046            sc->num_queues = 0;
9047            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9048            pci_release_msi(sc->dev);
9049            break;
9050        }
9051
9052        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9053    } while (0);
9054
9055    do { /* try allocating INTx vector resources */
9056        if (sc->interrupt_mode != INTR_MODE_INTX) {
9057            break;
9058        }
9059
9060        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9061
9062        /* only one vector for INTx */
9063        sc->intr_count = 1;
9064        sc->num_queues = 1;
9065
9066        rid = 0; /* initial resource identifier */
9067
9068        sc->intr[0].rid = rid;
9069
9070        if ((sc->intr[0].resource =
9071             bus_alloc_resource_any(sc->dev,
9072                                    SYS_RES_IRQ,
9073                                    &sc->intr[0].rid,
9074                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9075            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9076            sc->intr_count = 0;
9077            sc->num_queues = 0;
9078            sc->interrupt_mode = -1; /* Failed! */
9079            break;
9080        }
9081
9082        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9083    } while (0);
9084
9085    if (sc->interrupt_mode == -1) {
9086        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9087        rc = 1;
9088    } else {
9089        BLOGD(sc, DBG_LOAD,
9090              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9091              sc->interrupt_mode, sc->num_queues);
9092        rc = 0;
9093    }
9094
9095    return (rc);
9096}
9097
9098static void
9099bxe_interrupt_detach(struct bxe_softc *sc)
9100{
9101    struct bxe_fastpath *fp;
9102    int i;
9103
9104    /* release interrupt resources */
9105    for (i = 0; i < sc->intr_count; i++) {
9106        if (sc->intr[i].resource && sc->intr[i].tag) {
9107            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9108            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9109        }
9110    }
9111
9112    for (i = 0; i < sc->num_queues; i++) {
9113        fp = &sc->fp[i];
9114        if (fp->tq) {
9115            taskqueue_drain(fp->tq, &fp->tq_task);
9116            taskqueue_drain(fp->tq, &fp->tx_task);
9117            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9118                NULL))
9119                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9120            taskqueue_free(fp->tq);
9121            fp->tq = NULL;
9122        }
9123    }
9124
9125
9126    if (sc->sp_tq) {
9127        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9128        taskqueue_free(sc->sp_tq);
9129        sc->sp_tq = NULL;
9130    }
9131}
9132
9133/*
9134 * Enables interrupts and attach to the ISR.
9135 *
9136 * When using multiple MSI/MSI-X vectors the first vector
9137 * is used for slowpath operations while all remaining
9138 * vectors are used for fastpath operations.  If only a
9139 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9140 * ISR must look for both slowpath and fastpath completions.
9141 */
9142static int
9143bxe_interrupt_attach(struct bxe_softc *sc)
9144{
9145    struct bxe_fastpath *fp;
9146    int rc = 0;
9147    int i;
9148
9149    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9150             "bxe%d_sp_tq", sc->unit);
9151    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9152    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9153                                 taskqueue_thread_enqueue,
9154                                 &sc->sp_tq);
9155    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9156                            "%s", sc->sp_tq_name);
9157
9158
9159    for (i = 0; i < sc->num_queues; i++) {
9160        fp = &sc->fp[i];
9161        snprintf(fp->tq_name, sizeof(fp->tq_name),
9162                 "bxe%d_fp%d_tq", sc->unit, i);
9163        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9164        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9165        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9166                                  taskqueue_thread_enqueue,
9167                                  &fp->tq);
9168        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9169                          bxe_tx_mq_start_deferred, fp);
9170        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9171                                "%s", fp->tq_name);
9172    }
9173
9174    /* setup interrupt handlers */
9175    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9176        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9177
9178        /*
9179         * Setup the interrupt handler. Note that we pass the driver instance
9180         * to the interrupt handler for the slowpath.
9181         */
9182        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9183                                 (INTR_TYPE_NET | INTR_MPSAFE),
9184                                 NULL, bxe_intr_sp, sc,
9185                                 &sc->intr[0].tag)) != 0) {
9186            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9187            goto bxe_interrupt_attach_exit;
9188        }
9189
9190        bus_describe_intr(sc->dev, sc->intr[0].resource,
9191                          sc->intr[0].tag, "sp");
9192
9193        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9194
9195        /* initialize the fastpath vectors (note the first was used for sp) */
9196        for (i = 0; i < sc->num_queues; i++) {
9197            fp = &sc->fp[i];
9198            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9199
9200            /*
9201             * Setup the interrupt handler. Note that we pass the
9202             * fastpath context to the interrupt handler in this
9203             * case.
9204             */
9205            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9206                                     (INTR_TYPE_NET | INTR_MPSAFE),
9207                                     NULL, bxe_intr_fp, fp,
9208                                     &sc->intr[i + 1].tag)) != 0) {
9209                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9210                      (i + 1), rc);
9211                goto bxe_interrupt_attach_exit;
9212            }
9213
9214            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9215                              sc->intr[i + 1].tag, "fp%02d", i);
9216
9217            /* bind the fastpath instance to a cpu */
9218            if (sc->num_queues > 1) {
9219                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9220            }
9221
9222            fp->state = BXE_FP_STATE_IRQ;
9223        }
9224    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9225        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9226
9227        /*
9228         * Setup the interrupt handler. Note that we pass the
9229         * driver instance to the interrupt handler which
9230         * will handle both the slowpath and fastpath.
9231         */
9232        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9233                                 (INTR_TYPE_NET | INTR_MPSAFE),
9234                                 NULL, bxe_intr_legacy, sc,
9235                                 &sc->intr[0].tag)) != 0) {
9236            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9237            goto bxe_interrupt_attach_exit;
9238        }
9239
9240    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9241        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9242
9243        /*
9244         * Setup the interrupt handler. Note that we pass the
9245         * driver instance to the interrupt handler which
9246         * will handle both the slowpath and fastpath.
9247         */
9248        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9249                                 (INTR_TYPE_NET | INTR_MPSAFE),
9250                                 NULL, bxe_intr_legacy, sc,
9251                                 &sc->intr[0].tag)) != 0) {
9252            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9253            goto bxe_interrupt_attach_exit;
9254        }
9255    }
9256
9257bxe_interrupt_attach_exit:
9258
9259    return (rc);
9260}
9261
9262static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9263static int  bxe_init_hw_common(struct bxe_softc *sc);
9264static int  bxe_init_hw_port(struct bxe_softc *sc);
9265static int  bxe_init_hw_func(struct bxe_softc *sc);
9266static void bxe_reset_common(struct bxe_softc *sc);
9267static void bxe_reset_port(struct bxe_softc *sc);
9268static void bxe_reset_func(struct bxe_softc *sc);
9269static int  bxe_gunzip_init(struct bxe_softc *sc);
9270static void bxe_gunzip_end(struct bxe_softc *sc);
9271static int  bxe_init_firmware(struct bxe_softc *sc);
9272static void bxe_release_firmware(struct bxe_softc *sc);
9273
9274static struct
9275ecore_func_sp_drv_ops bxe_func_sp_drv = {
9276    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9277    .init_hw_cmn      = bxe_init_hw_common,
9278    .init_hw_port     = bxe_init_hw_port,
9279    .init_hw_func     = bxe_init_hw_func,
9280
9281    .reset_hw_cmn     = bxe_reset_common,
9282    .reset_hw_port    = bxe_reset_port,
9283    .reset_hw_func    = bxe_reset_func,
9284
9285    .gunzip_init      = bxe_gunzip_init,
9286    .gunzip_end       = bxe_gunzip_end,
9287
9288    .init_fw          = bxe_init_firmware,
9289    .release_fw       = bxe_release_firmware,
9290};
9291
9292static void
9293bxe_init_func_obj(struct bxe_softc *sc)
9294{
9295    sc->dmae_ready = 0;
9296
9297    ecore_init_func_obj(sc,
9298                        &sc->func_obj,
9299                        BXE_SP(sc, func_rdata),
9300                        BXE_SP_MAPPING(sc, func_rdata),
9301                        BXE_SP(sc, func_afex_rdata),
9302                        BXE_SP_MAPPING(sc, func_afex_rdata),
9303                        &bxe_func_sp_drv);
9304}
9305
9306static int
9307bxe_init_hw(struct bxe_softc *sc,
9308            uint32_t         load_code)
9309{
9310    struct ecore_func_state_params func_params = { NULL };
9311    int rc;
9312
9313    /* prepare the parameters for function state transitions */
9314    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9315
9316    func_params.f_obj = &sc->func_obj;
9317    func_params.cmd = ECORE_F_CMD_HW_INIT;
9318
9319    func_params.params.hw_init.load_phase = load_code;
9320
9321    /*
9322     * Via a plethora of function pointers, we will eventually reach
9323     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9324     */
9325    rc = ecore_func_state_change(sc, &func_params);
9326
9327    return (rc);
9328}
9329
9330static void
9331bxe_fill(struct bxe_softc *sc,
9332         uint32_t         addr,
9333         int              fill,
9334         uint32_t         len)
9335{
9336    uint32_t i;
9337
9338    if (!(len % 4) && !(addr % 4)) {
9339        for (i = 0; i < len; i += 4) {
9340            REG_WR(sc, (addr + i), fill);
9341        }
9342    } else {
9343        for (i = 0; i < len; i++) {
9344            REG_WR8(sc, (addr + i), fill);
9345        }
9346    }
9347}
9348
9349/* writes FP SP data to FW - data_size in dwords */
9350static void
9351bxe_wr_fp_sb_data(struct bxe_softc *sc,
9352                  int              fw_sb_id,
9353                  uint32_t         *sb_data_p,
9354                  uint32_t         data_size)
9355{
9356    int index;
9357
9358    for (index = 0; index < data_size; index++) {
9359        REG_WR(sc,
9360               (BAR_CSTRORM_INTMEM +
9361                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9362                (sizeof(uint32_t) * index)),
9363               *(sb_data_p + index));
9364    }
9365}
9366
9367static void
9368bxe_zero_fp_sb(struct bxe_softc *sc,
9369               int              fw_sb_id)
9370{
9371    struct hc_status_block_data_e2 sb_data_e2;
9372    struct hc_status_block_data_e1x sb_data_e1x;
9373    uint32_t *sb_data_p;
9374    uint32_t data_size = 0;
9375
9376    if (!CHIP_IS_E1x(sc)) {
9377        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9378        sb_data_e2.common.state = SB_DISABLED;
9379        sb_data_e2.common.p_func.vf_valid = FALSE;
9380        sb_data_p = (uint32_t *)&sb_data_e2;
9381        data_size = (sizeof(struct hc_status_block_data_e2) /
9382                     sizeof(uint32_t));
9383    } else {
9384        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9385        sb_data_e1x.common.state = SB_DISABLED;
9386        sb_data_e1x.common.p_func.vf_valid = FALSE;
9387        sb_data_p = (uint32_t *)&sb_data_e1x;
9388        data_size = (sizeof(struct hc_status_block_data_e1x) /
9389                     sizeof(uint32_t));
9390    }
9391
9392    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9393
9394    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9395             0, CSTORM_STATUS_BLOCK_SIZE);
9396    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9397             0, CSTORM_SYNC_BLOCK_SIZE);
9398}
9399
9400static void
9401bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9402                  struct hc_sp_status_block_data *sp_sb_data)
9403{
9404    int i;
9405
9406    for (i = 0;
9407         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9408         i++) {
9409        REG_WR(sc,
9410               (BAR_CSTRORM_INTMEM +
9411                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9412                (i * sizeof(uint32_t))),
9413               *((uint32_t *)sp_sb_data + i));
9414    }
9415}
9416
9417static void
9418bxe_zero_sp_sb(struct bxe_softc *sc)
9419{
9420    struct hc_sp_status_block_data sp_sb_data;
9421
9422    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9423
9424    sp_sb_data.state           = SB_DISABLED;
9425    sp_sb_data.p_func.vf_valid = FALSE;
9426
9427    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9428
9429    bxe_fill(sc,
9430             (BAR_CSTRORM_INTMEM +
9431              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9432              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9433    bxe_fill(sc,
9434             (BAR_CSTRORM_INTMEM +
9435              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9436              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9437}
9438
9439static void
9440bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9441                             int                       igu_sb_id,
9442                             int                       igu_seg_id)
9443{
9444    hc_sm->igu_sb_id      = igu_sb_id;
9445    hc_sm->igu_seg_id     = igu_seg_id;
9446    hc_sm->timer_value    = 0xFF;
9447    hc_sm->time_to_expire = 0xFFFFFFFF;
9448}
9449
9450static void
9451bxe_map_sb_state_machines(struct hc_index_data *index_data)
9452{
9453    /* zero out state machine indices */
9454
9455    /* rx indices */
9456    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9457
9458    /* tx indices */
9459    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9460    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9461    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9462    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9463
9464    /* map indices */
9465
9466    /* rx indices */
9467    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9468        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9469
9470    /* tx indices */
9471    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9472        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9473    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9474        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9475    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9476        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9477    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9478        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9479}
9480
9481static void
9482bxe_init_sb(struct bxe_softc *sc,
9483            bus_addr_t       busaddr,
9484            int              vfid,
9485            uint8_t          vf_valid,
9486            int              fw_sb_id,
9487            int              igu_sb_id)
9488{
9489    struct hc_status_block_data_e2  sb_data_e2;
9490    struct hc_status_block_data_e1x sb_data_e1x;
9491    struct hc_status_block_sm       *hc_sm_p;
9492    uint32_t *sb_data_p;
9493    int igu_seg_id;
9494    int data_size;
9495
9496    if (CHIP_INT_MODE_IS_BC(sc)) {
9497        igu_seg_id = HC_SEG_ACCESS_NORM;
9498    } else {
9499        igu_seg_id = IGU_SEG_ACCESS_NORM;
9500    }
9501
9502    bxe_zero_fp_sb(sc, fw_sb_id);
9503
9504    if (!CHIP_IS_E1x(sc)) {
9505        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9506        sb_data_e2.common.state = SB_ENABLED;
9507        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9508        sb_data_e2.common.p_func.vf_id = vfid;
9509        sb_data_e2.common.p_func.vf_valid = vf_valid;
9510        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9511        sb_data_e2.common.same_igu_sb_1b = TRUE;
9512        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9513        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9514        hc_sm_p = sb_data_e2.common.state_machine;
9515        sb_data_p = (uint32_t *)&sb_data_e2;
9516        data_size = (sizeof(struct hc_status_block_data_e2) /
9517                     sizeof(uint32_t));
9518        bxe_map_sb_state_machines(sb_data_e2.index_data);
9519    } else {
9520        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9521        sb_data_e1x.common.state = SB_ENABLED;
9522        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9523        sb_data_e1x.common.p_func.vf_id = 0xff;
9524        sb_data_e1x.common.p_func.vf_valid = FALSE;
9525        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9526        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9527        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9528        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9529        hc_sm_p = sb_data_e1x.common.state_machine;
9530        sb_data_p = (uint32_t *)&sb_data_e1x;
9531        data_size = (sizeof(struct hc_status_block_data_e1x) /
9532                     sizeof(uint32_t));
9533        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9534    }
9535
9536    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9537    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9538
9539    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9540
9541    /* write indices to HW - PCI guarantees endianity of regpairs */
9542    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9543}
9544
9545static inline uint8_t
9546bxe_fp_qzone_id(struct bxe_fastpath *fp)
9547{
9548    if (CHIP_IS_E1x(fp->sc)) {
9549        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9550    } else {
9551        return (fp->cl_id);
9552    }
9553}
9554
9555static inline uint32_t
9556bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9557                           struct bxe_fastpath *fp)
9558{
9559    uint32_t offset = BAR_USTRORM_INTMEM;
9560
9561    if (!CHIP_IS_E1x(sc)) {
9562        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9563    } else {
9564        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9565    }
9566
9567    return (offset);
9568}
9569
9570static void
9571bxe_init_eth_fp(struct bxe_softc *sc,
9572                int              idx)
9573{
9574    struct bxe_fastpath *fp = &sc->fp[idx];
9575    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9576    unsigned long q_type = 0;
9577    int cos;
9578
9579    fp->sc    = sc;
9580    fp->index = idx;
9581
9582    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9583    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9584
9585    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9586                    (SC_L_ID(sc) + idx) :
9587                    /* want client ID same as IGU SB ID for non-E1 */
9588                    fp->igu_sb_id;
9589    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9590
9591    /* setup sb indices */
9592    if (!CHIP_IS_E1x(sc)) {
9593        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9594        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9595    } else {
9596        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9597        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9598    }
9599
9600    /* init shortcut */
9601    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9602
9603    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9604
9605    /*
9606     * XXX If multiple CoS is ever supported then each fastpath structure
9607     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9608     */
9609    for (cos = 0; cos < sc->max_cos; cos++) {
9610        cids[cos] = idx;
9611    }
9612    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9613
9614    /* nothing more for a VF to do */
9615    if (IS_VF(sc)) {
9616        return;
9617    }
9618
9619    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9620                fp->fw_sb_id, fp->igu_sb_id);
9621
9622    bxe_update_fp_sb_idx(fp);
9623
9624    /* Configure Queue State object */
9625    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9626    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9627
9628    ecore_init_queue_obj(sc,
9629                         &sc->sp_objs[idx].q_obj,
9630                         fp->cl_id,
9631                         cids,
9632                         sc->max_cos,
9633                         SC_FUNC(sc),
9634                         BXE_SP(sc, q_rdata),
9635                         BXE_SP_MAPPING(sc, q_rdata),
9636                         q_type);
9637
9638    /* configure classification DBs */
9639    ecore_init_mac_obj(sc,
9640                       &sc->sp_objs[idx].mac_obj,
9641                       fp->cl_id,
9642                       idx,
9643                       SC_FUNC(sc),
9644                       BXE_SP(sc, mac_rdata),
9645                       BXE_SP_MAPPING(sc, mac_rdata),
9646                       ECORE_FILTER_MAC_PENDING,
9647                       &sc->sp_state,
9648                       ECORE_OBJ_TYPE_RX_TX,
9649                       &sc->macs_pool);
9650
9651    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9652          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9653}
9654
9655static inline void
9656bxe_update_rx_prod(struct bxe_softc    *sc,
9657                   struct bxe_fastpath *fp,
9658                   uint16_t            rx_bd_prod,
9659                   uint16_t            rx_cq_prod,
9660                   uint16_t            rx_sge_prod)
9661{
9662    struct ustorm_eth_rx_producers rx_prods = { 0 };
9663    uint32_t i;
9664
9665    /* update producers */
9666    rx_prods.bd_prod  = rx_bd_prod;
9667    rx_prods.cqe_prod = rx_cq_prod;
9668    rx_prods.sge_prod = rx_sge_prod;
9669
9670    /*
9671     * Make sure that the BD and SGE data is updated before updating the
9672     * producers since FW might read the BD/SGE right after the producer
9673     * is updated.
9674     * This is only applicable for weak-ordered memory model archs such
9675     * as IA-64. The following barrier is also mandatory since FW will
9676     * assumes BDs must have buffers.
9677     */
9678    wmb();
9679
9680    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9681        REG_WR(sc,
9682               (fp->ustorm_rx_prods_offset + (i * 4)),
9683               ((uint32_t *)&rx_prods)[i]);
9684    }
9685
9686    wmb(); /* keep prod updates ordered */
9687
9688    BLOGD(sc, DBG_RX,
9689          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9690          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9691}
9692
9693static void
9694bxe_init_rx_rings(struct bxe_softc *sc)
9695{
9696    struct bxe_fastpath *fp;
9697    int i;
9698
9699    for (i = 0; i < sc->num_queues; i++) {
9700        fp = &sc->fp[i];
9701
9702        fp->rx_bd_cons = 0;
9703
9704        /*
9705         * Activate the BD ring...
9706         * Warning, this will generate an interrupt (to the TSTORM)
9707         * so this can only be done after the chip is initialized
9708         */
9709        bxe_update_rx_prod(sc, fp,
9710                           fp->rx_bd_prod,
9711                           fp->rx_cq_prod,
9712                           fp->rx_sge_prod);
9713
9714        if (i != 0) {
9715            continue;
9716        }
9717
9718        if (CHIP_IS_E1(sc)) {
9719            REG_WR(sc,
9720                   (BAR_USTRORM_INTMEM +
9721                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9722                   U64_LO(fp->rcq_dma.paddr));
9723            REG_WR(sc,
9724                   (BAR_USTRORM_INTMEM +
9725                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9726                   U64_HI(fp->rcq_dma.paddr));
9727        }
9728    }
9729}
9730
9731static void
9732bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9733{
9734    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9735    fp->tx_db.data.zero_fill1 = 0;
9736    fp->tx_db.data.prod = 0;
9737
9738    fp->tx_pkt_prod = 0;
9739    fp->tx_pkt_cons = 0;
9740    fp->tx_bd_prod = 0;
9741    fp->tx_bd_cons = 0;
9742    fp->eth_q_stats.tx_pkts = 0;
9743}
9744
9745static inline void
9746bxe_init_tx_rings(struct bxe_softc *sc)
9747{
9748    int i;
9749
9750    for (i = 0; i < sc->num_queues; i++) {
9751        bxe_init_tx_ring_one(&sc->fp[i]);
9752    }
9753}
9754
9755static void
9756bxe_init_def_sb(struct bxe_softc *sc)
9757{
9758    struct host_sp_status_block *def_sb = sc->def_sb;
9759    bus_addr_t mapping = sc->def_sb_dma.paddr;
9760    int igu_sp_sb_index;
9761    int igu_seg_id;
9762    int port = SC_PORT(sc);
9763    int func = SC_FUNC(sc);
9764    int reg_offset, reg_offset_en5;
9765    uint64_t section;
9766    int index, sindex;
9767    struct hc_sp_status_block_data sp_sb_data;
9768
9769    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9770
9771    if (CHIP_INT_MODE_IS_BC(sc)) {
9772        igu_sp_sb_index = DEF_SB_IGU_ID;
9773        igu_seg_id = HC_SEG_ACCESS_DEF;
9774    } else {
9775        igu_sp_sb_index = sc->igu_dsb_id;
9776        igu_seg_id = IGU_SEG_ACCESS_DEF;
9777    }
9778
9779    /* attentions */
9780    section = ((uint64_t)mapping +
9781               offsetof(struct host_sp_status_block, atten_status_block));
9782    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9783    sc->attn_state = 0;
9784
9785    reg_offset = (port) ?
9786                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9787                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9788    reg_offset_en5 = (port) ?
9789                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9790                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9791
9792    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9793        /* take care of sig[0]..sig[4] */
9794        for (sindex = 0; sindex < 4; sindex++) {
9795            sc->attn_group[index].sig[sindex] =
9796                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9797        }
9798
9799        if (!CHIP_IS_E1x(sc)) {
9800            /*
9801             * enable5 is separate from the rest of the registers,
9802             * and the address skip is 4 and not 16 between the
9803             * different groups
9804             */
9805            sc->attn_group[index].sig[4] =
9806                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9807        } else {
9808            sc->attn_group[index].sig[4] = 0;
9809        }
9810    }
9811
9812    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9813        reg_offset = (port) ?
9814                         HC_REG_ATTN_MSG1_ADDR_L :
9815                         HC_REG_ATTN_MSG0_ADDR_L;
9816        REG_WR(sc, reg_offset, U64_LO(section));
9817        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9818    } else if (!CHIP_IS_E1x(sc)) {
9819        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9820        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9821    }
9822
9823    section = ((uint64_t)mapping +
9824               offsetof(struct host_sp_status_block, sp_sb));
9825
9826    bxe_zero_sp_sb(sc);
9827
9828    /* PCI guarantees endianity of regpair */
9829    sp_sb_data.state           = SB_ENABLED;
9830    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9831    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9832    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9833    sp_sb_data.igu_seg_id      = igu_seg_id;
9834    sp_sb_data.p_func.pf_id    = func;
9835    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9836    sp_sb_data.p_func.vf_id    = 0xff;
9837
9838    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9839
9840    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9841}
9842
9843static void
9844bxe_init_sp_ring(struct bxe_softc *sc)
9845{
9846    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9847    sc->spq_prod_idx = 0;
9848    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9849    sc->spq_prod_bd = sc->spq;
9850    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9851}
9852
9853static void
9854bxe_init_eq_ring(struct bxe_softc *sc)
9855{
9856    union event_ring_elem *elem;
9857    int i;
9858
9859    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9860        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9861
9862        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9863                                                 BCM_PAGE_SIZE *
9864                                                 (i % NUM_EQ_PAGES)));
9865        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9866                                                 BCM_PAGE_SIZE *
9867                                                 (i % NUM_EQ_PAGES)));
9868    }
9869
9870    sc->eq_cons    = 0;
9871    sc->eq_prod    = NUM_EQ_DESC;
9872    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9873
9874    atomic_store_rel_long(&sc->eq_spq_left,
9875                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9876                               NUM_EQ_DESC) - 1));
9877}
9878
9879static void
9880bxe_init_internal_common(struct bxe_softc *sc)
9881{
9882    int i;
9883
9884    /*
9885     * Zero this manually as its initialization is currently missing
9886     * in the initTool.
9887     */
9888    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9889        REG_WR(sc,
9890               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9891               0);
9892    }
9893
9894    if (!CHIP_IS_E1x(sc)) {
9895        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9896                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9897    }
9898}
9899
9900static void
9901bxe_init_internal(struct bxe_softc *sc,
9902                  uint32_t         load_code)
9903{
9904    switch (load_code) {
9905    case FW_MSG_CODE_DRV_LOAD_COMMON:
9906    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9907        bxe_init_internal_common(sc);
9908        /* no break */
9909
9910    case FW_MSG_CODE_DRV_LOAD_PORT:
9911        /* nothing to do */
9912        /* no break */
9913
9914    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9915        /* internal memory per function is initialized inside bxe_pf_init */
9916        break;
9917
9918    default:
9919        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9920        break;
9921    }
9922}
9923
9924static void
9925storm_memset_func_cfg(struct bxe_softc                         *sc,
9926                      struct tstorm_eth_function_common_config *tcfg,
9927                      uint16_t                                  abs_fid)
9928{
9929    uint32_t addr;
9930    size_t size;
9931
9932    addr = (BAR_TSTRORM_INTMEM +
9933            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9934    size = sizeof(struct tstorm_eth_function_common_config);
9935    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9936}
9937
9938static void
9939bxe_func_init(struct bxe_softc            *sc,
9940              struct bxe_func_init_params *p)
9941{
9942    struct tstorm_eth_function_common_config tcfg = { 0 };
9943
9944    if (CHIP_IS_E1x(sc)) {
9945        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9946    }
9947
9948    /* Enable the function in the FW */
9949    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9950    storm_memset_func_en(sc, p->func_id, 1);
9951
9952    /* spq */
9953    if (p->func_flgs & FUNC_FLG_SPQ) {
9954        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9955        REG_WR(sc,
9956               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9957               p->spq_prod);
9958    }
9959}
9960
9961/*
9962 * Calculates the sum of vn_min_rates.
9963 * It's needed for further normalizing of the min_rates.
9964 * Returns:
9965 *   sum of vn_min_rates.
9966 *     or
9967 *   0 - if all the min_rates are 0.
9968 * In the later case fainess algorithm should be deactivated.
9969 * If all min rates are not zero then those that are zeroes will be set to 1.
9970 */
9971static void
9972bxe_calc_vn_min(struct bxe_softc       *sc,
9973                struct cmng_init_input *input)
9974{
9975    uint32_t vn_cfg;
9976    uint32_t vn_min_rate;
9977    int all_zero = 1;
9978    int vn;
9979
9980    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9981        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9982        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9983                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
9984
9985        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9986            /* skip hidden VNs */
9987            vn_min_rate = 0;
9988        } else if (!vn_min_rate) {
9989            /* If min rate is zero - set it to 100 */
9990            vn_min_rate = DEF_MIN_RATE;
9991        } else {
9992            all_zero = 0;
9993        }
9994
9995        input->vnic_min_rate[vn] = vn_min_rate;
9996    }
9997
9998    /* if ETS or all min rates are zeros - disable fairness */
9999    if (BXE_IS_ETS_ENABLED(sc)) {
10000        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10001        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10002    } else if (all_zero) {
10003        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10004        BLOGD(sc, DBG_LOAD,
10005              "Fariness disabled (all MIN values are zeroes)\n");
10006    } else {
10007        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10008    }
10009}
10010
10011static inline uint16_t
10012bxe_extract_max_cfg(struct bxe_softc *sc,
10013                    uint32_t         mf_cfg)
10014{
10015    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10016                        FUNC_MF_CFG_MAX_BW_SHIFT);
10017
10018    if (!max_cfg) {
10019        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10020        max_cfg = 100;
10021    }
10022
10023    return (max_cfg);
10024}
10025
10026static void
10027bxe_calc_vn_max(struct bxe_softc       *sc,
10028                int                    vn,
10029                struct cmng_init_input *input)
10030{
10031    uint16_t vn_max_rate;
10032    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10033    uint32_t max_cfg;
10034
10035    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10036        vn_max_rate = 0;
10037    } else {
10038        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10039
10040        if (IS_MF_SI(sc)) {
10041            /* max_cfg in percents of linkspeed */
10042            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10043        } else { /* SD modes */
10044            /* max_cfg is absolute in 100Mb units */
10045            vn_max_rate = (max_cfg * 100);
10046        }
10047    }
10048
10049    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10050
10051    input->vnic_max_rate[vn] = vn_max_rate;
10052}
10053
10054static void
10055bxe_cmng_fns_init(struct bxe_softc *sc,
10056                  uint8_t          read_cfg,
10057                  uint8_t          cmng_type)
10058{
10059    struct cmng_init_input input;
10060    int vn;
10061
10062    memset(&input, 0, sizeof(struct cmng_init_input));
10063
10064    input.port_rate = sc->link_vars.line_speed;
10065
10066    if (cmng_type == CMNG_FNS_MINMAX) {
10067        /* read mf conf from shmem */
10068        if (read_cfg) {
10069            bxe_read_mf_cfg(sc);
10070        }
10071
10072        /* get VN min rate and enable fairness if not 0 */
10073        bxe_calc_vn_min(sc, &input);
10074
10075        /* get VN max rate */
10076        if (sc->port.pmf) {
10077            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10078                bxe_calc_vn_max(sc, vn, &input);
10079            }
10080        }
10081
10082        /* always enable rate shaping and fairness */
10083        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10084
10085        ecore_init_cmng(&input, &sc->cmng);
10086        return;
10087    }
10088
10089    /* rate shaping and fairness are disabled */
10090    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10091}
10092
10093static int
10094bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10095{
10096    if (CHIP_REV_IS_SLOW(sc)) {
10097        return (CMNG_FNS_NONE);
10098    }
10099
10100    if (IS_MF(sc)) {
10101        return (CMNG_FNS_MINMAX);
10102    }
10103
10104    return (CMNG_FNS_NONE);
10105}
10106
10107static void
10108storm_memset_cmng(struct bxe_softc *sc,
10109                  struct cmng_init *cmng,
10110                  uint8_t          port)
10111{
10112    int vn;
10113    int func;
10114    uint32_t addr;
10115    size_t size;
10116
10117    addr = (BAR_XSTRORM_INTMEM +
10118            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10119    size = sizeof(struct cmng_struct_per_port);
10120    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10121
10122    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10123        func = func_by_vn(sc, vn);
10124
10125        addr = (BAR_XSTRORM_INTMEM +
10126                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10127        size = sizeof(struct rate_shaping_vars_per_vn);
10128        ecore_storm_memset_struct(sc, addr, size,
10129                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10130
10131        addr = (BAR_XSTRORM_INTMEM +
10132                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10133        size = sizeof(struct fairness_vars_per_vn);
10134        ecore_storm_memset_struct(sc, addr, size,
10135                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10136    }
10137}
10138
10139static void
10140bxe_pf_init(struct bxe_softc *sc)
10141{
10142    struct bxe_func_init_params func_init = { 0 };
10143    struct event_ring_data eq_data = { { 0 } };
10144    uint16_t flags;
10145
10146    if (!CHIP_IS_E1x(sc)) {
10147        /* reset IGU PF statistics: MSIX + ATTN */
10148        /* PF */
10149        REG_WR(sc,
10150               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10151                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10152                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10153               0);
10154        /* ATTN */
10155        REG_WR(sc,
10156               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10157                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10158                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10159                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10160               0);
10161    }
10162
10163    /* function setup flags */
10164    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10165
10166    /*
10167     * This flag is relevant for E1x only.
10168     * E2 doesn't have a TPA configuration in a function level.
10169     */
10170    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10171
10172    func_init.func_flgs = flags;
10173    func_init.pf_id     = SC_FUNC(sc);
10174    func_init.func_id   = SC_FUNC(sc);
10175    func_init.spq_map   = sc->spq_dma.paddr;
10176    func_init.spq_prod  = sc->spq_prod_idx;
10177
10178    bxe_func_init(sc, &func_init);
10179
10180    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10181
10182    /*
10183     * Congestion management values depend on the link rate.
10184     * There is no active link so initial link rate is set to 10Gbps.
10185     * When the link comes up the congestion management values are
10186     * re-calculated according to the actual link rate.
10187     */
10188    sc->link_vars.line_speed = SPEED_10000;
10189    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10190
10191    /* Only the PMF sets the HW */
10192    if (sc->port.pmf) {
10193        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10194    }
10195
10196    /* init Event Queue - PCI bus guarantees correct endainity */
10197    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10198    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10199    eq_data.producer     = sc->eq_prod;
10200    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10201    eq_data.sb_id        = DEF_SB_ID;
10202    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10203}
10204
10205static void
10206bxe_hc_int_enable(struct bxe_softc *sc)
10207{
10208    int port = SC_PORT(sc);
10209    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10210    uint32_t val = REG_RD(sc, addr);
10211    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10212    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10213                           (sc->intr_count == 1)) ? TRUE : FALSE;
10214    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10215
10216    if (msix) {
10217        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10218                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10219        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10220                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10221        if (single_msix) {
10222            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10223        }
10224    } else if (msi) {
10225        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10226        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10227                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10228                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10229    } else {
10230        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10231                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10232                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10233                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10234
10235        if (!CHIP_IS_E1(sc)) {
10236            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10237                  val, port, addr);
10238
10239            REG_WR(sc, addr, val);
10240
10241            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10242        }
10243    }
10244
10245    if (CHIP_IS_E1(sc)) {
10246        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10247    }
10248
10249    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10250          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10251
10252    REG_WR(sc, addr, val);
10253
10254    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10255    mb();
10256
10257    if (!CHIP_IS_E1(sc)) {
10258        /* init leading/trailing edge */
10259        if (IS_MF(sc)) {
10260            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10261            if (sc->port.pmf) {
10262                /* enable nig and gpio3 attention */
10263                val |= 0x1100;
10264            }
10265        } else {
10266            val = 0xffff;
10267        }
10268
10269        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10270        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10271    }
10272
10273    /* make sure that interrupts are indeed enabled from here on */
10274    mb();
10275}
10276
10277static void
10278bxe_igu_int_enable(struct bxe_softc *sc)
10279{
10280    uint32_t val;
10281    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10282    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10283                           (sc->intr_count == 1)) ? TRUE : FALSE;
10284    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10285
10286    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10287
10288    if (msix) {
10289        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10290                 IGU_PF_CONF_SINGLE_ISR_EN);
10291        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10292                IGU_PF_CONF_ATTN_BIT_EN);
10293        if (single_msix) {
10294            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10295        }
10296    } else if (msi) {
10297        val &= ~IGU_PF_CONF_INT_LINE_EN;
10298        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10299                IGU_PF_CONF_ATTN_BIT_EN |
10300                IGU_PF_CONF_SINGLE_ISR_EN);
10301    } else {
10302        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10303        val |= (IGU_PF_CONF_INT_LINE_EN |
10304                IGU_PF_CONF_ATTN_BIT_EN |
10305                IGU_PF_CONF_SINGLE_ISR_EN);
10306    }
10307
10308    /* clean previous status - need to configure igu prior to ack*/
10309    if ((!msix) || single_msix) {
10310        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10311        bxe_ack_int(sc);
10312    }
10313
10314    val |= IGU_PF_CONF_FUNC_EN;
10315
10316    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10317          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10318
10319    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10320
10321    mb();
10322
10323    /* init leading/trailing edge */
10324    if (IS_MF(sc)) {
10325        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10326        if (sc->port.pmf) {
10327            /* enable nig and gpio3 attention */
10328            val |= 0x1100;
10329        }
10330    } else {
10331        val = 0xffff;
10332    }
10333
10334    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10335    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10336
10337    /* make sure that interrupts are indeed enabled from here on */
10338    mb();
10339}
10340
10341static void
10342bxe_int_enable(struct bxe_softc *sc)
10343{
10344    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10345        bxe_hc_int_enable(sc);
10346    } else {
10347        bxe_igu_int_enable(sc);
10348    }
10349}
10350
10351static void
10352bxe_hc_int_disable(struct bxe_softc *sc)
10353{
10354    int port = SC_PORT(sc);
10355    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10356    uint32_t val = REG_RD(sc, addr);
10357
10358    /*
10359     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10360     * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10361     * block
10362     */
10363    if (CHIP_IS_E1(sc)) {
10364        /*
10365         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
10366         * to prevent from HC sending interrupts after we exit the function
10367         */
10368        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10369
10370        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10371                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10372                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10373    } else {
10374        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10375                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10376                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10377                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10378    }
10379
10380    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10381
10382    /* flush all outstanding writes */
10383    mb();
10384
10385    REG_WR(sc, addr, val);
10386    if (REG_RD(sc, addr) != val) {
10387        BLOGE(sc, "proper val not read from HC IGU!\n");
10388    }
10389}
10390
10391static void
10392bxe_igu_int_disable(struct bxe_softc *sc)
10393{
10394    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10395
10396    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10397             IGU_PF_CONF_INT_LINE_EN |
10398             IGU_PF_CONF_ATTN_BIT_EN);
10399
10400    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10401
10402    /* flush all outstanding writes */
10403    mb();
10404
10405    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10406    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10407        BLOGE(sc, "proper val not read from IGU!\n");
10408    }
10409}
10410
10411static void
10412bxe_int_disable(struct bxe_softc *sc)
10413{
10414    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10415        bxe_hc_int_disable(sc);
10416    } else {
10417        bxe_igu_int_disable(sc);
10418    }
10419}
10420
10421static void
10422bxe_nic_init(struct bxe_softc *sc,
10423             int              load_code)
10424{
10425    int i;
10426
10427    for (i = 0; i < sc->num_queues; i++) {
10428        bxe_init_eth_fp(sc, i);
10429    }
10430
10431    rmb(); /* ensure status block indices were read */
10432
10433    bxe_init_rx_rings(sc);
10434    bxe_init_tx_rings(sc);
10435
10436    if (IS_VF(sc)) {
10437        return;
10438    }
10439
10440    /* initialize MOD_ABS interrupts */
10441    elink_init_mod_abs_int(sc, &sc->link_vars,
10442                           sc->devinfo.chip_id,
10443                           sc->devinfo.shmem_base,
10444                           sc->devinfo.shmem2_base,
10445                           SC_PORT(sc));
10446
10447    bxe_init_def_sb(sc);
10448    bxe_update_dsb_idx(sc);
10449    bxe_init_sp_ring(sc);
10450    bxe_init_eq_ring(sc);
10451    bxe_init_internal(sc, load_code);
10452    bxe_pf_init(sc);
10453    bxe_stats_init(sc);
10454
10455    /* flush all before enabling interrupts */
10456    mb();
10457
10458    bxe_int_enable(sc);
10459
10460    /* check for SPIO5 */
10461    bxe_attn_int_deasserted0(sc,
10462                             REG_RD(sc,
10463                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10464                                     SC_PORT(sc)*4)) &
10465                             AEU_INPUTS_ATTN_BITS_SPIO5);
10466}
10467
10468static inline void
10469bxe_init_objs(struct bxe_softc *sc)
10470{
10471    /* mcast rules must be added to tx if tx switching is enabled */
10472    ecore_obj_type o_type =
10473        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10474                                         ECORE_OBJ_TYPE_RX;
10475
10476    /* RX_MODE controlling object */
10477    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10478
10479    /* multicast configuration controlling object */
10480    ecore_init_mcast_obj(sc,
10481                         &sc->mcast_obj,
10482                         sc->fp[0].cl_id,
10483                         sc->fp[0].index,
10484                         SC_FUNC(sc),
10485                         SC_FUNC(sc),
10486                         BXE_SP(sc, mcast_rdata),
10487                         BXE_SP_MAPPING(sc, mcast_rdata),
10488                         ECORE_FILTER_MCAST_PENDING,
10489                         &sc->sp_state,
10490                         o_type);
10491
10492    /* Setup CAM credit pools */
10493    ecore_init_mac_credit_pool(sc,
10494                               &sc->macs_pool,
10495                               SC_FUNC(sc),
10496                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10497                                                 VNICS_PER_PATH(sc));
10498
10499    ecore_init_vlan_credit_pool(sc,
10500                                &sc->vlans_pool,
10501                                SC_ABS_FUNC(sc) >> 1,
10502                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10503                                                  VNICS_PER_PATH(sc));
10504
10505    /* RSS configuration object */
10506    ecore_init_rss_config_obj(sc,
10507                              &sc->rss_conf_obj,
10508                              sc->fp[0].cl_id,
10509                              sc->fp[0].index,
10510                              SC_FUNC(sc),
10511                              SC_FUNC(sc),
10512                              BXE_SP(sc, rss_rdata),
10513                              BXE_SP_MAPPING(sc, rss_rdata),
10514                              ECORE_FILTER_RSS_CONF_PENDING,
10515                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10516}
10517
10518/*
10519 * Initialize the function. This must be called before sending CLIENT_SETUP
10520 * for the first client.
10521 */
10522static inline int
10523bxe_func_start(struct bxe_softc *sc)
10524{
10525    struct ecore_func_state_params func_params = { NULL };
10526    struct ecore_func_start_params *start_params = &func_params.params.start;
10527
10528    /* Prepare parameters for function state transitions */
10529    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10530
10531    func_params.f_obj = &sc->func_obj;
10532    func_params.cmd = ECORE_F_CMD_START;
10533
10534    /* Function parameters */
10535    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10536    start_params->sd_vlan_tag = OVLAN(sc);
10537
10538    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10539        start_params->network_cos_mode = STATIC_COS;
10540    } else { /* CHIP_IS_E1X */
10541        start_params->network_cos_mode = FW_WRR;
10542    }
10543
10544    //start_params->gre_tunnel_mode = 0;
10545    //start_params->gre_tunnel_rss  = 0;
10546
10547    return (ecore_func_state_change(sc, &func_params));
10548}
10549
10550static int
10551bxe_set_power_state(struct bxe_softc *sc,
10552                    uint8_t          state)
10553{
10554    uint16_t pmcsr;
10555
10556    /* If there is no power capability, silently succeed */
10557    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10558        BLOGW(sc, "No power capability\n");
10559        return (0);
10560    }
10561
10562    pmcsr = pci_read_config(sc->dev,
10563                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10564                            2);
10565
10566    switch (state) {
10567    case PCI_PM_D0:
10568        pci_write_config(sc->dev,
10569                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10570                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10571
10572        if (pmcsr & PCIM_PSTAT_DMASK) {
10573            /* delay required during transition out of D3hot */
10574            DELAY(20000);
10575        }
10576
10577        break;
10578
10579    case PCI_PM_D3hot:
10580        /* XXX if there are other clients above don't shut down the power */
10581
10582        /* don't shut down the power for emulation and FPGA */
10583        if (CHIP_REV_IS_SLOW(sc)) {
10584            return (0);
10585        }
10586
10587        pmcsr &= ~PCIM_PSTAT_DMASK;
10588        pmcsr |= PCIM_PSTAT_D3;
10589
10590        if (sc->wol) {
10591            pmcsr |= PCIM_PSTAT_PMEENABLE;
10592        }
10593
10594        pci_write_config(sc->dev,
10595                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10596                         pmcsr, 4);
10597
10598        /*
10599         * No more memory access after this point until device is brought back
10600         * to D0 state.
10601         */
10602        break;
10603
10604    default:
10605        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10606            state, pmcsr);
10607        return (-1);
10608    }
10609
10610    return (0);
10611}
10612
10613
10614/* return true if succeeded to acquire the lock */
10615static uint8_t
10616bxe_trylock_hw_lock(struct bxe_softc *sc,
10617                    uint32_t         resource)
10618{
10619    uint32_t lock_status;
10620    uint32_t resource_bit = (1 << resource);
10621    int func = SC_FUNC(sc);
10622    uint32_t hw_lock_control_reg;
10623
10624    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10625
10626    /* Validating that the resource is within range */
10627    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10628        BLOGD(sc, DBG_LOAD,
10629              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10630              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10631        return (FALSE);
10632    }
10633
10634    if (func <= 5) {
10635        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10636    } else {
10637        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10638    }
10639
10640    /* try to acquire the lock */
10641    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10642    lock_status = REG_RD(sc, hw_lock_control_reg);
10643    if (lock_status & resource_bit) {
10644        return (TRUE);
10645    }
10646
10647    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10648        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10649        lock_status, resource_bit);
10650
10651    return (FALSE);
10652}
10653
10654/*
10655 * Get the recovery leader resource id according to the engine this function
10656 * belongs to. Currently only only 2 engines is supported.
10657 */
10658static int
10659bxe_get_leader_lock_resource(struct bxe_softc *sc)
10660{
10661    if (SC_PATH(sc)) {
10662        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10663    } else {
10664        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10665    }
10666}
10667
10668/* try to acquire a leader lock for current engine */
10669static uint8_t
10670bxe_trylock_leader_lock(struct bxe_softc *sc)
10671{
10672    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10673}
10674
10675static int
10676bxe_release_leader_lock(struct bxe_softc *sc)
10677{
10678    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10679}
10680
10681/* close gates #2, #3 and #4 */
10682static void
10683bxe_set_234_gates(struct bxe_softc *sc,
10684                  uint8_t          close)
10685{
10686    uint32_t val;
10687
10688    /* gates #2 and #4a are closed/opened for "not E1" only */
10689    if (!CHIP_IS_E1(sc)) {
10690        /* #4 */
10691        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10692        /* #2 */
10693        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10694    }
10695
10696    /* #3 */
10697    if (CHIP_IS_E1x(sc)) {
10698        /* prevent interrupts from HC on both ports */
10699        val = REG_RD(sc, HC_REG_CONFIG_1);
10700        REG_WR(sc, HC_REG_CONFIG_1,
10701               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10702               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10703
10704        val = REG_RD(sc, HC_REG_CONFIG_0);
10705        REG_WR(sc, HC_REG_CONFIG_0,
10706               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10707               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10708    } else {
10709        /* Prevent incoming interrupts in IGU */
10710        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10711
10712        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10713               (!close) ?
10714               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10715               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10716    }
10717
10718    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10719          close ? "closing" : "opening");
10720
10721    wmb();
10722}
10723
10724/* poll for pending writes bit, it should get cleared in no more than 1s */
10725static int
10726bxe_er_poll_igu_vq(struct bxe_softc *sc)
10727{
10728    uint32_t cnt = 1000;
10729    uint32_t pend_bits = 0;
10730
10731    do {
10732        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10733
10734        if (pend_bits == 0) {
10735            break;
10736        }
10737
10738        DELAY(1000);
10739    } while (--cnt > 0);
10740
10741    if (cnt == 0) {
10742        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10743        return (-1);
10744    }
10745
10746    return (0);
10747}
10748
10749#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10750
10751static void
10752bxe_clp_reset_prep(struct bxe_softc *sc,
10753                   uint32_t         *magic_val)
10754{
10755    /* Do some magic... */
10756    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10757    *magic_val = val & SHARED_MF_CLP_MAGIC;
10758    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10759}
10760
10761/* restore the value of the 'magic' bit */
10762static void
10763bxe_clp_reset_done(struct bxe_softc *sc,
10764                   uint32_t         magic_val)
10765{
10766    /* Restore the 'magic' bit value... */
10767    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10768    MFCFG_WR(sc, shared_mf_config.clp_mb,
10769              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10770}
10771
10772/* prepare for MCP reset, takes care of CLP configurations */
10773static void
10774bxe_reset_mcp_prep(struct bxe_softc *sc,
10775                   uint32_t         *magic_val)
10776{
10777    uint32_t shmem;
10778    uint32_t validity_offset;
10779
10780    /* set `magic' bit in order to save MF config */
10781    if (!CHIP_IS_E1(sc)) {
10782        bxe_clp_reset_prep(sc, magic_val);
10783    }
10784
10785    /* get shmem offset */
10786    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10787    validity_offset =
10788        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10789
10790    /* Clear validity map flags */
10791    if (shmem > 0) {
10792        REG_WR(sc, shmem + validity_offset, 0);
10793    }
10794}
10795
10796#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10797#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10798
10799static void
10800bxe_mcp_wait_one(struct bxe_softc *sc)
10801{
10802    /* special handling for emulation and FPGA (10 times longer) */
10803    if (CHIP_REV_IS_SLOW(sc)) {
10804        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10805    } else {
10806        DELAY((MCP_ONE_TIMEOUT) * 1000);
10807    }
10808}
10809
10810/* initialize shmem_base and waits for validity signature to appear */
10811static int
10812bxe_init_shmem(struct bxe_softc *sc)
10813{
10814    int cnt = 0;
10815    uint32_t val = 0;
10816
10817    do {
10818        sc->devinfo.shmem_base     =
10819        sc->link_params.shmem_base =
10820            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10821
10822        if (sc->devinfo.shmem_base) {
10823            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10824            if (val & SHR_MEM_VALIDITY_MB)
10825                return (0);
10826        }
10827
10828        bxe_mcp_wait_one(sc);
10829
10830    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10831
10832    BLOGE(sc, "BAD MCP validity signature\n");
10833
10834    return (-1);
10835}
10836
10837static int
10838bxe_reset_mcp_comp(struct bxe_softc *sc,
10839                   uint32_t         magic_val)
10840{
10841    int rc = bxe_init_shmem(sc);
10842
10843    /* Restore the `magic' bit value */
10844    if (!CHIP_IS_E1(sc)) {
10845        bxe_clp_reset_done(sc, magic_val);
10846    }
10847
10848    return (rc);
10849}
10850
10851static void
10852bxe_pxp_prep(struct bxe_softc *sc)
10853{
10854    if (!CHIP_IS_E1(sc)) {
10855        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10856        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10857        wmb();
10858    }
10859}
10860
10861/*
10862 * Reset the whole chip except for:
10863 *      - PCIE core
10864 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10865 *      - IGU
10866 *      - MISC (including AEU)
10867 *      - GRC
10868 *      - RBCN, RBCP
10869 */
10870static void
10871bxe_process_kill_chip_reset(struct bxe_softc *sc,
10872                            uint8_t          global)
10873{
10874    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10875    uint32_t global_bits2, stay_reset2;
10876
10877    /*
10878     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10879     * (per chip) blocks.
10880     */
10881    global_bits2 =
10882        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10883        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10884
10885    /*
10886     * Don't reset the following blocks.
10887     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10888     *            reset, as in 4 port device they might still be owned
10889     *            by the MCP (there is only one leader per path).
10890     */
10891    not_reset_mask1 =
10892        MISC_REGISTERS_RESET_REG_1_RST_HC |
10893        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10894        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10895
10896    not_reset_mask2 =
10897        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10898        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10899        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10900        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10901        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10902        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10903        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10904        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10905        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10906        MISC_REGISTERS_RESET_REG_2_PGLC |
10907        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10908        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10909        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10910        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10911        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10912        MISC_REGISTERS_RESET_REG_2_UMAC1;
10913
10914    /*
10915     * Keep the following blocks in reset:
10916     *  - all xxMACs are handled by the elink code.
10917     */
10918    stay_reset2 =
10919        MISC_REGISTERS_RESET_REG_2_XMAC |
10920        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10921
10922    /* Full reset masks according to the chip */
10923    reset_mask1 = 0xffffffff;
10924
10925    if (CHIP_IS_E1(sc))
10926        reset_mask2 = 0xffff;
10927    else if (CHIP_IS_E1H(sc))
10928        reset_mask2 = 0x1ffff;
10929    else if (CHIP_IS_E2(sc))
10930        reset_mask2 = 0xfffff;
10931    else /* CHIP_IS_E3 */
10932        reset_mask2 = 0x3ffffff;
10933
10934    /* Don't reset global blocks unless we need to */
10935    if (!global)
10936        reset_mask2 &= ~global_bits2;
10937
10938    /*
10939     * In case of attention in the QM, we need to reset PXP
10940     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10941     * because otherwise QM reset would release 'close the gates' shortly
10942     * before resetting the PXP, then the PSWRQ would send a write
10943     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10944     * read the payload data from PSWWR, but PSWWR would not
10945     * respond. The write queue in PGLUE would stuck, dmae commands
10946     * would not return. Therefore it's important to reset the second
10947     * reset register (containing the
10948     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10949     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10950     * bit).
10951     */
10952    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10953           reset_mask2 & (~not_reset_mask2));
10954
10955    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10956           reset_mask1 & (~not_reset_mask1));
10957
10958    mb();
10959    wmb();
10960
10961    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10962           reset_mask2 & (~stay_reset2));
10963
10964    mb();
10965    wmb();
10966
10967    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10968    wmb();
10969}
10970
10971static int
10972bxe_process_kill(struct bxe_softc *sc,
10973                 uint8_t          global)
10974{
10975    int cnt = 1000;
10976    uint32_t val = 0;
10977    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10978    uint32_t tags_63_32 = 0;
10979
10980    /* Empty the Tetris buffer, wait for 1s */
10981    do {
10982        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10983        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10984        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10985        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10986        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10987        if (CHIP_IS_E3(sc)) {
10988            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10989        }
10990
10991        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10992            ((port_is_idle_0 & 0x1) == 0x1) &&
10993            ((port_is_idle_1 & 0x1) == 0x1) &&
10994            (pgl_exp_rom2 == 0xffffffff) &&
10995            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
10996            break;
10997        DELAY(1000);
10998    } while (cnt-- > 0);
10999
11000    if (cnt <= 0) {
11001        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11002                  "are still outstanding read requests after 1s! "
11003                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11004                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11005              sr_cnt, blk_cnt, port_is_idle_0,
11006              port_is_idle_1, pgl_exp_rom2);
11007        return (-1);
11008    }
11009
11010    mb();
11011
11012    /* Close gates #2, #3 and #4 */
11013    bxe_set_234_gates(sc, TRUE);
11014
11015    /* Poll for IGU VQs for 57712 and newer chips */
11016    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11017        return (-1);
11018    }
11019
11020    /* XXX indicate that "process kill" is in progress to MCP */
11021
11022    /* clear "unprepared" bit */
11023    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11024    mb();
11025
11026    /* Make sure all is written to the chip before the reset */
11027    wmb();
11028
11029    /*
11030     * Wait for 1ms to empty GLUE and PCI-E core queues,
11031     * PSWHST, GRC and PSWRD Tetris buffer.
11032     */
11033    DELAY(1000);
11034
11035    /* Prepare to chip reset: */
11036    /* MCP */
11037    if (global) {
11038        bxe_reset_mcp_prep(sc, &val);
11039    }
11040
11041    /* PXP */
11042    bxe_pxp_prep(sc);
11043    mb();
11044
11045    /* reset the chip */
11046    bxe_process_kill_chip_reset(sc, global);
11047    mb();
11048
11049    /* clear errors in PGB */
11050    if (!CHIP_IS_E1(sc))
11051        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11052
11053    /* Recover after reset: */
11054    /* MCP */
11055    if (global && bxe_reset_mcp_comp(sc, val)) {
11056        return (-1);
11057    }
11058
11059    /* XXX add resetting the NO_MCP mode DB here */
11060
11061    /* Open the gates #2, #3 and #4 */
11062    bxe_set_234_gates(sc, FALSE);
11063
11064    /* XXX
11065     * IGU/AEU preparation bring back the AEU/IGU to a reset state
11066     * re-enable attentions
11067     */
11068
11069    return (0);
11070}
11071
11072static int
11073bxe_leader_reset(struct bxe_softc *sc)
11074{
11075    int rc = 0;
11076    uint8_t global = bxe_reset_is_global(sc);
11077    uint32_t load_code;
11078
11079    /*
11080     * If not going to reset MCP, load "fake" driver to reset HW while
11081     * driver is owner of the HW.
11082     */
11083    if (!global && !BXE_NOMCP(sc)) {
11084        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11085                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11086        if (!load_code) {
11087            BLOGE(sc, "MCP response failure, aborting\n");
11088            rc = -1;
11089            goto exit_leader_reset;
11090        }
11091
11092        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11093            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11094            BLOGE(sc, "MCP unexpected response, aborting\n");
11095            rc = -1;
11096            goto exit_leader_reset2;
11097        }
11098
11099        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11100        if (!load_code) {
11101            BLOGE(sc, "MCP response failure, aborting\n");
11102            rc = -1;
11103            goto exit_leader_reset2;
11104        }
11105    }
11106
11107    /* try to recover after the failure */
11108    if (bxe_process_kill(sc, global)) {
11109        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11110        rc = -1;
11111        goto exit_leader_reset2;
11112    }
11113
11114    /*
11115     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11116     * state.
11117     */
11118    bxe_set_reset_done(sc);
11119    if (global) {
11120        bxe_clear_reset_global(sc);
11121    }
11122
11123exit_leader_reset2:
11124
11125    /* unload "fake driver" if it was loaded */
11126    if (!global && !BXE_NOMCP(sc)) {
11127        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11128        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11129    }
11130
11131exit_leader_reset:
11132
11133    sc->is_leader = 0;
11134    bxe_release_leader_lock(sc);
11135
11136    mb();
11137    return (rc);
11138}
11139
11140/*
11141 * prepare INIT transition, parameters configured:
11142 *   - HC configuration
11143 *   - Queue's CDU context
11144 */
11145static void
11146bxe_pf_q_prep_init(struct bxe_softc               *sc,
11147                   struct bxe_fastpath            *fp,
11148                   struct ecore_queue_init_params *init_params)
11149{
11150    uint8_t cos;
11151    int cxt_index, cxt_offset;
11152
11153    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11154    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11155
11156    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11157    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11158
11159    /* HC rate */
11160    init_params->rx.hc_rate =
11161        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11162    init_params->tx.hc_rate =
11163        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11164
11165    /* FW SB ID */
11166    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11167
11168    /* CQ index among the SB indices */
11169    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11170    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11171
11172    /* set maximum number of COSs supported by this queue */
11173    init_params->max_cos = sc->max_cos;
11174
11175    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11176          fp->index, init_params->max_cos);
11177
11178    /* set the context pointers queue object */
11179    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11180        /* XXX change index/cid here if ever support multiple tx CoS */
11181        /* fp->txdata[cos]->cid */
11182        cxt_index = fp->index / ILT_PAGE_CIDS;
11183        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11184        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11185    }
11186}
11187
11188/* set flags that are common for the Tx-only and not normal connections */
11189static unsigned long
11190bxe_get_common_flags(struct bxe_softc    *sc,
11191                     struct bxe_fastpath *fp,
11192                     uint8_t             zero_stats)
11193{
11194    unsigned long flags = 0;
11195
11196    /* PF driver will always initialize the Queue to an ACTIVE state */
11197    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11198
11199    /*
11200     * tx only connections collect statistics (on the same index as the
11201     * parent connection). The statistics are zeroed when the parent
11202     * connection is initialized.
11203     */
11204
11205    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11206    if (zero_stats) {
11207        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11208    }
11209
11210    /*
11211     * tx only connections can support tx-switching, though their
11212     * CoS-ness doesn't survive the loopback
11213     */
11214    if (sc->flags & BXE_TX_SWITCHING) {
11215        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11216    }
11217
11218    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11219
11220    return (flags);
11221}
11222
11223static unsigned long
11224bxe_get_q_flags(struct bxe_softc    *sc,
11225                struct bxe_fastpath *fp,
11226                uint8_t             leading)
11227{
11228    unsigned long flags = 0;
11229
11230    if (IS_MF_SD(sc)) {
11231        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11232    }
11233
11234    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11235        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11236#if __FreeBSD_version >= 800000
11237        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11238#endif
11239    }
11240
11241    if (leading) {
11242        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11243        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11244    }
11245
11246    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11247
11248    /* merge with common flags */
11249    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11250}
11251
11252static void
11253bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11254                      struct bxe_fastpath               *fp,
11255                      struct ecore_general_setup_params *gen_init,
11256                      uint8_t                           cos)
11257{
11258    gen_init->stat_id = bxe_stats_id(fp);
11259    gen_init->spcl_id = fp->cl_id;
11260    gen_init->mtu = sc->mtu;
11261    gen_init->cos = cos;
11262}
11263
11264static void
11265bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11266                 struct bxe_fastpath           *fp,
11267                 struct rxq_pause_params       *pause,
11268                 struct ecore_rxq_setup_params *rxq_init)
11269{
11270    uint8_t max_sge = 0;
11271    uint16_t sge_sz = 0;
11272    uint16_t tpa_agg_size = 0;
11273
11274    pause->sge_th_lo = SGE_TH_LO(sc);
11275    pause->sge_th_hi = SGE_TH_HI(sc);
11276
11277    /* validate SGE ring has enough to cross high threshold */
11278    if (sc->dropless_fc &&
11279            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11280            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11281        BLOGW(sc, "sge ring threshold limit\n");
11282    }
11283
11284    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11285    tpa_agg_size = (2 * sc->mtu);
11286    if (tpa_agg_size < sc->max_aggregation_size) {
11287        tpa_agg_size = sc->max_aggregation_size;
11288    }
11289
11290    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11291    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11292                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11293    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11294
11295    /* pause - not for e1 */
11296    if (!CHIP_IS_E1(sc)) {
11297        pause->bd_th_lo = BD_TH_LO(sc);
11298        pause->bd_th_hi = BD_TH_HI(sc);
11299
11300        pause->rcq_th_lo = RCQ_TH_LO(sc);
11301        pause->rcq_th_hi = RCQ_TH_HI(sc);
11302
11303        /* validate rings have enough entries to cross high thresholds */
11304        if (sc->dropless_fc &&
11305            pause->bd_th_hi + FW_PREFETCH_CNT >
11306            sc->rx_ring_size) {
11307            BLOGW(sc, "rx bd ring threshold limit\n");
11308        }
11309
11310        if (sc->dropless_fc &&
11311            pause->rcq_th_hi + FW_PREFETCH_CNT >
11312            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11313            BLOGW(sc, "rcq ring threshold limit\n");
11314        }
11315
11316        pause->pri_map = 1;
11317    }
11318
11319    /* rxq setup */
11320    rxq_init->dscr_map   = fp->rx_dma.paddr;
11321    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11322    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11323    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11324
11325    /*
11326     * This should be a maximum number of data bytes that may be
11327     * placed on the BD (not including paddings).
11328     */
11329    rxq_init->buf_sz = (fp->rx_buf_size -
11330                        IP_HEADER_ALIGNMENT_PADDING);
11331
11332    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11333    rxq_init->tpa_agg_sz      = tpa_agg_size;
11334    rxq_init->sge_buf_sz      = sge_sz;
11335    rxq_init->max_sges_pkt    = max_sge;
11336    rxq_init->rss_engine_id   = SC_FUNC(sc);
11337    rxq_init->mcast_engine_id = SC_FUNC(sc);
11338
11339    /*
11340     * Maximum number or simultaneous TPA aggregation for this Queue.
11341     * For PF Clients it should be the maximum available number.
11342     * VF driver(s) may want to define it to a smaller value.
11343     */
11344    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11345
11346    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11347    rxq_init->fw_sb_id = fp->fw_sb_id;
11348
11349    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11350
11351    /*
11352     * configure silent vlan removal
11353     * if multi function mode is afex, then mask default vlan
11354     */
11355    if (IS_MF_AFEX(sc)) {
11356        rxq_init->silent_removal_value =
11357            sc->devinfo.mf_info.afex_def_vlan_tag;
11358        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11359    }
11360}
11361
11362static void
11363bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11364                 struct bxe_fastpath           *fp,
11365                 struct ecore_txq_setup_params *txq_init,
11366                 uint8_t                       cos)
11367{
11368    /*
11369     * XXX If multiple CoS is ever supported then each fastpath structure
11370     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11371     * fp->txdata[cos]->tx_dma.paddr;
11372     */
11373    txq_init->dscr_map     = fp->tx_dma.paddr;
11374    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11375    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11376    txq_init->fw_sb_id     = fp->fw_sb_id;
11377
11378    /*
11379     * set the TSS leading client id for TX classfication to the
11380     * leading RSS client id
11381     */
11382    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11383}
11384
11385/*
11386 * This function performs 2 steps in a queue state machine:
11387 *   1) RESET->INIT
11388 *   2) INIT->SETUP
11389 */
11390static int
11391bxe_setup_queue(struct bxe_softc    *sc,
11392                struct bxe_fastpath *fp,
11393                uint8_t             leading)
11394{
11395    struct ecore_queue_state_params q_params = { NULL };
11396    struct ecore_queue_setup_params *setup_params =
11397                        &q_params.params.setup;
11398    int rc;
11399
11400    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11401
11402    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11403
11404    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11405
11406    /* we want to wait for completion in this context */
11407    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11408
11409    /* prepare the INIT parameters */
11410    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11411
11412    /* Set the command */
11413    q_params.cmd = ECORE_Q_CMD_INIT;
11414
11415    /* Change the state to INIT */
11416    rc = ecore_queue_state_change(sc, &q_params);
11417    if (rc) {
11418        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11419        return (rc);
11420    }
11421
11422    BLOGD(sc, DBG_LOAD, "init complete\n");
11423
11424    /* now move the Queue to the SETUP state */
11425    memset(setup_params, 0, sizeof(*setup_params));
11426
11427    /* set Queue flags */
11428    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11429
11430    /* set general SETUP parameters */
11431    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11432                          FIRST_TX_COS_INDEX);
11433
11434    bxe_pf_rx_q_prep(sc, fp,
11435                     &setup_params->pause_params,
11436                     &setup_params->rxq_params);
11437
11438    bxe_pf_tx_q_prep(sc, fp,
11439                     &setup_params->txq_params,
11440                     FIRST_TX_COS_INDEX);
11441
11442    /* Set the command */
11443    q_params.cmd = ECORE_Q_CMD_SETUP;
11444
11445    /* change the state to SETUP */
11446    rc = ecore_queue_state_change(sc, &q_params);
11447    if (rc) {
11448        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11449        return (rc);
11450    }
11451
11452    return (rc);
11453}
11454
11455static int
11456bxe_setup_leading(struct bxe_softc *sc)
11457{
11458    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11459}
11460
11461static int
11462bxe_config_rss_pf(struct bxe_softc            *sc,
11463                  struct ecore_rss_config_obj *rss_obj,
11464                  uint8_t                     config_hash)
11465{
11466    struct ecore_config_rss_params params = { NULL };
11467    int i;
11468
11469    /*
11470     * Although RSS is meaningless when there is a single HW queue we
11471     * still need it enabled in order to have HW Rx hash generated.
11472     */
11473
11474    params.rss_obj = rss_obj;
11475
11476    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11477
11478    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11479
11480    /* RSS configuration */
11481    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11482    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11483    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11484    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11485    if (rss_obj->udp_rss_v4) {
11486        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11487    }
11488    if (rss_obj->udp_rss_v6) {
11489        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11490    }
11491
11492    /* Hash bits */
11493    params.rss_result_mask = MULTI_MASK;
11494
11495    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11496
11497    if (config_hash) {
11498        /* RSS keys */
11499        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11500            params.rss_key[i] = arc4random();
11501        }
11502
11503        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11504    }
11505
11506    return (ecore_config_rss(sc, &params));
11507}
11508
11509static int
11510bxe_config_rss_eth(struct bxe_softc *sc,
11511                   uint8_t          config_hash)
11512{
11513    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11514}
11515
11516static int
11517bxe_init_rss_pf(struct bxe_softc *sc)
11518{
11519    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11520    int i;
11521
11522    /*
11523     * Prepare the initial contents of the indirection table if
11524     * RSS is enabled
11525     */
11526    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11527        sc->rss_conf_obj.ind_table[i] =
11528            (sc->fp->cl_id + (i % num_eth_queues));
11529    }
11530
11531    if (sc->udp_rss) {
11532        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11533    }
11534
11535    /*
11536     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11537     * per-port, so if explicit configuration is needed, do it only
11538     * for a PMF.
11539     *
11540     * For 57712 and newer it's a per-function configuration.
11541     */
11542    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11543}
11544
11545static int
11546bxe_set_mac_one(struct bxe_softc          *sc,
11547                uint8_t                   *mac,
11548                struct ecore_vlan_mac_obj *obj,
11549                uint8_t                   set,
11550                int                       mac_type,
11551                unsigned long             *ramrod_flags)
11552{
11553    struct ecore_vlan_mac_ramrod_params ramrod_param;
11554    int rc;
11555
11556    memset(&ramrod_param, 0, sizeof(ramrod_param));
11557
11558    /* fill in general parameters */
11559    ramrod_param.vlan_mac_obj = obj;
11560    ramrod_param.ramrod_flags = *ramrod_flags;
11561
11562    /* fill a user request section if needed */
11563    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11564        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11565
11566        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11567
11568        /* Set the command: ADD or DEL */
11569        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11570                                            ECORE_VLAN_MAC_DEL;
11571    }
11572
11573    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11574
11575    if (rc == ECORE_EXISTS) {
11576        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11577        /* do not treat adding same MAC as error */
11578        rc = 0;
11579    } else if (rc < 0) {
11580        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11581    }
11582
11583    return (rc);
11584}
11585
11586static int
11587bxe_set_eth_mac(struct bxe_softc *sc,
11588                uint8_t          set)
11589{
11590    unsigned long ramrod_flags = 0;
11591
11592    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11593
11594    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11595
11596    /* Eth MAC is set on RSS leading client (fp[0]) */
11597    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11598                            &sc->sp_objs->mac_obj,
11599                            set, ECORE_ETH_MAC, &ramrod_flags));
11600}
11601
11602static int
11603bxe_get_cur_phy_idx(struct bxe_softc *sc)
11604{
11605    uint32_t sel_phy_idx = 0;
11606
11607    if (sc->link_params.num_phys <= 1) {
11608        return (ELINK_INT_PHY);
11609    }
11610
11611    if (sc->link_vars.link_up) {
11612        sel_phy_idx = ELINK_EXT_PHY1;
11613        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11614        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11615            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11616             ELINK_SUPPORTED_FIBRE))
11617            sel_phy_idx = ELINK_EXT_PHY2;
11618    } else {
11619        switch (elink_phy_selection(&sc->link_params)) {
11620        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11621        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11622        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11623               sel_phy_idx = ELINK_EXT_PHY1;
11624               break;
11625        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11626        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11627               sel_phy_idx = ELINK_EXT_PHY2;
11628               break;
11629        }
11630    }
11631
11632    return (sel_phy_idx);
11633}
11634
11635static int
11636bxe_get_link_cfg_idx(struct bxe_softc *sc)
11637{
11638    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11639
11640    /*
11641     * The selected activated PHY is always after swapping (in case PHY
11642     * swapping is enabled). So when swapping is enabled, we need to reverse
11643     * the configuration
11644     */
11645
11646    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11647        if (sel_phy_idx == ELINK_EXT_PHY1)
11648            sel_phy_idx = ELINK_EXT_PHY2;
11649        else if (sel_phy_idx == ELINK_EXT_PHY2)
11650            sel_phy_idx = ELINK_EXT_PHY1;
11651    }
11652
11653    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11654}
11655
11656static void
11657bxe_set_requested_fc(struct bxe_softc *sc)
11658{
11659    /*
11660     * Initialize link parameters structure variables
11661     * It is recommended to turn off RX FC for jumbo frames
11662     * for better performance
11663     */
11664    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11665        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11666    } else {
11667        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11668    }
11669}
11670
11671static void
11672bxe_calc_fc_adv(struct bxe_softc *sc)
11673{
11674    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11675
11676
11677    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11678                                           ADVERTISED_Pause);
11679
11680    switch (sc->link_vars.ieee_fc &
11681            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11682
11683    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11684        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11685                                          ADVERTISED_Pause);
11686        break;
11687
11688    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11689        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11690        break;
11691
11692    default:
11693        break;
11694
11695    }
11696}
11697
11698static uint16_t
11699bxe_get_mf_speed(struct bxe_softc *sc)
11700{
11701    uint16_t line_speed = sc->link_vars.line_speed;
11702    if (IS_MF(sc)) {
11703        uint16_t maxCfg =
11704            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11705
11706        /* calculate the current MAX line speed limit for the MF devices */
11707        if (IS_MF_SI(sc)) {
11708            line_speed = (line_speed * maxCfg) / 100;
11709        } else { /* SD mode */
11710            uint16_t vn_max_rate = maxCfg * 100;
11711
11712            if (vn_max_rate < line_speed) {
11713                line_speed = vn_max_rate;
11714            }
11715        }
11716    }
11717
11718    return (line_speed);
11719}
11720
11721static void
11722bxe_fill_report_data(struct bxe_softc            *sc,
11723                     struct bxe_link_report_data *data)
11724{
11725    uint16_t line_speed = bxe_get_mf_speed(sc);
11726
11727    memset(data, 0, sizeof(*data));
11728
11729    /* fill the report data with the effective line speed */
11730    data->line_speed = line_speed;
11731
11732    /* Link is down */
11733    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11734        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11735    }
11736
11737    /* Full DUPLEX */
11738    if (sc->link_vars.duplex == DUPLEX_FULL) {
11739        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11740    }
11741
11742    /* Rx Flow Control is ON */
11743    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11744        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11745    }
11746
11747    /* Tx Flow Control is ON */
11748    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11749        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11750    }
11751}
11752
11753/* report link status to OS, should be called under phy_lock */
11754static void
11755bxe_link_report_locked(struct bxe_softc *sc)
11756{
11757    struct bxe_link_report_data cur_data;
11758
11759    /* reread mf_cfg */
11760    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11761        bxe_read_mf_cfg(sc);
11762    }
11763
11764    /* Read the current link report info */
11765    bxe_fill_report_data(sc, &cur_data);
11766
11767    /* Don't report link down or exactly the same link status twice */
11768    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11769        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11770                      &sc->last_reported_link.link_report_flags) &&
11771         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11772                      &cur_data.link_report_flags))) {
11773        return;
11774    }
11775
11776	ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11777					cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11778    sc->link_cnt++;
11779
11780	ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11781    /* report new link params and remember the state for the next time */
11782    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11783
11784    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11785                     &cur_data.link_report_flags)) {
11786        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11787    } else {
11788        const char *duplex;
11789        const char *flow;
11790
11791        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11792                                   &cur_data.link_report_flags)) {
11793            duplex = "full";
11794			ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11795        } else {
11796            duplex = "half";
11797			ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11798        }
11799
11800        /*
11801         * Handle the FC at the end so that only these flags would be
11802         * possibly set. This way we may easily check if there is no FC
11803         * enabled.
11804         */
11805        if (cur_data.link_report_flags) {
11806            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11807                             &cur_data.link_report_flags) &&
11808                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11809                             &cur_data.link_report_flags)) {
11810                flow = "ON - receive & transmit";
11811            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11812                                    &cur_data.link_report_flags) &&
11813                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11814                                     &cur_data.link_report_flags)) {
11815                flow = "ON - receive";
11816            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11817                                     &cur_data.link_report_flags) &&
11818                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11819                                    &cur_data.link_report_flags)) {
11820                flow = "ON - transmit";
11821            } else {
11822                flow = "none"; /* possible? */
11823            }
11824        } else {
11825            flow = "none";
11826        }
11827
11828        if_link_state_change(sc->ifp, LINK_STATE_UP);
11829        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11830              cur_data.line_speed, duplex, flow);
11831    }
11832}
11833
11834static void
11835bxe_link_report(struct bxe_softc *sc)
11836{
11837    bxe_acquire_phy_lock(sc);
11838    bxe_link_report_locked(sc);
11839    bxe_release_phy_lock(sc);
11840}
11841
11842static void
11843bxe_link_status_update(struct bxe_softc *sc)
11844{
11845    if (sc->state != BXE_STATE_OPEN) {
11846        return;
11847    }
11848
11849    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11850        elink_link_status_update(&sc->link_params, &sc->link_vars);
11851    } else {
11852        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11853                                  ELINK_SUPPORTED_10baseT_Full |
11854                                  ELINK_SUPPORTED_100baseT_Half |
11855                                  ELINK_SUPPORTED_100baseT_Full |
11856                                  ELINK_SUPPORTED_1000baseT_Full |
11857                                  ELINK_SUPPORTED_2500baseX_Full |
11858                                  ELINK_SUPPORTED_10000baseT_Full |
11859                                  ELINK_SUPPORTED_TP |
11860                                  ELINK_SUPPORTED_FIBRE |
11861                                  ELINK_SUPPORTED_Autoneg |
11862                                  ELINK_SUPPORTED_Pause |
11863                                  ELINK_SUPPORTED_Asym_Pause);
11864        sc->port.advertising[0] = sc->port.supported[0];
11865
11866        sc->link_params.sc                = sc;
11867        sc->link_params.port              = SC_PORT(sc);
11868        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11869        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11870        sc->link_params.req_line_speed[0] = SPEED_10000;
11871        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11872        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11873
11874        if (CHIP_REV_IS_FPGA(sc)) {
11875            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11876            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11877            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11878                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11879        } else {
11880            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11881            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11882            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11883                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11884        }
11885
11886        sc->link_vars.link_up = 1;
11887
11888        sc->link_vars.duplex    = DUPLEX_FULL;
11889        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11890
11891        if (IS_PF(sc)) {
11892            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11893            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11894            bxe_link_report(sc);
11895        }
11896    }
11897
11898    if (IS_PF(sc)) {
11899        if (sc->link_vars.link_up) {
11900            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11901        } else {
11902            bxe_stats_handle(sc, STATS_EVENT_STOP);
11903        }
11904        bxe_link_report(sc);
11905    } else {
11906        bxe_link_report(sc);
11907        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11908    }
11909}
11910
11911static int
11912bxe_initial_phy_init(struct bxe_softc *sc,
11913                     int              load_mode)
11914{
11915    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11916    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11917    struct elink_params *lp = &sc->link_params;
11918
11919    bxe_set_requested_fc(sc);
11920
11921    if (CHIP_REV_IS_SLOW(sc)) {
11922        uint32_t bond = CHIP_BOND_ID(sc);
11923        uint32_t feat = 0;
11924
11925        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11926            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11927        } else if (bond & 0x4) {
11928            if (CHIP_IS_E3(sc)) {
11929                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11930            } else {
11931                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11932            }
11933        } else if (bond & 0x8) {
11934            if (CHIP_IS_E3(sc)) {
11935                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11936            } else {
11937                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11938            }
11939        }
11940
11941        /* disable EMAC for E3 and above */
11942        if (bond & 0x2) {
11943            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11944        }
11945
11946        sc->link_params.feature_config_flags |= feat;
11947    }
11948
11949    bxe_acquire_phy_lock(sc);
11950
11951    if (load_mode == LOAD_DIAG) {
11952        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11953        /* Prefer doing PHY loopback at 10G speed, if possible */
11954        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11955            if (lp->speed_cap_mask[cfg_idx] &
11956                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11957                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11958            } else {
11959                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11960            }
11961        }
11962    }
11963
11964    if (load_mode == LOAD_LOOPBACK_EXT) {
11965        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11966    }
11967
11968    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11969
11970    bxe_release_phy_lock(sc);
11971
11972    bxe_calc_fc_adv(sc);
11973
11974    if (sc->link_vars.link_up) {
11975        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11976        bxe_link_report(sc);
11977    }
11978
11979    if (!CHIP_REV_IS_SLOW(sc)) {
11980        bxe_periodic_start(sc);
11981    }
11982
11983    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11984    return (rc);
11985}
11986
11987/* must be called under IF_ADDR_LOCK */
11988
11989static int
11990bxe_set_mc_list(struct bxe_softc *sc)
11991{
11992    struct ecore_mcast_ramrod_params rparam = { NULL };
11993    int rc = 0;
11994    int mc_count = 0;
11995    int mcnt, i;
11996    struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
11997    unsigned char *mta;
11998    if_t ifp = sc->ifp;
11999
12000    mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */
12001    if (!mc_count)
12002        return (0);
12003
12004    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
12005            mc_count, M_DEVBUF, M_NOWAIT);
12006
12007    if(mta == NULL) {
12008        BLOGE(sc, "Failed to allocate temp mcast list\n");
12009        return (-1);
12010    }
12011    bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
12012
12013    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
12014    mc_mac_start = mc_mac;
12015
12016    if (!mc_mac) {
12017        free(mta, M_DEVBUF);
12018        BLOGE(sc, "Failed to allocate temp mcast list\n");
12019        return (-1);
12020    }
12021    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12022
12023    /* mta and mcnt not expected to be  different */
12024    if_multiaddr_array(ifp, mta, &mcnt, mc_count);
12025
12026
12027    rparam.mcast_obj = &sc->mcast_obj;
12028    ECORE_LIST_INIT(&rparam.mcast_list);
12029
12030    for(i=0; i< mcnt; i++) {
12031
12032        mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
12033        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
12034
12035        BLOGD(sc, DBG_LOAD,
12036              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12037              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12038              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12039
12040        mc_mac++;
12041    }
12042    rparam.mcast_list_len = mc_count;
12043
12044    BXE_MCAST_LOCK(sc);
12045
12046    /* first, clear all configured multicast MACs */
12047    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12048    if (rc < 0) {
12049        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12050        BXE_MCAST_UNLOCK(sc);
12051    	free(mc_mac_start, M_DEVBUF);
12052        free(mta, M_DEVBUF);
12053        return (rc);
12054    }
12055
12056    /* Now add the new MACs */
12057    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12058    if (rc < 0) {
12059        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12060    }
12061
12062    BXE_MCAST_UNLOCK(sc);
12063
12064    free(mc_mac_start, M_DEVBUF);
12065    free(mta, M_DEVBUF);
12066
12067    return (rc);
12068}
12069
12070static int
12071bxe_set_uc_list(struct bxe_softc *sc)
12072{
12073    if_t ifp = sc->ifp;
12074    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12075    struct ifaddr *ifa;
12076    unsigned long ramrod_flags = 0;
12077    int rc;
12078
12079#if __FreeBSD_version < 800000
12080    IF_ADDR_LOCK(ifp);
12081#else
12082    if_addr_rlock(ifp);
12083#endif
12084
12085    /* first schedule a cleanup up of old configuration */
12086    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12087    if (rc < 0) {
12088        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12089#if __FreeBSD_version < 800000
12090        IF_ADDR_UNLOCK(ifp);
12091#else
12092        if_addr_runlock(ifp);
12093#endif
12094        return (rc);
12095    }
12096
12097    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12098    while (ifa) {
12099        if (ifa->ifa_addr->sa_family != AF_LINK) {
12100            ifa = TAILQ_NEXT(ifa, ifa_link);
12101            continue;
12102        }
12103
12104        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12105                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12106        if (rc == -EEXIST) {
12107            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12108            /* do not treat adding same MAC as an error */
12109            rc = 0;
12110        } else if (rc < 0) {
12111            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12112#if __FreeBSD_version < 800000
12113            IF_ADDR_UNLOCK(ifp);
12114#else
12115            if_addr_runlock(ifp);
12116#endif
12117            return (rc);
12118        }
12119
12120        ifa = TAILQ_NEXT(ifa, ifa_link);
12121    }
12122
12123#if __FreeBSD_version < 800000
12124    IF_ADDR_UNLOCK(ifp);
12125#else
12126    if_addr_runlock(ifp);
12127#endif
12128
12129    /* Execute the pending commands */
12130    bit_set(&ramrod_flags, RAMROD_CONT);
12131    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12132                            ECORE_UC_LIST_MAC, &ramrod_flags));
12133}
12134
12135static void
12136bxe_set_rx_mode(struct bxe_softc *sc)
12137{
12138    if_t ifp = sc->ifp;
12139    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12140
12141    if (sc->state != BXE_STATE_OPEN) {
12142        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12143        return;
12144    }
12145
12146    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12147
12148    if (if_getflags(ifp) & IFF_PROMISC) {
12149        rx_mode = BXE_RX_MODE_PROMISC;
12150    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12151               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12152                CHIP_IS_E1(sc))) {
12153        rx_mode = BXE_RX_MODE_ALLMULTI;
12154    } else {
12155        if (IS_PF(sc)) {
12156            /* some multicasts */
12157            if (bxe_set_mc_list(sc) < 0) {
12158                rx_mode = BXE_RX_MODE_ALLMULTI;
12159            }
12160            if (bxe_set_uc_list(sc) < 0) {
12161                rx_mode = BXE_RX_MODE_PROMISC;
12162            }
12163        }
12164    }
12165
12166    sc->rx_mode = rx_mode;
12167
12168    /* schedule the rx_mode command */
12169    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12170        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12171        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12172        return;
12173    }
12174
12175    if (IS_PF(sc)) {
12176        bxe_set_storm_rx_mode(sc);
12177    }
12178}
12179
12180
12181/* update flags in shmem */
12182static void
12183bxe_update_drv_flags(struct bxe_softc *sc,
12184                     uint32_t         flags,
12185                     uint32_t         set)
12186{
12187    uint32_t drv_flags;
12188
12189    if (SHMEM2_HAS(sc, drv_flags)) {
12190        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12191        drv_flags = SHMEM2_RD(sc, drv_flags);
12192
12193        if (set) {
12194            SET_FLAGS(drv_flags, flags);
12195        } else {
12196            RESET_FLAGS(drv_flags, flags);
12197        }
12198
12199        SHMEM2_WR(sc, drv_flags, drv_flags);
12200        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12201
12202        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12203    }
12204}
12205
12206/* periodic timer callout routine, only runs when the interface is up */
12207
12208static void
12209bxe_periodic_callout_func(void *xsc)
12210{
12211    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12212    int i;
12213
12214    if (!BXE_CORE_TRYLOCK(sc)) {
12215        /* just bail and try again next time */
12216
12217        if ((sc->state == BXE_STATE_OPEN) &&
12218            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12219            /* schedule the next periodic callout */
12220            callout_reset(&sc->periodic_callout, hz,
12221                          bxe_periodic_callout_func, sc);
12222        }
12223
12224        return;
12225    }
12226
12227    if ((sc->state != BXE_STATE_OPEN) ||
12228        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12229        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12230        BXE_CORE_UNLOCK(sc);
12231        return;
12232        }
12233
12234
12235    /* Check for TX timeouts on any fastpath. */
12236    FOR_EACH_QUEUE(sc, i) {
12237        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12238            /* Ruh-Roh, chip was reset! */
12239            break;
12240        }
12241    }
12242
12243    if (!CHIP_REV_IS_SLOW(sc)) {
12244        /*
12245         * This barrier is needed to ensure the ordering between the writing
12246         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12247         * the reading here.
12248         */
12249        mb();
12250        if (sc->port.pmf) {
12251	    bxe_acquire_phy_lock(sc);
12252            elink_period_func(&sc->link_params, &sc->link_vars);
12253	    bxe_release_phy_lock(sc);
12254        }
12255    }
12256
12257    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12258        int mb_idx = SC_FW_MB_IDX(sc);
12259        uint32_t drv_pulse;
12260        uint32_t mcp_pulse;
12261
12262        ++sc->fw_drv_pulse_wr_seq;
12263        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12264
12265        drv_pulse = sc->fw_drv_pulse_wr_seq;
12266        bxe_drv_pulse(sc);
12267
12268        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12269                     MCP_PULSE_SEQ_MASK);
12270
12271        /*
12272         * The delta between driver pulse and mcp response should
12273         * be 1 (before mcp response) or 0 (after mcp response).
12274         */
12275        if ((drv_pulse != mcp_pulse) &&
12276            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12277            /* someone lost a heartbeat... */
12278            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12279                  drv_pulse, mcp_pulse);
12280        }
12281    }
12282
12283    /* state is BXE_STATE_OPEN */
12284    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12285
12286    BXE_CORE_UNLOCK(sc);
12287
12288    if ((sc->state == BXE_STATE_OPEN) &&
12289        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12290        /* schedule the next periodic callout */
12291        callout_reset(&sc->periodic_callout, hz,
12292                      bxe_periodic_callout_func, sc);
12293    }
12294}
12295
12296static void
12297bxe_periodic_start(struct bxe_softc *sc)
12298{
12299    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12300    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12301}
12302
12303static void
12304bxe_periodic_stop(struct bxe_softc *sc)
12305{
12306    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12307    callout_drain(&sc->periodic_callout);
12308}
12309
12310/* start the controller */
12311static __noinline int
12312bxe_nic_load(struct bxe_softc *sc,
12313             int              load_mode)
12314{
12315    uint32_t val;
12316    int load_code = 0;
12317    int i, rc = 0;
12318
12319    BXE_CORE_LOCK_ASSERT(sc);
12320
12321    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12322
12323    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12324
12325    if (IS_PF(sc)) {
12326        /* must be called before memory allocation and HW init */
12327        bxe_ilt_set_info(sc);
12328    }
12329
12330    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12331
12332    bxe_set_fp_rx_buf_size(sc);
12333
12334    if (bxe_alloc_fp_buffers(sc) != 0) {
12335        BLOGE(sc, "Failed to allocate fastpath memory\n");
12336        sc->state = BXE_STATE_CLOSED;
12337        rc = ENOMEM;
12338        goto bxe_nic_load_error0;
12339    }
12340
12341    if (bxe_alloc_mem(sc) != 0) {
12342        sc->state = BXE_STATE_CLOSED;
12343        rc = ENOMEM;
12344        goto bxe_nic_load_error0;
12345    }
12346
12347    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12348        sc->state = BXE_STATE_CLOSED;
12349        rc = ENOMEM;
12350        goto bxe_nic_load_error0;
12351    }
12352
12353    if (IS_PF(sc)) {
12354        /* set pf load just before approaching the MCP */
12355        bxe_set_pf_load(sc);
12356
12357        /* if MCP exists send load request and analyze response */
12358        if (!BXE_NOMCP(sc)) {
12359            /* attempt to load pf */
12360            if (bxe_nic_load_request(sc, &load_code) != 0) {
12361                sc->state = BXE_STATE_CLOSED;
12362                rc = ENXIO;
12363                goto bxe_nic_load_error1;
12364            }
12365
12366            /* what did the MCP say? */
12367            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12368                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12369                sc->state = BXE_STATE_CLOSED;
12370                rc = ENXIO;
12371                goto bxe_nic_load_error2;
12372            }
12373        } else {
12374            BLOGI(sc, "Device has no MCP!\n");
12375            load_code = bxe_nic_load_no_mcp(sc);
12376        }
12377
12378        /* mark PMF if applicable */
12379        bxe_nic_load_pmf(sc, load_code);
12380
12381        /* Init Function state controlling object */
12382        bxe_init_func_obj(sc);
12383
12384        /* Initialize HW */
12385        if (bxe_init_hw(sc, load_code) != 0) {
12386            BLOGE(sc, "HW init failed\n");
12387            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12388            sc->state = BXE_STATE_CLOSED;
12389            rc = ENXIO;
12390            goto bxe_nic_load_error2;
12391        }
12392    }
12393
12394    /* set ALWAYS_ALIVE bit in shmem */
12395    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12396    bxe_drv_pulse(sc);
12397    sc->flags |= BXE_NO_PULSE;
12398
12399    /* attach interrupts */
12400    if (bxe_interrupt_attach(sc) != 0) {
12401        sc->state = BXE_STATE_CLOSED;
12402        rc = ENXIO;
12403        goto bxe_nic_load_error2;
12404    }
12405
12406    bxe_nic_init(sc, load_code);
12407
12408    /* Init per-function objects */
12409    if (IS_PF(sc)) {
12410        bxe_init_objs(sc);
12411        // XXX bxe_iov_nic_init(sc);
12412
12413        /* set AFEX default VLAN tag to an invalid value */
12414        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12415        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12416
12417        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12418        rc = bxe_func_start(sc);
12419        if (rc) {
12420            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12421            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12422            sc->state = BXE_STATE_ERROR;
12423            goto bxe_nic_load_error3;
12424        }
12425
12426        /* send LOAD_DONE command to MCP */
12427        if (!BXE_NOMCP(sc)) {
12428            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12429            if (!load_code) {
12430                BLOGE(sc, "MCP response failure, aborting\n");
12431                sc->state = BXE_STATE_ERROR;
12432                rc = ENXIO;
12433                goto bxe_nic_load_error3;
12434            }
12435        }
12436
12437        rc = bxe_setup_leading(sc);
12438        if (rc) {
12439            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12440            sc->state = BXE_STATE_ERROR;
12441            goto bxe_nic_load_error3;
12442        }
12443
12444        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12445            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12446            if (rc) {
12447                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12448                sc->state = BXE_STATE_ERROR;
12449                goto bxe_nic_load_error3;
12450            }
12451        }
12452
12453        rc = bxe_init_rss_pf(sc);
12454        if (rc) {
12455            BLOGE(sc, "PF RSS init failed\n");
12456            sc->state = BXE_STATE_ERROR;
12457            goto bxe_nic_load_error3;
12458        }
12459    }
12460    /* XXX VF */
12461
12462    /* now when Clients are configured we are ready to work */
12463    sc->state = BXE_STATE_OPEN;
12464
12465    /* Configure a ucast MAC */
12466    if (IS_PF(sc)) {
12467        rc = bxe_set_eth_mac(sc, TRUE);
12468    }
12469    if (rc) {
12470        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12471        sc->state = BXE_STATE_ERROR;
12472        goto bxe_nic_load_error3;
12473    }
12474
12475    if (sc->port.pmf) {
12476        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12477        if (rc) {
12478            sc->state = BXE_STATE_ERROR;
12479            goto bxe_nic_load_error3;
12480        }
12481    }
12482
12483    sc->link_params.feature_config_flags &=
12484        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12485
12486    /* start fast path */
12487
12488    /* Initialize Rx filter */
12489    bxe_set_rx_mode(sc);
12490
12491    /* start the Tx */
12492    switch (/* XXX load_mode */LOAD_OPEN) {
12493    case LOAD_NORMAL:
12494    case LOAD_OPEN:
12495        break;
12496
12497    case LOAD_DIAG:
12498    case LOAD_LOOPBACK_EXT:
12499        sc->state = BXE_STATE_DIAG;
12500        break;
12501
12502    default:
12503        break;
12504    }
12505
12506    if (sc->port.pmf) {
12507        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12508    } else {
12509        bxe_link_status_update(sc);
12510    }
12511
12512    /* start the periodic timer callout */
12513    bxe_periodic_start(sc);
12514
12515    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12516        /* mark driver is loaded in shmem2 */
12517        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12518        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12519                  (val |
12520                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12521                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12522    }
12523
12524    /* wait for all pending SP commands to complete */
12525    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12526        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12527        bxe_periodic_stop(sc);
12528        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12529        return (ENXIO);
12530    }
12531
12532    /* Tell the stack the driver is running! */
12533    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12534
12535    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12536
12537    return (0);
12538
12539bxe_nic_load_error3:
12540
12541    if (IS_PF(sc)) {
12542        bxe_int_disable_sync(sc, 1);
12543
12544        /* clean out queued objects */
12545        bxe_squeeze_objects(sc);
12546    }
12547
12548    bxe_interrupt_detach(sc);
12549
12550bxe_nic_load_error2:
12551
12552    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12553        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12554        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12555    }
12556
12557    sc->port.pmf = 0;
12558
12559bxe_nic_load_error1:
12560
12561    /* clear pf_load status, as it was already set */
12562    if (IS_PF(sc)) {
12563        bxe_clear_pf_load(sc);
12564    }
12565
12566bxe_nic_load_error0:
12567
12568    bxe_free_fw_stats_mem(sc);
12569    bxe_free_fp_buffers(sc);
12570    bxe_free_mem(sc);
12571
12572    return (rc);
12573}
12574
12575static int
12576bxe_init_locked(struct bxe_softc *sc)
12577{
12578    int other_engine = SC_PATH(sc) ? 0 : 1;
12579    uint8_t other_load_status, load_status;
12580    uint8_t global = FALSE;
12581    int rc;
12582
12583    BXE_CORE_LOCK_ASSERT(sc);
12584
12585    /* check if the driver is already running */
12586    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12587        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12588        return (0);
12589    }
12590
12591    bxe_set_power_state(sc, PCI_PM_D0);
12592
12593    /*
12594     * If parity occurred during the unload, then attentions and/or
12595     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
12596     * loaded on the current engine to complete the recovery. Parity recovery
12597     * is only relevant for PF driver.
12598     */
12599    if (IS_PF(sc)) {
12600        other_load_status = bxe_get_load_status(sc, other_engine);
12601        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12602
12603        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12604            bxe_chk_parity_attn(sc, &global, TRUE)) {
12605            do {
12606                /*
12607                 * If there are attentions and they are in global blocks, set
12608                 * the GLOBAL_RESET bit regardless whether it will be this
12609                 * function that will complete the recovery or not.
12610                 */
12611                if (global) {
12612                    bxe_set_reset_global(sc);
12613                }
12614
12615                /*
12616                 * Only the first function on the current engine should try
12617                 * to recover in open. In case of attentions in global blocks
12618                 * only the first in the chip should try to recover.
12619                 */
12620                if ((!load_status && (!global || !other_load_status)) &&
12621                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12622                    BLOGI(sc, "Recovered during init\n");
12623                    break;
12624                }
12625
12626                /* recovery has failed... */
12627                bxe_set_power_state(sc, PCI_PM_D3hot);
12628                sc->recovery_state = BXE_RECOVERY_FAILED;
12629
12630                BLOGE(sc, "Recovery flow hasn't properly "
12631                          "completed yet, try again later. "
12632                          "If you still see this message after a "
12633                          "few retries then power cycle is required.\n");
12634
12635                rc = ENXIO;
12636                goto bxe_init_locked_done;
12637            } while (0);
12638        }
12639    }
12640
12641    sc->recovery_state = BXE_RECOVERY_DONE;
12642
12643    rc = bxe_nic_load(sc, LOAD_OPEN);
12644
12645bxe_init_locked_done:
12646
12647    if (rc) {
12648        /* Tell the stack the driver is NOT running! */
12649        BLOGE(sc, "Initialization failed, "
12650                  "stack notified driver is NOT running!\n");
12651	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12652    }
12653
12654    return (rc);
12655}
12656
12657static int
12658bxe_stop_locked(struct bxe_softc *sc)
12659{
12660    BXE_CORE_LOCK_ASSERT(sc);
12661    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12662}
12663
12664/*
12665 * Handles controller initialization when called from an unlocked routine.
12666 * ifconfig calls this function.
12667 *
12668 * Returns:
12669 *   void
12670 */
12671static void
12672bxe_init(void *xsc)
12673{
12674    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12675
12676    BXE_CORE_LOCK(sc);
12677    bxe_init_locked(sc);
12678    BXE_CORE_UNLOCK(sc);
12679}
12680
12681static int
12682bxe_init_ifnet(struct bxe_softc *sc)
12683{
12684    if_t ifp;
12685    int capabilities;
12686
12687    /* ifconfig entrypoint for media type/status reporting */
12688    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12689                 bxe_ifmedia_update,
12690                 bxe_ifmedia_status);
12691
12692    /* set the default interface values */
12693    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12694    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12695    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12696
12697    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12698	BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12699
12700    /* allocate the ifnet structure */
12701    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12702        BLOGE(sc, "Interface allocation failed!\n");
12703        return (ENXIO);
12704    }
12705
12706    if_setsoftc(ifp, sc);
12707    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12708    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12709    if_setioctlfn(ifp, bxe_ioctl);
12710    if_setstartfn(ifp, bxe_tx_start);
12711    if_setgetcounterfn(ifp, bxe_get_counter);
12712#if __FreeBSD_version >= 901504
12713    if_settransmitfn(ifp, bxe_tx_mq_start);
12714    if_setqflushfn(ifp, bxe_mq_flush);
12715#endif
12716#ifdef FreeBSD8_0
12717    if_settimer(ifp, 0);
12718#endif
12719    if_setinitfn(ifp, bxe_init);
12720    if_setmtu(ifp, sc->mtu);
12721    if_sethwassist(ifp, (CSUM_IP      |
12722                        CSUM_TCP      |
12723                        CSUM_UDP      |
12724                        CSUM_TSO      |
12725                        CSUM_TCP_IPV6 |
12726                        CSUM_UDP_IPV6));
12727
12728    capabilities =
12729#if __FreeBSD_version < 700000
12730        (IFCAP_VLAN_MTU       |
12731         IFCAP_VLAN_HWTAGGING |
12732         IFCAP_HWCSUM         |
12733         IFCAP_JUMBO_MTU      |
12734         IFCAP_LRO);
12735#else
12736        (IFCAP_VLAN_MTU       |
12737         IFCAP_VLAN_HWTAGGING |
12738         IFCAP_VLAN_HWTSO     |
12739         IFCAP_VLAN_HWFILTER  |
12740         IFCAP_VLAN_HWCSUM    |
12741         IFCAP_HWCSUM         |
12742         IFCAP_JUMBO_MTU      |
12743         IFCAP_LRO            |
12744         IFCAP_TSO4           |
12745         IFCAP_TSO6           |
12746         IFCAP_WOL_MAGIC);
12747#endif
12748    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12749    if_setcapenable(ifp, if_getcapabilities(ifp));
12750    if_setbaudrate(ifp, IF_Gbps(10));
12751/* XXX */
12752    if_setsendqlen(ifp, sc->tx_ring_size);
12753    if_setsendqready(ifp);
12754/* XXX */
12755
12756    sc->ifp = ifp;
12757
12758    /* attach to the Ethernet interface list */
12759    ether_ifattach(ifp, sc->link_params.mac_addr);
12760
12761    return (0);
12762}
12763
12764static void
12765bxe_deallocate_bars(struct bxe_softc *sc)
12766{
12767    int i;
12768
12769    for (i = 0; i < MAX_BARS; i++) {
12770        if (sc->bar[i].resource != NULL) {
12771            bus_release_resource(sc->dev,
12772                                 SYS_RES_MEMORY,
12773                                 sc->bar[i].rid,
12774                                 sc->bar[i].resource);
12775            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12776                  i, PCIR_BAR(i));
12777        }
12778    }
12779}
12780
12781static int
12782bxe_allocate_bars(struct bxe_softc *sc)
12783{
12784    u_int flags;
12785    int i;
12786
12787    memset(sc->bar, 0, sizeof(sc->bar));
12788
12789    for (i = 0; i < MAX_BARS; i++) {
12790
12791        /* memory resources reside at BARs 0, 2, 4 */
12792        /* Run `pciconf -lb` to see mappings */
12793        if ((i != 0) && (i != 2) && (i != 4)) {
12794            continue;
12795        }
12796
12797        sc->bar[i].rid = PCIR_BAR(i);
12798
12799        flags = RF_ACTIVE;
12800        if (i == 0) {
12801            flags |= RF_SHAREABLE;
12802        }
12803
12804        if ((sc->bar[i].resource =
12805             bus_alloc_resource_any(sc->dev,
12806                                    SYS_RES_MEMORY,
12807                                    &sc->bar[i].rid,
12808                                    flags)) == NULL) {
12809            return (0);
12810        }
12811
12812        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12813        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12814        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12815
12816        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n",
12817              i, PCIR_BAR(i),
12818              (void *)rman_get_start(sc->bar[i].resource),
12819              (void *)rman_get_end(sc->bar[i].resource),
12820              rman_get_size(sc->bar[i].resource),
12821              (void *)sc->bar[i].kva);
12822    }
12823
12824    return (0);
12825}
12826
12827static void
12828bxe_get_function_num(struct bxe_softc *sc)
12829{
12830    uint32_t val = 0;
12831
12832    /*
12833     * Read the ME register to get the function number. The ME register
12834     * holds the relative-function number and absolute-function number. The
12835     * absolute-function number appears only in E2 and above. Before that
12836     * these bits always contained zero, therefore we cannot blindly use them.
12837     */
12838
12839    val = REG_RD(sc, BAR_ME_REGISTER);
12840
12841    sc->pfunc_rel =
12842        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12843    sc->path_id =
12844        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12845
12846    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12847        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12848    } else {
12849        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12850    }
12851
12852    BLOGD(sc, DBG_LOAD,
12853          "Relative function %d, Absolute function %d, Path %d\n",
12854          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12855}
12856
12857static uint32_t
12858bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12859{
12860    uint32_t shmem2_size;
12861    uint32_t offset;
12862    uint32_t mf_cfg_offset_value;
12863
12864    /* Non 57712 */
12865    offset = (SHMEM_RD(sc, func_mb) +
12866              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12867
12868    /* 57712 plus */
12869    if (sc->devinfo.shmem2_base != 0) {
12870        shmem2_size = SHMEM2_RD(sc, size);
12871        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12872            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12873            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12874                offset = mf_cfg_offset_value;
12875            }
12876        }
12877    }
12878
12879    return (offset);
12880}
12881
12882static uint32_t
12883bxe_pcie_capability_read(struct bxe_softc *sc,
12884                         int    reg,
12885                         int    width)
12886{
12887    int pcie_reg;
12888
12889    /* ensure PCIe capability is enabled */
12890    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12891        if (pcie_reg != 0) {
12892            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12893            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12894        }
12895    }
12896
12897    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12898
12899    return (0);
12900}
12901
12902static uint8_t
12903bxe_is_pcie_pending(struct bxe_softc *sc)
12904{
12905    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12906            PCIM_EXP_STA_TRANSACTION_PND);
12907}
12908
12909/*
12910 * Walk the PCI capabiites list for the device to find what features are
12911 * supported. These capabilites may be enabled/disabled by firmware so it's
12912 * best to walk the list rather than make assumptions.
12913 */
12914static void
12915bxe_probe_pci_caps(struct bxe_softc *sc)
12916{
12917    uint16_t link_status;
12918    int reg;
12919
12920    /* check if PCI Power Management is enabled */
12921    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12922        if (reg != 0) {
12923            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12924
12925            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12926            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12927        }
12928    }
12929
12930    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12931
12932    /* handle PCIe 2.0 workarounds for 57710 */
12933    if (CHIP_IS_E1(sc)) {
12934        /* workaround for 57710 errata E4_57710_27462 */
12935        sc->devinfo.pcie_link_speed =
12936            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12937
12938        /* workaround for 57710 errata E4_57710_27488 */
12939        sc->devinfo.pcie_link_width =
12940            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12941        if (sc->devinfo.pcie_link_speed > 1) {
12942            sc->devinfo.pcie_link_width =
12943                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12944        }
12945    } else {
12946        sc->devinfo.pcie_link_speed =
12947            (link_status & PCIM_LINK_STA_SPEED);
12948        sc->devinfo.pcie_link_width =
12949            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12950    }
12951
12952    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12953          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12954
12955    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
12956    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
12957
12958    /* check if MSI capability is enabled */
12959    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12960        if (reg != 0) {
12961            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12962
12963            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12964            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12965        }
12966    }
12967
12968    /* check if MSI-X capability is enabled */
12969    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12970        if (reg != 0) {
12971            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
12972
12973            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
12974            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
12975        }
12976    }
12977}
12978
12979static int
12980bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
12981{
12982    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12983    uint32_t val;
12984
12985    /* get the outer vlan if we're in switch-dependent mode */
12986
12987    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
12988    mf_info->ext_id = (uint16_t)val;
12989
12990    mf_info->multi_vnics_mode = 1;
12991
12992    if (!VALID_OVLAN(mf_info->ext_id)) {
12993        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
12994        return (1);
12995    }
12996
12997    /* get the capabilities */
12998    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12999        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13000        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13001    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13002               FUNC_MF_CFG_PROTOCOL_FCOE) {
13003        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13004    } else {
13005        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13006    }
13007
13008    mf_info->vnics_per_port =
13009        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13010
13011    return (0);
13012}
13013
13014static uint32_t
13015bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13016{
13017    uint32_t retval = 0;
13018    uint32_t val;
13019
13020    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13021
13022    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13023        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13024            retval |= MF_PROTO_SUPPORT_ETHERNET;
13025        }
13026        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13027            retval |= MF_PROTO_SUPPORT_ISCSI;
13028        }
13029        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13030            retval |= MF_PROTO_SUPPORT_FCOE;
13031        }
13032    }
13033
13034    return (retval);
13035}
13036
13037static int
13038bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13039{
13040    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13041    uint32_t val;
13042
13043    /*
13044     * There is no outer vlan if we're in switch-independent mode.
13045     * If the mac is valid then assume multi-function.
13046     */
13047
13048    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13049
13050    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13051
13052    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13053
13054    mf_info->vnics_per_port =
13055        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13056
13057    return (0);
13058}
13059
13060static int
13061bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13062{
13063    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13064    uint32_t e1hov_tag;
13065    uint32_t func_config;
13066    uint32_t niv_config;
13067
13068    mf_info->multi_vnics_mode = 1;
13069
13070    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13071    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13072    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13073
13074    mf_info->ext_id =
13075        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13076                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13077
13078    mf_info->default_vlan =
13079        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13080                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13081
13082    mf_info->niv_allowed_priorities =
13083        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13084                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13085
13086    mf_info->niv_default_cos =
13087        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13088                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13089
13090    mf_info->afex_vlan_mode =
13091        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13092         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13093
13094    mf_info->niv_mba_enabled =
13095        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13096         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13097
13098    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13099
13100    mf_info->vnics_per_port =
13101        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13102
13103    return (0);
13104}
13105
13106static int
13107bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13108{
13109    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13110    uint32_t mf_cfg1;
13111    uint32_t mf_cfg2;
13112    uint32_t ovlan1;
13113    uint32_t ovlan2;
13114    uint8_t i, j;
13115
13116    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13117          SC_PORT(sc));
13118    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13119          mf_info->mf_config[SC_VN(sc)]);
13120    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13121          mf_info->multi_vnics_mode);
13122    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13123          mf_info->vnics_per_port);
13124    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13125          mf_info->ext_id);
13126    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13127          mf_info->min_bw[0], mf_info->min_bw[1],
13128          mf_info->min_bw[2], mf_info->min_bw[3]);
13129    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13130          mf_info->max_bw[0], mf_info->max_bw[1],
13131          mf_info->max_bw[2], mf_info->max_bw[3]);
13132    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13133          sc->mac_addr_str);
13134
13135    /* various MF mode sanity checks... */
13136
13137    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13138        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13139              SC_PORT(sc));
13140        return (1);
13141    }
13142
13143    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13144        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13145              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13146        return (1);
13147    }
13148
13149    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13150        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13151        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13152            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13153                  SC_VN(sc), OVLAN(sc));
13154            return (1);
13155        }
13156
13157        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13158            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13159                  mf_info->multi_vnics_mode, OVLAN(sc));
13160            return (1);
13161        }
13162
13163        /*
13164         * Verify all functions are either MF or SF mode. If MF, make sure
13165         * sure that all non-hidden functions have a valid ovlan. If SF,
13166         * make sure that all non-hidden functions have an invalid ovlan.
13167         */
13168        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13169            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13170            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13171            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13172                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13173                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13174                BLOGE(sc, "mf_mode=SD function %d MF config "
13175                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13176                      i, mf_info->multi_vnics_mode, ovlan1);
13177                return (1);
13178            }
13179        }
13180
13181        /* Verify all funcs on the same port each have a different ovlan. */
13182        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13183            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13184            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13185            /* iterate from the next function on the port to the max func */
13186            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13187                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13188                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13189                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13190                    VALID_OVLAN(ovlan1) &&
13191                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13192                    VALID_OVLAN(ovlan2) &&
13193                    (ovlan1 == ovlan2)) {
13194                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13195                              "have the same ovlan (%d)\n",
13196                          i, j, ovlan1);
13197                    return (1);
13198                }
13199            }
13200        }
13201    } /* MULTI_FUNCTION_SD */
13202
13203    return (0);
13204}
13205
13206static int
13207bxe_get_mf_cfg_info(struct bxe_softc *sc)
13208{
13209    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13210    uint32_t val, mac_upper;
13211    uint8_t i, vnic;
13212
13213    /* initialize mf_info defaults */
13214    mf_info->vnics_per_port   = 1;
13215    mf_info->multi_vnics_mode = FALSE;
13216    mf_info->path_has_ovlan   = FALSE;
13217    mf_info->mf_mode          = SINGLE_FUNCTION;
13218
13219    if (!CHIP_IS_MF_CAP(sc)) {
13220        return (0);
13221    }
13222
13223    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13224        BLOGE(sc, "Invalid mf_cfg_base!\n");
13225        return (1);
13226    }
13227
13228    /* get the MF mode (switch dependent / independent / single-function) */
13229
13230    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13231
13232    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13233    {
13234    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13235
13236        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13237
13238        /* check for legal upper mac bytes */
13239        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13240            mf_info->mf_mode = MULTI_FUNCTION_SI;
13241        } else {
13242            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13243        }
13244
13245        break;
13246
13247    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13248    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13249
13250        /* get outer vlan configuration */
13251        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13252
13253        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13254            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13255            mf_info->mf_mode = MULTI_FUNCTION_SD;
13256        } else {
13257            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13258        }
13259
13260        break;
13261
13262    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13263
13264        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13265        return (0);
13266
13267    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13268
13269        /*
13270         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13271         * and the MAC address is valid.
13272         */
13273        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13274
13275        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13276            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13277            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13278        } else {
13279            BLOGE(sc, "Invalid config for AFEX mode\n");
13280        }
13281
13282        break;
13283
13284    default:
13285
13286        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13287              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13288
13289        return (1);
13290    }
13291
13292    /* set path mf_mode (which could be different than function mf_mode) */
13293    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13294        mf_info->path_has_ovlan = TRUE;
13295    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13296        /*
13297         * Decide on path multi vnics mode. If we're not in MF mode and in
13298         * 4-port mode, this is good enough to check vnic-0 of the other port
13299         * on the same path
13300         */
13301        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13302            uint8_t other_port = !(PORT_ID(sc) & 1);
13303            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13304
13305            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13306
13307            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13308        }
13309    }
13310
13311    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13312        /* invalid MF config */
13313        if (SC_VN(sc) >= 1) {
13314            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13315            return (1);
13316        }
13317
13318        return (0);
13319    }
13320
13321    /* get the MF configuration */
13322    mf_info->mf_config[SC_VN(sc)] =
13323        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13324
13325    switch(mf_info->mf_mode)
13326    {
13327    case MULTI_FUNCTION_SD:
13328
13329        bxe_get_shmem_mf_cfg_info_sd(sc);
13330        break;
13331
13332    case MULTI_FUNCTION_SI:
13333
13334        bxe_get_shmem_mf_cfg_info_si(sc);
13335        break;
13336
13337    case MULTI_FUNCTION_AFEX:
13338
13339        bxe_get_shmem_mf_cfg_info_niv(sc);
13340        break;
13341
13342    default:
13343
13344        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13345              mf_info->mf_mode);
13346        return (1);
13347    }
13348
13349    /* get the congestion management parameters */
13350
13351    vnic = 0;
13352    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13353        /* get min/max bw */
13354        val = MFCFG_RD(sc, func_mf_config[i].config);
13355        mf_info->min_bw[vnic] =
13356            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13357        mf_info->max_bw[vnic] =
13358            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13359        vnic++;
13360    }
13361
13362    return (bxe_check_valid_mf_cfg(sc));
13363}
13364
13365static int
13366bxe_get_shmem_info(struct bxe_softc *sc)
13367{
13368    int port;
13369    uint32_t mac_hi, mac_lo, val;
13370
13371    port = SC_PORT(sc);
13372    mac_hi = mac_lo = 0;
13373
13374    sc->link_params.sc   = sc;
13375    sc->link_params.port = port;
13376
13377    /* get the hardware config info */
13378    sc->devinfo.hw_config =
13379        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13380    sc->devinfo.hw_config2 =
13381        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13382
13383    sc->link_params.hw_led_mode =
13384        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13385         SHARED_HW_CFG_LED_MODE_SHIFT);
13386
13387    /* get the port feature config */
13388    sc->port.config =
13389        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13390
13391    /* get the link params */
13392    sc->link_params.speed_cap_mask[0] =
13393        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13394    sc->link_params.speed_cap_mask[1] =
13395        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13396
13397    /* get the lane config */
13398    sc->link_params.lane_config =
13399        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13400
13401    /* get the link config */
13402    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13403    sc->port.link_config[ELINK_INT_PHY] = val;
13404    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13405    sc->port.link_config[ELINK_EXT_PHY1] =
13406        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13407
13408    /* get the override preemphasis flag and enable it or turn it off */
13409    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13410    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13411        sc->link_params.feature_config_flags |=
13412            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13413    } else {
13414        sc->link_params.feature_config_flags &=
13415            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13416    }
13417
13418    /* get the initial value of the link params */
13419    sc->link_params.multi_phy_config =
13420        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13421
13422    /* get external phy info */
13423    sc->port.ext_phy_config =
13424        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13425
13426    /* get the multifunction configuration */
13427    bxe_get_mf_cfg_info(sc);
13428
13429    /* get the mac address */
13430    if (IS_MF(sc)) {
13431        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13432        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13433    } else {
13434        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13435        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13436    }
13437
13438    if ((mac_lo == 0) && (mac_hi == 0)) {
13439        *sc->mac_addr_str = 0;
13440        BLOGE(sc, "No Ethernet address programmed!\n");
13441    } else {
13442        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13443        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13444        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13445        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13446        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13447        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13448        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13449                 "%02x:%02x:%02x:%02x:%02x:%02x",
13450                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13451                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13452                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13453        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13454    }
13455
13456    return (0);
13457}
13458
13459static void
13460bxe_get_tunable_params(struct bxe_softc *sc)
13461{
13462    /* sanity checks */
13463
13464    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13465        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13466        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13467        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13468        bxe_interrupt_mode = INTR_MODE_MSIX;
13469    }
13470
13471    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13472        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13473        bxe_queue_count = 0;
13474    }
13475
13476    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13477        if (bxe_max_rx_bufs == 0) {
13478            bxe_max_rx_bufs = RX_BD_USABLE;
13479        } else {
13480            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13481            bxe_max_rx_bufs = 2048;
13482        }
13483    }
13484
13485    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13486        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13487        bxe_hc_rx_ticks = 25;
13488    }
13489
13490    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13491        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13492        bxe_hc_tx_ticks = 50;
13493    }
13494
13495    if (bxe_max_aggregation_size == 0) {
13496        bxe_max_aggregation_size = TPA_AGG_SIZE;
13497    }
13498
13499    if (bxe_max_aggregation_size > 0xffff) {
13500        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13501              bxe_max_aggregation_size);
13502        bxe_max_aggregation_size = TPA_AGG_SIZE;
13503    }
13504
13505    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13506        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13507        bxe_mrrs = -1;
13508    }
13509
13510    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13511        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13512        bxe_autogreeen = 0;
13513    }
13514
13515    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13516        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13517        bxe_udp_rss = 0;
13518    }
13519
13520    /* pull in user settings */
13521
13522    sc->interrupt_mode       = bxe_interrupt_mode;
13523    sc->max_rx_bufs          = bxe_max_rx_bufs;
13524    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13525    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13526    sc->max_aggregation_size = bxe_max_aggregation_size;
13527    sc->mrrs                 = bxe_mrrs;
13528    sc->autogreeen           = bxe_autogreeen;
13529    sc->udp_rss              = bxe_udp_rss;
13530
13531    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13532        sc->num_queues = 1;
13533    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13534        sc->num_queues =
13535            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13536                MAX_RSS_CHAINS);
13537        if (sc->num_queues > mp_ncpus) {
13538            sc->num_queues = mp_ncpus;
13539        }
13540    }
13541
13542    BLOGD(sc, DBG_LOAD,
13543          "User Config: "
13544          "debug=0x%lx "
13545          "interrupt_mode=%d "
13546          "queue_count=%d "
13547          "hc_rx_ticks=%d "
13548          "hc_tx_ticks=%d "
13549          "rx_budget=%d "
13550          "max_aggregation_size=%d "
13551          "mrrs=%d "
13552          "autogreeen=%d "
13553          "udp_rss=%d\n",
13554          bxe_debug,
13555          sc->interrupt_mode,
13556          sc->num_queues,
13557          sc->hc_rx_ticks,
13558          sc->hc_tx_ticks,
13559          bxe_rx_budget,
13560          sc->max_aggregation_size,
13561          sc->mrrs,
13562          sc->autogreeen,
13563          sc->udp_rss);
13564}
13565
13566static int
13567bxe_media_detect(struct bxe_softc *sc)
13568{
13569    int port_type;
13570    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13571
13572    switch (sc->link_params.phy[phy_idx].media_type) {
13573    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13574    case ELINK_ETH_PHY_XFP_FIBER:
13575        BLOGI(sc, "Found 10Gb Fiber media.\n");
13576        sc->media = IFM_10G_SR;
13577        port_type = PORT_FIBRE;
13578        break;
13579    case ELINK_ETH_PHY_SFP_1G_FIBER:
13580        BLOGI(sc, "Found 1Gb Fiber media.\n");
13581        sc->media = IFM_1000_SX;
13582        port_type = PORT_FIBRE;
13583        break;
13584    case ELINK_ETH_PHY_KR:
13585    case ELINK_ETH_PHY_CX4:
13586        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13587        sc->media = IFM_10G_CX4;
13588        port_type = PORT_FIBRE;
13589        break;
13590    case ELINK_ETH_PHY_DA_TWINAX:
13591        BLOGI(sc, "Found 10Gb Twinax media.\n");
13592        sc->media = IFM_10G_TWINAX;
13593        port_type = PORT_DA;
13594        break;
13595    case ELINK_ETH_PHY_BASE_T:
13596        if (sc->link_params.speed_cap_mask[0] &
13597            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13598            BLOGI(sc, "Found 10GBase-T media.\n");
13599            sc->media = IFM_10G_T;
13600            port_type = PORT_TP;
13601        } else {
13602            BLOGI(sc, "Found 1000Base-T media.\n");
13603            sc->media = IFM_1000_T;
13604            port_type = PORT_TP;
13605        }
13606        break;
13607    case ELINK_ETH_PHY_NOT_PRESENT:
13608        BLOGI(sc, "Media not present.\n");
13609        sc->media = 0;
13610        port_type = PORT_OTHER;
13611        break;
13612    case ELINK_ETH_PHY_UNSPECIFIED:
13613    default:
13614        BLOGI(sc, "Unknown media!\n");
13615        sc->media = 0;
13616        port_type = PORT_OTHER;
13617        break;
13618    }
13619    return port_type;
13620}
13621
13622#define GET_FIELD(value, fname)                     \
13623    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13624#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13625#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13626
13627static int
13628bxe_get_igu_cam_info(struct bxe_softc *sc)
13629{
13630    int pfid = SC_FUNC(sc);
13631    int igu_sb_id;
13632    uint32_t val;
13633    uint8_t fid, igu_sb_cnt = 0;
13634
13635    sc->igu_base_sb = 0xff;
13636
13637    if (CHIP_INT_MODE_IS_BC(sc)) {
13638        int vn = SC_VN(sc);
13639        igu_sb_cnt = sc->igu_sb_cnt;
13640        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13641                           FP_SB_MAX_E1x);
13642        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13643                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13644        return (0);
13645    }
13646
13647    /* IGU in normal mode - read CAM */
13648    for (igu_sb_id = 0;
13649         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13650         igu_sb_id++) {
13651        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13652        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13653            continue;
13654        }
13655        fid = IGU_FID(val);
13656        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13657            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13658                continue;
13659            }
13660            if (IGU_VEC(val) == 0) {
13661                /* default status block */
13662                sc->igu_dsb_id = igu_sb_id;
13663            } else {
13664                if (sc->igu_base_sb == 0xff) {
13665                    sc->igu_base_sb = igu_sb_id;
13666                }
13667                igu_sb_cnt++;
13668            }
13669        }
13670    }
13671
13672    /*
13673     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
13674     * that number of CAM entries will not be equal to the value advertised in
13675     * PCI. Driver should use the minimal value of both as the actual status
13676     * block count
13677     */
13678    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13679
13680    if (igu_sb_cnt == 0) {
13681        BLOGE(sc, "CAM configuration error\n");
13682        return (-1);
13683    }
13684
13685    return (0);
13686}
13687
13688/*
13689 * Gather various information from the device config space, the device itself,
13690 * shmem, and the user input.
13691 */
13692static int
13693bxe_get_device_info(struct bxe_softc *sc)
13694{
13695    uint32_t val;
13696    int rc;
13697
13698    /* Get the data for the device */
13699    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13700    sc->devinfo.device_id    = pci_get_device(sc->dev);
13701    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13702    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13703
13704    /* get the chip revision (chip metal comes from pci config space) */
13705    sc->devinfo.chip_id     =
13706    sc->link_params.chip_id =
13707        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13708         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13709         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13710         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13711
13712    /* force 57811 according to MISC register */
13713    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13714        if (CHIP_IS_57810(sc)) {
13715            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13716                                   (sc->devinfo.chip_id & 0x0000ffff));
13717        } else if (CHIP_IS_57810_MF(sc)) {
13718            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13719                                   (sc->devinfo.chip_id & 0x0000ffff));
13720        }
13721        sc->devinfo.chip_id |= 0x1;
13722    }
13723
13724    BLOGD(sc, DBG_LOAD,
13725          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13726          sc->devinfo.chip_id,
13727          ((sc->devinfo.chip_id >> 16) & 0xffff),
13728          ((sc->devinfo.chip_id >> 12) & 0xf),
13729          ((sc->devinfo.chip_id >>  4) & 0xff),
13730          ((sc->devinfo.chip_id >>  0) & 0xf));
13731
13732    val = (REG_RD(sc, 0x2874) & 0x55);
13733    if ((sc->devinfo.chip_id & 0x1) ||
13734        (CHIP_IS_E1(sc) && val) ||
13735        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13736        sc->flags |= BXE_ONE_PORT_FLAG;
13737        BLOGD(sc, DBG_LOAD, "single port device\n");
13738    }
13739
13740    /* set the doorbell size */
13741    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13742
13743    /* determine whether the device is in 2 port or 4 port mode */
13744    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13745    if (CHIP_IS_E2E3(sc)) {
13746        /*
13747         * Read port4mode_en_ovwr[0]:
13748         *   If 1, four port mode is in port4mode_en_ovwr[1].
13749         *   If 0, four port mode is in port4mode_en[0].
13750         */
13751        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13752        if (val & 1) {
13753            val = ((val >> 1) & 1);
13754        } else {
13755            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13756        }
13757
13758        sc->devinfo.chip_port_mode =
13759            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13760
13761        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13762    }
13763
13764    /* get the function and path info for the device */
13765    bxe_get_function_num(sc);
13766
13767    /* get the shared memory base address */
13768    sc->devinfo.shmem_base     =
13769    sc->link_params.shmem_base =
13770        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13771    sc->devinfo.shmem2_base =
13772        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13773                                  MISC_REG_GENERIC_CR_0));
13774
13775    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13776          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13777
13778    if (!sc->devinfo.shmem_base) {
13779        /* this should ONLY prevent upcoming shmem reads */
13780        BLOGI(sc, "MCP not active\n");
13781        sc->flags |= BXE_NO_MCP_FLAG;
13782        return (0);
13783    }
13784
13785    /* make sure the shared memory contents are valid */
13786    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13787    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13788        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13789        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13790        return (0);
13791    }
13792    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13793
13794    /* get the bootcode version */
13795    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13796    snprintf(sc->devinfo.bc_ver_str,
13797             sizeof(sc->devinfo.bc_ver_str),
13798             "%d.%d.%d",
13799             ((sc->devinfo.bc_ver >> 24) & 0xff),
13800             ((sc->devinfo.bc_ver >> 16) & 0xff),
13801             ((sc->devinfo.bc_ver >>  8) & 0xff));
13802    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13803
13804    /* get the bootcode shmem address */
13805    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13806    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
13807
13808    /* clean indirect addresses as they're not used */
13809    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13810    if (IS_PF(sc)) {
13811        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13812        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13813        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13814        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13815        if (CHIP_IS_E1x(sc)) {
13816            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13817            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13818            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13819            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13820        }
13821
13822        /*
13823         * Enable internal target-read (in case we are probed after PF
13824         * FLR). Must be done prior to any BAR read access. Only for
13825         * 57712 and up
13826         */
13827        if (!CHIP_IS_E1x(sc)) {
13828            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13829        }
13830    }
13831
13832    /* get the nvram size */
13833    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13834    sc->devinfo.flash_size =
13835        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13836    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13837
13838    /* get PCI capabilites */
13839    bxe_probe_pci_caps(sc);
13840
13841    bxe_set_power_state(sc, PCI_PM_D0);
13842
13843    /* get various configuration parameters from shmem */
13844    bxe_get_shmem_info(sc);
13845
13846    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13847        val = pci_read_config(sc->dev,
13848                              (sc->devinfo.pcie_msix_cap_reg +
13849                               PCIR_MSIX_CTRL),
13850                              2);
13851        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13852    } else {
13853        sc->igu_sb_cnt = 1;
13854    }
13855
13856    sc->igu_base_addr = BAR_IGU_INTMEM;
13857
13858    /* initialize IGU parameters */
13859    if (CHIP_IS_E1x(sc)) {
13860        sc->devinfo.int_block = INT_BLOCK_HC;
13861        sc->igu_dsb_id = DEF_SB_IGU_ID;
13862        sc->igu_base_sb = 0;
13863    } else {
13864        sc->devinfo.int_block = INT_BLOCK_IGU;
13865
13866        /* do not allow device reset during IGU info preocessing */
13867        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13868
13869        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13870
13871        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13872            int tout = 5000;
13873
13874            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13875
13876            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13877            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13878            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13879
13880            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13881                tout--;
13882                DELAY(1000);
13883            }
13884
13885            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13886                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13887                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13888                return (-1);
13889            }
13890        }
13891
13892        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13893            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13894            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13895        } else {
13896            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13897        }
13898
13899        rc = bxe_get_igu_cam_info(sc);
13900
13901        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13902
13903        if (rc) {
13904            return (rc);
13905        }
13906    }
13907
13908    /*
13909     * Get base FW non-default (fast path) status block ID. This value is
13910     * used to initialize the fw_sb_id saved on the fp/queue structure to
13911     * determine the id used by the FW.
13912     */
13913    if (CHIP_IS_E1x(sc)) {
13914        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13915    } else {
13916        /*
13917         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13918         * the same queue are indicated on the same IGU SB). So we prefer
13919         * FW and IGU SBs to be the same value.
13920         */
13921        sc->base_fw_ndsb = sc->igu_base_sb;
13922    }
13923
13924    BLOGD(sc, DBG_LOAD,
13925          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13926          sc->igu_dsb_id, sc->igu_base_sb,
13927          sc->igu_sb_cnt, sc->base_fw_ndsb);
13928
13929    elink_phy_probe(&sc->link_params);
13930
13931    return (0);
13932}
13933
13934static void
13935bxe_link_settings_supported(struct bxe_softc *sc,
13936                            uint32_t         switch_cfg)
13937{
13938    uint32_t cfg_size = 0;
13939    uint32_t idx;
13940    uint8_t port = SC_PORT(sc);
13941
13942    /* aggregation of supported attributes of all external phys */
13943    sc->port.supported[0] = 0;
13944    sc->port.supported[1] = 0;
13945
13946    switch (sc->link_params.num_phys) {
13947    case 1:
13948        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13949        cfg_size = 1;
13950        break;
13951    case 2:
13952        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13953        cfg_size = 1;
13954        break;
13955    case 3:
13956        if (sc->link_params.multi_phy_config &
13957            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13958            sc->port.supported[1] =
13959                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13960            sc->port.supported[0] =
13961                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13962        } else {
13963            sc->port.supported[0] =
13964                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13965            sc->port.supported[1] =
13966                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13967        }
13968        cfg_size = 2;
13969        break;
13970    }
13971
13972    if (!(sc->port.supported[0] || sc->port.supported[1])) {
13973        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
13974              SHMEM_RD(sc,
13975                       dev_info.port_hw_config[port].external_phy_config),
13976              SHMEM_RD(sc,
13977                       dev_info.port_hw_config[port].external_phy_config2));
13978        return;
13979    }
13980
13981    if (CHIP_IS_E3(sc))
13982        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
13983    else {
13984        switch (switch_cfg) {
13985        case ELINK_SWITCH_CFG_1G:
13986            sc->port.phy_addr =
13987                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
13988            break;
13989        case ELINK_SWITCH_CFG_10G:
13990            sc->port.phy_addr =
13991                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
13992            break;
13993        default:
13994            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
13995                  sc->port.link_config[0]);
13996            return;
13997        }
13998    }
13999
14000    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14001
14002    /* mask what we support according to speed_cap_mask per configuration */
14003    for (idx = 0; idx < cfg_size; idx++) {
14004        if (!(sc->link_params.speed_cap_mask[idx] &
14005              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14006            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14007        }
14008
14009        if (!(sc->link_params.speed_cap_mask[idx] &
14010              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14011            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14012        }
14013
14014        if (!(sc->link_params.speed_cap_mask[idx] &
14015              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14016            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14017        }
14018
14019        if (!(sc->link_params.speed_cap_mask[idx] &
14020              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14021            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14022        }
14023
14024        if (!(sc->link_params.speed_cap_mask[idx] &
14025              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14026            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14027        }
14028
14029        if (!(sc->link_params.speed_cap_mask[idx] &
14030              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14031            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14032        }
14033
14034        if (!(sc->link_params.speed_cap_mask[idx] &
14035              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14036            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14037        }
14038
14039        if (!(sc->link_params.speed_cap_mask[idx] &
14040              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14041            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14042        }
14043    }
14044
14045    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14046          sc->port.supported[0], sc->port.supported[1]);
14047	ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14048					sc->port.supported[0], sc->port.supported[1]);
14049}
14050
14051static void
14052bxe_link_settings_requested(struct bxe_softc *sc)
14053{
14054    uint32_t link_config;
14055    uint32_t idx;
14056    uint32_t cfg_size = 0;
14057
14058    sc->port.advertising[0] = 0;
14059    sc->port.advertising[1] = 0;
14060
14061    switch (sc->link_params.num_phys) {
14062    case 1:
14063    case 2:
14064        cfg_size = 1;
14065        break;
14066    case 3:
14067        cfg_size = 2;
14068        break;
14069    }
14070
14071    for (idx = 0; idx < cfg_size; idx++) {
14072        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14073        link_config = sc->port.link_config[idx];
14074
14075        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14076        case PORT_FEATURE_LINK_SPEED_AUTO:
14077            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14078                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14079                sc->port.advertising[idx] |= sc->port.supported[idx];
14080                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14081                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14082                    sc->port.advertising[idx] |=
14083                        (ELINK_SUPPORTED_100baseT_Half |
14084                         ELINK_SUPPORTED_100baseT_Full);
14085            } else {
14086                /* force 10G, no AN */
14087                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14088                sc->port.advertising[idx] |=
14089                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14090                continue;
14091            }
14092            break;
14093
14094        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14095            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14096                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14097                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14098                                              ADVERTISED_TP);
14099            } else {
14100                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14101                          "speed_cap_mask=0x%08x\n",
14102                      link_config, sc->link_params.speed_cap_mask[idx]);
14103                return;
14104            }
14105            break;
14106
14107        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14108            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14109                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14110                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14111                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14112                                              ADVERTISED_TP);
14113				ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14114								sc->link_params.req_duplex[idx]);
14115            } else {
14116                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14117                          "speed_cap_mask=0x%08x\n",
14118                      link_config, sc->link_params.speed_cap_mask[idx]);
14119                return;
14120            }
14121            break;
14122
14123        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14124            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14125                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14126                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14127                                              ADVERTISED_TP);
14128            } else {
14129                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14130                          "speed_cap_mask=0x%08x\n",
14131                      link_config, sc->link_params.speed_cap_mask[idx]);
14132                return;
14133            }
14134            break;
14135
14136        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14137            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14138                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14139                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14140                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14141                                              ADVERTISED_TP);
14142            } else {
14143                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14144                          "speed_cap_mask=0x%08x\n",
14145                      link_config, sc->link_params.speed_cap_mask[idx]);
14146                return;
14147            }
14148            break;
14149
14150        case PORT_FEATURE_LINK_SPEED_1G:
14151            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14152                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14153                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14154                                              ADVERTISED_TP);
14155            } else {
14156                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14157                          "speed_cap_mask=0x%08x\n",
14158                      link_config, sc->link_params.speed_cap_mask[idx]);
14159                return;
14160            }
14161            break;
14162
14163        case PORT_FEATURE_LINK_SPEED_2_5G:
14164            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14165                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14166                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14167                                              ADVERTISED_TP);
14168            } else {
14169                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14170                          "speed_cap_mask=0x%08x\n",
14171                      link_config, sc->link_params.speed_cap_mask[idx]);
14172                return;
14173            }
14174            break;
14175
14176        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14177            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14178                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14179                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14180                                              ADVERTISED_FIBRE);
14181            } else {
14182                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14183                          "speed_cap_mask=0x%08x\n",
14184                      link_config, sc->link_params.speed_cap_mask[idx]);
14185                return;
14186            }
14187            break;
14188
14189        case PORT_FEATURE_LINK_SPEED_20G:
14190            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14191            break;
14192
14193        default:
14194            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14195                      "speed_cap_mask=0x%08x\n",
14196                  link_config, sc->link_params.speed_cap_mask[idx]);
14197            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14198            sc->port.advertising[idx] = sc->port.supported[idx];
14199            break;
14200        }
14201
14202        sc->link_params.req_flow_ctrl[idx] =
14203            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14204
14205        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14206            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14207                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14208            } else {
14209                bxe_set_requested_fc(sc);
14210            }
14211        }
14212
14213        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14214                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14215              sc->link_params.req_line_speed[idx],
14216              sc->link_params.req_duplex[idx],
14217              sc->link_params.req_flow_ctrl[idx],
14218              sc->port.advertising[idx]);
14219		ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14220						"advertising=0x%x\n",
14221						sc->link_params.req_line_speed[idx],
14222						sc->link_params.req_duplex[idx],
14223						sc->port.advertising[idx]);
14224    }
14225}
14226
14227static void
14228bxe_get_phy_info(struct bxe_softc *sc)
14229{
14230    uint8_t port = SC_PORT(sc);
14231    uint32_t config = sc->port.config;
14232    uint32_t eee_mode;
14233
14234    /* shmem data already read in bxe_get_shmem_info() */
14235
14236    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14237                        "link_config0=0x%08x\n",
14238               sc->link_params.lane_config,
14239               sc->link_params.speed_cap_mask[0],
14240               sc->port.link_config[0]);
14241
14242
14243    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14244    bxe_link_settings_requested(sc);
14245
14246    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14247        sc->link_params.feature_config_flags |=
14248            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14249    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14250        sc->link_params.feature_config_flags &=
14251            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14252    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14253        sc->link_params.feature_config_flags |=
14254            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14255    }
14256
14257    /* configure link feature according to nvram value */
14258    eee_mode =
14259        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14260          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14261         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14262    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14263        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14264                                    ELINK_EEE_MODE_ENABLE_LPI |
14265                                    ELINK_EEE_MODE_OUTPUT_TIME);
14266    } else {
14267        sc->link_params.eee_mode = 0;
14268    }
14269
14270    /* get the media type */
14271    bxe_media_detect(sc);
14272	ELINK_DEBUG_P1(sc, "detected media type\n", sc->media);
14273}
14274
14275static void
14276bxe_get_params(struct bxe_softc *sc)
14277{
14278    /* get user tunable params */
14279    bxe_get_tunable_params(sc);
14280
14281    /* select the RX and TX ring sizes */
14282    sc->tx_ring_size = TX_BD_USABLE;
14283    sc->rx_ring_size = RX_BD_USABLE;
14284
14285    /* XXX disable WoL */
14286    sc->wol = 0;
14287}
14288
14289static void
14290bxe_set_modes_bitmap(struct bxe_softc *sc)
14291{
14292    uint32_t flags = 0;
14293
14294    if (CHIP_REV_IS_FPGA(sc)) {
14295        SET_FLAGS(flags, MODE_FPGA);
14296    } else if (CHIP_REV_IS_EMUL(sc)) {
14297        SET_FLAGS(flags, MODE_EMUL);
14298    } else {
14299        SET_FLAGS(flags, MODE_ASIC);
14300    }
14301
14302    if (CHIP_IS_MODE_4_PORT(sc)) {
14303        SET_FLAGS(flags, MODE_PORT4);
14304    } else {
14305        SET_FLAGS(flags, MODE_PORT2);
14306    }
14307
14308    if (CHIP_IS_E2(sc)) {
14309        SET_FLAGS(flags, MODE_E2);
14310    } else if (CHIP_IS_E3(sc)) {
14311        SET_FLAGS(flags, MODE_E3);
14312        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14313            SET_FLAGS(flags, MODE_E3_A0);
14314        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14315            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14316        }
14317    }
14318
14319    if (IS_MF(sc)) {
14320        SET_FLAGS(flags, MODE_MF);
14321        switch (sc->devinfo.mf_info.mf_mode) {
14322        case MULTI_FUNCTION_SD:
14323            SET_FLAGS(flags, MODE_MF_SD);
14324            break;
14325        case MULTI_FUNCTION_SI:
14326            SET_FLAGS(flags, MODE_MF_SI);
14327            break;
14328        case MULTI_FUNCTION_AFEX:
14329            SET_FLAGS(flags, MODE_MF_AFEX);
14330            break;
14331        }
14332    } else {
14333        SET_FLAGS(flags, MODE_SF);
14334    }
14335
14336#if defined(__LITTLE_ENDIAN)
14337    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14338#else /* __BIG_ENDIAN */
14339    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14340#endif
14341
14342    INIT_MODE_FLAGS(sc) = flags;
14343}
14344
14345static int
14346bxe_alloc_hsi_mem(struct bxe_softc *sc)
14347{
14348    struct bxe_fastpath *fp;
14349    bus_addr_t busaddr;
14350    int max_agg_queues;
14351    int max_segments;
14352    bus_size_t max_size;
14353    bus_size_t max_seg_size;
14354    char buf[32];
14355    int rc;
14356    int i, j;
14357
14358    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14359
14360    /* allocate the parent bus DMA tag */
14361    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14362                            1,                        /* alignment */
14363                            0,                        /* boundary limit */
14364                            BUS_SPACE_MAXADDR,        /* restricted low */
14365                            BUS_SPACE_MAXADDR,        /* restricted hi */
14366                            NULL,                     /* addr filter() */
14367                            NULL,                     /* addr filter() arg */
14368                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14369                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14370                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14371                            0,                        /* flags */
14372                            NULL,                     /* lock() */
14373                            NULL,                     /* lock() arg */
14374                            &sc->parent_dma_tag);     /* returned dma tag */
14375    if (rc != 0) {
14376        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14377        return (1);
14378    }
14379
14380    /************************/
14381    /* DEFAULT STATUS BLOCK */
14382    /************************/
14383
14384    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14385                      &sc->def_sb_dma, "default status block") != 0) {
14386        /* XXX */
14387        bus_dma_tag_destroy(sc->parent_dma_tag);
14388        return (1);
14389    }
14390
14391    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14392
14393    /***************/
14394    /* EVENT QUEUE */
14395    /***************/
14396
14397    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14398                      &sc->eq_dma, "event queue") != 0) {
14399        /* XXX */
14400        bxe_dma_free(sc, &sc->def_sb_dma);
14401        sc->def_sb = NULL;
14402        bus_dma_tag_destroy(sc->parent_dma_tag);
14403        return (1);
14404    }
14405
14406    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14407
14408    /*************/
14409    /* SLOW PATH */
14410    /*************/
14411
14412    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14413                      &sc->sp_dma, "slow path") != 0) {
14414        /* XXX */
14415        bxe_dma_free(sc, &sc->eq_dma);
14416        sc->eq = NULL;
14417        bxe_dma_free(sc, &sc->def_sb_dma);
14418        sc->def_sb = NULL;
14419        bus_dma_tag_destroy(sc->parent_dma_tag);
14420        return (1);
14421    }
14422
14423    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14424
14425    /*******************/
14426    /* SLOW PATH QUEUE */
14427    /*******************/
14428
14429    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14430                      &sc->spq_dma, "slow path queue") != 0) {
14431        /* XXX */
14432        bxe_dma_free(sc, &sc->sp_dma);
14433        sc->sp = NULL;
14434        bxe_dma_free(sc, &sc->eq_dma);
14435        sc->eq = NULL;
14436        bxe_dma_free(sc, &sc->def_sb_dma);
14437        sc->def_sb = NULL;
14438        bus_dma_tag_destroy(sc->parent_dma_tag);
14439        return (1);
14440    }
14441
14442    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14443
14444    /***************************/
14445    /* FW DECOMPRESSION BUFFER */
14446    /***************************/
14447
14448    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14449                      "fw decompression buffer") != 0) {
14450        /* XXX */
14451        bxe_dma_free(sc, &sc->spq_dma);
14452        sc->spq = NULL;
14453        bxe_dma_free(sc, &sc->sp_dma);
14454        sc->sp = NULL;
14455        bxe_dma_free(sc, &sc->eq_dma);
14456        sc->eq = NULL;
14457        bxe_dma_free(sc, &sc->def_sb_dma);
14458        sc->def_sb = NULL;
14459        bus_dma_tag_destroy(sc->parent_dma_tag);
14460        return (1);
14461    }
14462
14463    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14464
14465    if ((sc->gz_strm =
14466         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14467        /* XXX */
14468        bxe_dma_free(sc, &sc->gz_buf_dma);
14469        sc->gz_buf = NULL;
14470        bxe_dma_free(sc, &sc->spq_dma);
14471        sc->spq = NULL;
14472        bxe_dma_free(sc, &sc->sp_dma);
14473        sc->sp = NULL;
14474        bxe_dma_free(sc, &sc->eq_dma);
14475        sc->eq = NULL;
14476        bxe_dma_free(sc, &sc->def_sb_dma);
14477        sc->def_sb = NULL;
14478        bus_dma_tag_destroy(sc->parent_dma_tag);
14479        return (1);
14480    }
14481
14482    /*************/
14483    /* FASTPATHS */
14484    /*************/
14485
14486    /* allocate DMA memory for each fastpath structure */
14487    for (i = 0; i < sc->num_queues; i++) {
14488        fp = &sc->fp[i];
14489        fp->sc    = sc;
14490        fp->index = i;
14491
14492        /*******************/
14493        /* FP STATUS BLOCK */
14494        /*******************/
14495
14496        snprintf(buf, sizeof(buf), "fp %d status block", i);
14497        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14498                          &fp->sb_dma, buf) != 0) {
14499            /* XXX unwind and free previous fastpath allocations */
14500            BLOGE(sc, "Failed to alloc %s\n", buf);
14501            return (1);
14502        } else {
14503            if (CHIP_IS_E2E3(sc)) {
14504                fp->status_block.e2_sb =
14505                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14506            } else {
14507                fp->status_block.e1x_sb =
14508                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14509            }
14510        }
14511
14512        /******************/
14513        /* FP TX BD CHAIN */
14514        /******************/
14515
14516        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14517        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14518                          &fp->tx_dma, buf) != 0) {
14519            /* XXX unwind and free previous fastpath allocations */
14520            BLOGE(sc, "Failed to alloc %s\n", buf);
14521            return (1);
14522        } else {
14523            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14524        }
14525
14526        /* link together the tx bd chain pages */
14527        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14528            /* index into the tx bd chain array to last entry per page */
14529            struct eth_tx_next_bd *tx_next_bd =
14530                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14531            /* point to the next page and wrap from last page */
14532            busaddr = (fp->tx_dma.paddr +
14533                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14534            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14535            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14536        }
14537
14538        /******************/
14539        /* FP RX BD CHAIN */
14540        /******************/
14541
14542        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14543        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14544                          &fp->rx_dma, buf) != 0) {
14545            /* XXX unwind and free previous fastpath allocations */
14546            BLOGE(sc, "Failed to alloc %s\n", buf);
14547            return (1);
14548        } else {
14549            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14550        }
14551
14552        /* link together the rx bd chain pages */
14553        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14554            /* index into the rx bd chain array to last entry per page */
14555            struct eth_rx_bd *rx_bd =
14556                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14557            /* point to the next page and wrap from last page */
14558            busaddr = (fp->rx_dma.paddr +
14559                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14560            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14561            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14562        }
14563
14564        /*******************/
14565        /* FP RX RCQ CHAIN */
14566        /*******************/
14567
14568        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14569        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14570                          &fp->rcq_dma, buf) != 0) {
14571            /* XXX unwind and free previous fastpath allocations */
14572            BLOGE(sc, "Failed to alloc %s\n", buf);
14573            return (1);
14574        } else {
14575            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14576        }
14577
14578        /* link together the rcq chain pages */
14579        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14580            /* index into the rcq chain array to last entry per page */
14581            struct eth_rx_cqe_next_page *rx_cqe_next =
14582                (struct eth_rx_cqe_next_page *)
14583                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14584            /* point to the next page and wrap from last page */
14585            busaddr = (fp->rcq_dma.paddr +
14586                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14587            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14588            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14589        }
14590
14591        /*******************/
14592        /* FP RX SGE CHAIN */
14593        /*******************/
14594
14595        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14596        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14597                          &fp->rx_sge_dma, buf) != 0) {
14598            /* XXX unwind and free previous fastpath allocations */
14599            BLOGE(sc, "Failed to alloc %s\n", buf);
14600            return (1);
14601        } else {
14602            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14603        }
14604
14605        /* link together the sge chain pages */
14606        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14607            /* index into the rcq chain array to last entry per page */
14608            struct eth_rx_sge *rx_sge =
14609                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14610            /* point to the next page and wrap from last page */
14611            busaddr = (fp->rx_sge_dma.paddr +
14612                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14613            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14614            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14615        }
14616
14617        /***********************/
14618        /* FP TX MBUF DMA MAPS */
14619        /***********************/
14620
14621        /* set required sizes before mapping to conserve resources */
14622        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14623            max_size     = BXE_TSO_MAX_SIZE;
14624            max_segments = BXE_TSO_MAX_SEGMENTS;
14625            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14626        } else {
14627            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14628            max_segments = BXE_MAX_SEGMENTS;
14629            max_seg_size = MCLBYTES;
14630        }
14631
14632        /* create a dma tag for the tx mbufs */
14633        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14634                                1,                  /* alignment */
14635                                0,                  /* boundary limit */
14636                                BUS_SPACE_MAXADDR,  /* restricted low */
14637                                BUS_SPACE_MAXADDR,  /* restricted hi */
14638                                NULL,               /* addr filter() */
14639                                NULL,               /* addr filter() arg */
14640                                max_size,           /* max map size */
14641                                max_segments,       /* num discontinuous */
14642                                max_seg_size,       /* max seg size */
14643                                0,                  /* flags */
14644                                NULL,               /* lock() */
14645                                NULL,               /* lock() arg */
14646                                &fp->tx_mbuf_tag);  /* returned dma tag */
14647        if (rc != 0) {
14648            /* XXX unwind and free previous fastpath allocations */
14649            BLOGE(sc, "Failed to create dma tag for "
14650                      "'fp %d tx mbufs' (%d)\n", i, rc);
14651            return (1);
14652        }
14653
14654        /* create dma maps for each of the tx mbuf clusters */
14655        for (j = 0; j < TX_BD_TOTAL; j++) {
14656            if (bus_dmamap_create(fp->tx_mbuf_tag,
14657                                  BUS_DMA_NOWAIT,
14658                                  &fp->tx_mbuf_chain[j].m_map)) {
14659                /* XXX unwind and free previous fastpath allocations */
14660                BLOGE(sc, "Failed to create dma map for "
14661                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14662                return (1);
14663            }
14664        }
14665
14666        /***********************/
14667        /* FP RX MBUF DMA MAPS */
14668        /***********************/
14669
14670        /* create a dma tag for the rx mbufs */
14671        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14672                                1,                  /* alignment */
14673                                0,                  /* boundary limit */
14674                                BUS_SPACE_MAXADDR,  /* restricted low */
14675                                BUS_SPACE_MAXADDR,  /* restricted hi */
14676                                NULL,               /* addr filter() */
14677                                NULL,               /* addr filter() arg */
14678                                MJUM9BYTES,         /* max map size */
14679                                1,                  /* num discontinuous */
14680                                MJUM9BYTES,         /* max seg size */
14681                                0,                  /* flags */
14682                                NULL,               /* lock() */
14683                                NULL,               /* lock() arg */
14684                                &fp->rx_mbuf_tag);  /* returned dma tag */
14685        if (rc != 0) {
14686            /* XXX unwind and free previous fastpath allocations */
14687            BLOGE(sc, "Failed to create dma tag for "
14688                      "'fp %d rx mbufs' (%d)\n", i, rc);
14689            return (1);
14690        }
14691
14692        /* create dma maps for each of the rx mbuf clusters */
14693        for (j = 0; j < RX_BD_TOTAL; j++) {
14694            if (bus_dmamap_create(fp->rx_mbuf_tag,
14695                                  BUS_DMA_NOWAIT,
14696                                  &fp->rx_mbuf_chain[j].m_map)) {
14697                /* XXX unwind and free previous fastpath allocations */
14698                BLOGE(sc, "Failed to create dma map for "
14699                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14700                return (1);
14701            }
14702        }
14703
14704        /* create dma map for the spare rx mbuf cluster */
14705        if (bus_dmamap_create(fp->rx_mbuf_tag,
14706                              BUS_DMA_NOWAIT,
14707                              &fp->rx_mbuf_spare_map)) {
14708            /* XXX unwind and free previous fastpath allocations */
14709            BLOGE(sc, "Failed to create dma map for "
14710                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14711            return (1);
14712        }
14713
14714        /***************************/
14715        /* FP RX SGE MBUF DMA MAPS */
14716        /***************************/
14717
14718        /* create a dma tag for the rx sge mbufs */
14719        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14720                                1,                  /* alignment */
14721                                0,                  /* boundary limit */
14722                                BUS_SPACE_MAXADDR,  /* restricted low */
14723                                BUS_SPACE_MAXADDR,  /* restricted hi */
14724                                NULL,               /* addr filter() */
14725                                NULL,               /* addr filter() arg */
14726                                BCM_PAGE_SIZE,      /* max map size */
14727                                1,                  /* num discontinuous */
14728                                BCM_PAGE_SIZE,      /* max seg size */
14729                                0,                  /* flags */
14730                                NULL,               /* lock() */
14731                                NULL,               /* lock() arg */
14732                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14733        if (rc != 0) {
14734            /* XXX unwind and free previous fastpath allocations */
14735            BLOGE(sc, "Failed to create dma tag for "
14736                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14737            return (1);
14738        }
14739
14740        /* create dma maps for the rx sge mbuf clusters */
14741        for (j = 0; j < RX_SGE_TOTAL; j++) {
14742            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14743                                  BUS_DMA_NOWAIT,
14744                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14745                /* XXX unwind and free previous fastpath allocations */
14746                BLOGE(sc, "Failed to create dma map for "
14747                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14748                return (1);
14749            }
14750        }
14751
14752        /* create dma map for the spare rx sge mbuf cluster */
14753        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14754                              BUS_DMA_NOWAIT,
14755                              &fp->rx_sge_mbuf_spare_map)) {
14756            /* XXX unwind and free previous fastpath allocations */
14757            BLOGE(sc, "Failed to create dma map for "
14758                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14759            return (1);
14760        }
14761
14762        /***************************/
14763        /* FP RX TPA MBUF DMA MAPS */
14764        /***************************/
14765
14766        /* create dma maps for the rx tpa mbuf clusters */
14767        max_agg_queues = MAX_AGG_QS(sc);
14768
14769        for (j = 0; j < max_agg_queues; j++) {
14770            if (bus_dmamap_create(fp->rx_mbuf_tag,
14771                                  BUS_DMA_NOWAIT,
14772                                  &fp->rx_tpa_info[j].bd.m_map)) {
14773                /* XXX unwind and free previous fastpath allocations */
14774                BLOGE(sc, "Failed to create dma map for "
14775                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14776                return (1);
14777            }
14778        }
14779
14780        /* create dma map for the spare rx tpa mbuf cluster */
14781        if (bus_dmamap_create(fp->rx_mbuf_tag,
14782                              BUS_DMA_NOWAIT,
14783                              &fp->rx_tpa_info_mbuf_spare_map)) {
14784            /* XXX unwind and free previous fastpath allocations */
14785            BLOGE(sc, "Failed to create dma map for "
14786                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14787            return (1);
14788        }
14789
14790        bxe_init_sge_ring_bit_mask(fp);
14791    }
14792
14793    return (0);
14794}
14795
14796static void
14797bxe_free_hsi_mem(struct bxe_softc *sc)
14798{
14799    struct bxe_fastpath *fp;
14800    int max_agg_queues;
14801    int i, j;
14802
14803    if (sc->parent_dma_tag == NULL) {
14804        return; /* assume nothing was allocated */
14805    }
14806
14807    for (i = 0; i < sc->num_queues; i++) {
14808        fp = &sc->fp[i];
14809
14810        /*******************/
14811        /* FP STATUS BLOCK */
14812        /*******************/
14813
14814        bxe_dma_free(sc, &fp->sb_dma);
14815        memset(&fp->status_block, 0, sizeof(fp->status_block));
14816
14817        /******************/
14818        /* FP TX BD CHAIN */
14819        /******************/
14820
14821        bxe_dma_free(sc, &fp->tx_dma);
14822        fp->tx_chain = NULL;
14823
14824        /******************/
14825        /* FP RX BD CHAIN */
14826        /******************/
14827
14828        bxe_dma_free(sc, &fp->rx_dma);
14829        fp->rx_chain = NULL;
14830
14831        /*******************/
14832        /* FP RX RCQ CHAIN */
14833        /*******************/
14834
14835        bxe_dma_free(sc, &fp->rcq_dma);
14836        fp->rcq_chain = NULL;
14837
14838        /*******************/
14839        /* FP RX SGE CHAIN */
14840        /*******************/
14841
14842        bxe_dma_free(sc, &fp->rx_sge_dma);
14843        fp->rx_sge_chain = NULL;
14844
14845        /***********************/
14846        /* FP TX MBUF DMA MAPS */
14847        /***********************/
14848
14849        if (fp->tx_mbuf_tag != NULL) {
14850            for (j = 0; j < TX_BD_TOTAL; j++) {
14851                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14852                    bus_dmamap_unload(fp->tx_mbuf_tag,
14853                                      fp->tx_mbuf_chain[j].m_map);
14854                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14855                                       fp->tx_mbuf_chain[j].m_map);
14856                }
14857            }
14858
14859            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14860            fp->tx_mbuf_tag = NULL;
14861        }
14862
14863        /***********************/
14864        /* FP RX MBUF DMA MAPS */
14865        /***********************/
14866
14867        if (fp->rx_mbuf_tag != NULL) {
14868            for (j = 0; j < RX_BD_TOTAL; j++) {
14869                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14870                    bus_dmamap_unload(fp->rx_mbuf_tag,
14871                                      fp->rx_mbuf_chain[j].m_map);
14872                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14873                                       fp->rx_mbuf_chain[j].m_map);
14874                }
14875            }
14876
14877            if (fp->rx_mbuf_spare_map != NULL) {
14878                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14879                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14880            }
14881
14882            /***************************/
14883            /* FP RX TPA MBUF DMA MAPS */
14884            /***************************/
14885
14886            max_agg_queues = MAX_AGG_QS(sc);
14887
14888            for (j = 0; j < max_agg_queues; j++) {
14889                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14890                    bus_dmamap_unload(fp->rx_mbuf_tag,
14891                                      fp->rx_tpa_info[j].bd.m_map);
14892                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14893                                       fp->rx_tpa_info[j].bd.m_map);
14894                }
14895            }
14896
14897            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14898                bus_dmamap_unload(fp->rx_mbuf_tag,
14899                                  fp->rx_tpa_info_mbuf_spare_map);
14900                bus_dmamap_destroy(fp->rx_mbuf_tag,
14901                                   fp->rx_tpa_info_mbuf_spare_map);
14902            }
14903
14904            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14905            fp->rx_mbuf_tag = NULL;
14906        }
14907
14908        /***************************/
14909        /* FP RX SGE MBUF DMA MAPS */
14910        /***************************/
14911
14912        if (fp->rx_sge_mbuf_tag != NULL) {
14913            for (j = 0; j < RX_SGE_TOTAL; j++) {
14914                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14915                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14916                                      fp->rx_sge_mbuf_chain[j].m_map);
14917                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14918                                       fp->rx_sge_mbuf_chain[j].m_map);
14919                }
14920            }
14921
14922            if (fp->rx_sge_mbuf_spare_map != NULL) {
14923                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14924                                  fp->rx_sge_mbuf_spare_map);
14925                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14926                                   fp->rx_sge_mbuf_spare_map);
14927            }
14928
14929            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14930            fp->rx_sge_mbuf_tag = NULL;
14931        }
14932    }
14933
14934    /***************************/
14935    /* FW DECOMPRESSION BUFFER */
14936    /***************************/
14937
14938    bxe_dma_free(sc, &sc->gz_buf_dma);
14939    sc->gz_buf = NULL;
14940    free(sc->gz_strm, M_DEVBUF);
14941    sc->gz_strm = NULL;
14942
14943    /*******************/
14944    /* SLOW PATH QUEUE */
14945    /*******************/
14946
14947    bxe_dma_free(sc, &sc->spq_dma);
14948    sc->spq = NULL;
14949
14950    /*************/
14951    /* SLOW PATH */
14952    /*************/
14953
14954    bxe_dma_free(sc, &sc->sp_dma);
14955    sc->sp = NULL;
14956
14957    /***************/
14958    /* EVENT QUEUE */
14959    /***************/
14960
14961    bxe_dma_free(sc, &sc->eq_dma);
14962    sc->eq = NULL;
14963
14964    /************************/
14965    /* DEFAULT STATUS BLOCK */
14966    /************************/
14967
14968    bxe_dma_free(sc, &sc->def_sb_dma);
14969    sc->def_sb = NULL;
14970
14971    bus_dma_tag_destroy(sc->parent_dma_tag);
14972    sc->parent_dma_tag = NULL;
14973}
14974
14975/*
14976 * Previous driver DMAE transaction may have occurred when pre-boot stage
14977 * ended and boot began. This would invalidate the addresses of the
14978 * transaction, resulting in was-error bit set in the PCI causing all
14979 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
14980 * the interrupt which detected this from the pglueb and the was-done bit
14981 */
14982static void
14983bxe_prev_interrupted_dmae(struct bxe_softc *sc)
14984{
14985    uint32_t val;
14986
14987    if (!CHIP_IS_E1x(sc)) {
14988        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
14989        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
14990            BLOGD(sc, DBG_LOAD,
14991                  "Clearing 'was-error' bit that was set in pglueb");
14992            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
14993        }
14994    }
14995}
14996
14997static int
14998bxe_prev_mcp_done(struct bxe_softc *sc)
14999{
15000    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15001                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15002    if (!rc) {
15003        BLOGE(sc, "MCP response failure, aborting\n");
15004        return (-1);
15005    }
15006
15007    return (0);
15008}
15009
15010static struct bxe_prev_list_node *
15011bxe_prev_path_get_entry(struct bxe_softc *sc)
15012{
15013    struct bxe_prev_list_node *tmp;
15014
15015    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15016        if ((sc->pcie_bus == tmp->bus) &&
15017            (sc->pcie_device == tmp->slot) &&
15018            (SC_PATH(sc) == tmp->path)) {
15019            return (tmp);
15020        }
15021    }
15022
15023    return (NULL);
15024}
15025
15026static uint8_t
15027bxe_prev_is_path_marked(struct bxe_softc *sc)
15028{
15029    struct bxe_prev_list_node *tmp;
15030    int rc = FALSE;
15031
15032    mtx_lock(&bxe_prev_mtx);
15033
15034    tmp = bxe_prev_path_get_entry(sc);
15035    if (tmp) {
15036        if (tmp->aer) {
15037            BLOGD(sc, DBG_LOAD,
15038                  "Path %d/%d/%d was marked by AER\n",
15039                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15040        } else {
15041            rc = TRUE;
15042            BLOGD(sc, DBG_LOAD,
15043                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15044                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15045        }
15046    }
15047
15048    mtx_unlock(&bxe_prev_mtx);
15049
15050    return (rc);
15051}
15052
15053static int
15054bxe_prev_mark_path(struct bxe_softc *sc,
15055                   uint8_t          after_undi)
15056{
15057    struct bxe_prev_list_node *tmp;
15058
15059    mtx_lock(&bxe_prev_mtx);
15060
15061    /* Check whether the entry for this path already exists */
15062    tmp = bxe_prev_path_get_entry(sc);
15063    if (tmp) {
15064        if (!tmp->aer) {
15065            BLOGD(sc, DBG_LOAD,
15066                  "Re-marking AER in path %d/%d/%d\n",
15067                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15068        } else {
15069            BLOGD(sc, DBG_LOAD,
15070                  "Removing AER indication from path %d/%d/%d\n",
15071                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15072            tmp->aer = 0;
15073        }
15074
15075        mtx_unlock(&bxe_prev_mtx);
15076        return (0);
15077    }
15078
15079    mtx_unlock(&bxe_prev_mtx);
15080
15081    /* Create an entry for this path and add it */
15082    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15083                 (M_NOWAIT | M_ZERO));
15084    if (!tmp) {
15085        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15086        return (-1);
15087    }
15088
15089    tmp->bus  = sc->pcie_bus;
15090    tmp->slot = sc->pcie_device;
15091    tmp->path = SC_PATH(sc);
15092    tmp->aer  = 0;
15093    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15094
15095    mtx_lock(&bxe_prev_mtx);
15096
15097    BLOGD(sc, DBG_LOAD,
15098          "Marked path %d/%d/%d - finished previous unload\n",
15099          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15100    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15101
15102    mtx_unlock(&bxe_prev_mtx);
15103
15104    return (0);
15105}
15106
15107static int
15108bxe_do_flr(struct bxe_softc *sc)
15109{
15110    int i;
15111
15112    /* only E2 and onwards support FLR */
15113    if (CHIP_IS_E1x(sc)) {
15114        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15115        return (-1);
15116    }
15117
15118    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15119    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15120        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15121              sc->devinfo.bc_ver);
15122        return (-1);
15123    }
15124
15125    /* Wait for Transaction Pending bit clean */
15126    for (i = 0; i < 4; i++) {
15127        if (i) {
15128            DELAY(((1 << (i - 1)) * 100) * 1000);
15129        }
15130
15131        if (!bxe_is_pcie_pending(sc)) {
15132            goto clear;
15133        }
15134    }
15135
15136    BLOGE(sc, "PCIE transaction is not cleared, "
15137              "proceeding with reset anyway\n");
15138
15139clear:
15140
15141    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15142    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15143
15144    return (0);
15145}
15146
15147struct bxe_mac_vals {
15148    uint32_t xmac_addr;
15149    uint32_t xmac_val;
15150    uint32_t emac_addr;
15151    uint32_t emac_val;
15152    uint32_t umac_addr;
15153    uint32_t umac_val;
15154    uint32_t bmac_addr;
15155    uint32_t bmac_val[2];
15156};
15157
15158static void
15159bxe_prev_unload_close_mac(struct bxe_softc *sc,
15160                          struct bxe_mac_vals *vals)
15161{
15162    uint32_t val, base_addr, offset, mask, reset_reg;
15163    uint8_t mac_stopped = FALSE;
15164    uint8_t port = SC_PORT(sc);
15165    uint32_t wb_data[2];
15166
15167    /* reset addresses as they also mark which values were changed */
15168    vals->bmac_addr = 0;
15169    vals->umac_addr = 0;
15170    vals->xmac_addr = 0;
15171    vals->emac_addr = 0;
15172
15173    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15174
15175    if (!CHIP_IS_E3(sc)) {
15176        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15177        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15178        if ((mask & reset_reg) && val) {
15179            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15180            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15181                                    : NIG_REG_INGRESS_BMAC0_MEM;
15182            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15183                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15184
15185            /*
15186             * use rd/wr since we cannot use dmae. This is safe
15187             * since MCP won't access the bus due to the request
15188             * to unload, and no function on the path can be
15189             * loaded at this time.
15190             */
15191            wb_data[0] = REG_RD(sc, base_addr + offset);
15192            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15193            vals->bmac_addr = base_addr + offset;
15194            vals->bmac_val[0] = wb_data[0];
15195            vals->bmac_val[1] = wb_data[1];
15196            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15197            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15198            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15199        }
15200
15201        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15202        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15203        vals->emac_val = REG_RD(sc, vals->emac_addr);
15204        REG_WR(sc, vals->emac_addr, 0);
15205        mac_stopped = TRUE;
15206    } else {
15207        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15208            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15209            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15210            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15211            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15212            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15213            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15214            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15215            REG_WR(sc, vals->xmac_addr, 0);
15216            mac_stopped = TRUE;
15217        }
15218
15219        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15220        if (mask & reset_reg) {
15221            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15222            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15223            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15224            vals->umac_val = REG_RD(sc, vals->umac_addr);
15225            REG_WR(sc, vals->umac_addr, 0);
15226            mac_stopped = TRUE;
15227        }
15228    }
15229
15230    if (mac_stopped) {
15231        DELAY(20000);
15232    }
15233}
15234
15235#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15236#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15237#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15238#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15239
15240static void
15241bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15242                         uint8_t          port,
15243                         uint8_t          inc)
15244{
15245    uint16_t rcq, bd;
15246    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15247
15248    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15249    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15250
15251    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15252    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15253
15254    BLOGD(sc, DBG_LOAD,
15255          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15256          port, bd, rcq);
15257}
15258
15259static int
15260bxe_prev_unload_common(struct bxe_softc *sc)
15261{
15262    uint32_t reset_reg, tmp_reg = 0, rc;
15263    uint8_t prev_undi = FALSE;
15264    struct bxe_mac_vals mac_vals;
15265    uint32_t timer_count = 1000;
15266    uint32_t prev_brb;
15267
15268    /*
15269     * It is possible a previous function received 'common' answer,
15270     * but hasn't loaded yet, therefore creating a scenario of
15271     * multiple functions receiving 'common' on the same path.
15272     */
15273    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15274
15275    memset(&mac_vals, 0, sizeof(mac_vals));
15276
15277    if (bxe_prev_is_path_marked(sc)) {
15278        return (bxe_prev_mcp_done(sc));
15279    }
15280
15281    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15282
15283    /* Reset should be performed after BRB is emptied */
15284    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15285        /* Close the MAC Rx to prevent BRB from filling up */
15286        bxe_prev_unload_close_mac(sc, &mac_vals);
15287
15288        /* close LLH filters towards the BRB */
15289        elink_set_rx_filter(&sc->link_params, 0);
15290
15291        /*
15292         * Check if the UNDI driver was previously loaded.
15293         * UNDI driver initializes CID offset for normal bell to 0x7
15294         */
15295        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15296            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15297            if (tmp_reg == 0x7) {
15298                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15299                prev_undi = TRUE;
15300                /* clear the UNDI indication */
15301                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15302                /* clear possible idle check errors */
15303                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15304            }
15305        }
15306
15307        /* wait until BRB is empty */
15308        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15309        while (timer_count) {
15310            prev_brb = tmp_reg;
15311
15312            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15313            if (!tmp_reg) {
15314                break;
15315            }
15316
15317            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15318
15319            /* reset timer as long as BRB actually gets emptied */
15320            if (prev_brb > tmp_reg) {
15321                timer_count = 1000;
15322            } else {
15323                timer_count--;
15324            }
15325
15326            /* If UNDI resides in memory, manually increment it */
15327            if (prev_undi) {
15328                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15329            }
15330
15331            DELAY(10);
15332        }
15333
15334        if (!timer_count) {
15335            BLOGE(sc, "Failed to empty BRB\n");
15336        }
15337    }
15338
15339    /* No packets are in the pipeline, path is ready for reset */
15340    bxe_reset_common(sc);
15341
15342    if (mac_vals.xmac_addr) {
15343        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15344    }
15345    if (mac_vals.umac_addr) {
15346        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15347    }
15348    if (mac_vals.emac_addr) {
15349        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15350    }
15351    if (mac_vals.bmac_addr) {
15352        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15353        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15354    }
15355
15356    rc = bxe_prev_mark_path(sc, prev_undi);
15357    if (rc) {
15358        bxe_prev_mcp_done(sc);
15359        return (rc);
15360    }
15361
15362    return (bxe_prev_mcp_done(sc));
15363}
15364
15365static int
15366bxe_prev_unload_uncommon(struct bxe_softc *sc)
15367{
15368    int rc;
15369
15370    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15371
15372    /* Test if previous unload process was already finished for this path */
15373    if (bxe_prev_is_path_marked(sc)) {
15374        return (bxe_prev_mcp_done(sc));
15375    }
15376
15377    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15378
15379    /*
15380     * If function has FLR capabilities, and existing FW version matches
15381     * the one required, then FLR will be sufficient to clean any residue
15382     * left by previous driver
15383     */
15384    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15385    if (!rc) {
15386        /* fw version is good */
15387        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15388        rc = bxe_do_flr(sc);
15389    }
15390
15391    if (!rc) {
15392        /* FLR was performed */
15393        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15394        return (0);
15395    }
15396
15397    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15398
15399    /* Close the MCP request, return failure*/
15400    rc = bxe_prev_mcp_done(sc);
15401    if (!rc) {
15402        rc = BXE_PREV_WAIT_NEEDED;
15403    }
15404
15405    return (rc);
15406}
15407
15408static int
15409bxe_prev_unload(struct bxe_softc *sc)
15410{
15411    int time_counter = 10;
15412    uint32_t fw, hw_lock_reg, hw_lock_val;
15413    uint32_t rc = 0;
15414
15415    /*
15416     * Clear HW from errors which may have resulted from an interrupted
15417     * DMAE transaction.
15418     */
15419    bxe_prev_interrupted_dmae(sc);
15420
15421    /* Release previously held locks */
15422    hw_lock_reg =
15423        (SC_FUNC(sc) <= 5) ?
15424            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15425            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15426
15427    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15428    if (hw_lock_val) {
15429        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15430            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15431            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15432                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15433        }
15434        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15435        REG_WR(sc, hw_lock_reg, 0xffffffff);
15436    } else {
15437        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15438    }
15439
15440    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15441        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15442        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15443    }
15444
15445    do {
15446        /* Lock MCP using an unload request */
15447        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15448        if (!fw) {
15449            BLOGE(sc, "MCP response failure, aborting\n");
15450            rc = -1;
15451            break;
15452        }
15453
15454        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15455            rc = bxe_prev_unload_common(sc);
15456            break;
15457        }
15458
15459        /* non-common reply from MCP night require looping */
15460        rc = bxe_prev_unload_uncommon(sc);
15461        if (rc != BXE_PREV_WAIT_NEEDED) {
15462            break;
15463        }
15464
15465        DELAY(20000);
15466    } while (--time_counter);
15467
15468    if (!time_counter || rc) {
15469        BLOGE(sc, "Failed to unload previous driver!"
15470            " time_counter %d rc %d\n", time_counter, rc);
15471        rc = -1;
15472    }
15473
15474    return (rc);
15475}
15476
15477void
15478bxe_dcbx_set_state(struct bxe_softc *sc,
15479                   uint8_t          dcb_on,
15480                   uint32_t         dcbx_enabled)
15481{
15482    if (!CHIP_IS_E1x(sc)) {
15483        sc->dcb_state = dcb_on;
15484        sc->dcbx_enabled = dcbx_enabled;
15485    } else {
15486        sc->dcb_state = FALSE;
15487        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15488    }
15489    BLOGD(sc, DBG_LOAD,
15490          "DCB state [%s:%s]\n",
15491          dcb_on ? "ON" : "OFF",
15492          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15493          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15494          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15495          "on-chip with negotiation" : "invalid");
15496}
15497
15498/* must be called after sriov-enable */
15499static int
15500bxe_set_qm_cid_count(struct bxe_softc *sc)
15501{
15502    int cid_count = BXE_L2_MAX_CID(sc);
15503
15504    if (IS_SRIOV(sc)) {
15505        cid_count += BXE_VF_CIDS;
15506    }
15507
15508    if (CNIC_SUPPORT(sc)) {
15509        cid_count += CNIC_CID_MAX;
15510    }
15511
15512    return (roundup(cid_count, QM_CID_ROUND));
15513}
15514
15515static void
15516bxe_init_multi_cos(struct bxe_softc *sc)
15517{
15518    int pri, cos;
15519
15520    uint32_t pri_map = 0; /* XXX change to user config */
15521
15522    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15523        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15524        if (cos < sc->max_cos) {
15525            sc->prio_to_cos[pri] = cos;
15526        } else {
15527            BLOGW(sc, "Invalid COS %d for priority %d "
15528                      "(max COS is %d), setting to 0\n",
15529                  cos, pri, (sc->max_cos - 1));
15530            sc->prio_to_cos[pri] = 0;
15531        }
15532    }
15533}
15534
15535static int
15536bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15537{
15538    struct bxe_softc *sc;
15539    int error, result;
15540
15541    result = 0;
15542    error = sysctl_handle_int(oidp, &result, 0, req);
15543
15544    if (error || !req->newptr) {
15545        return (error);
15546    }
15547
15548    if (result == 1) {
15549        uint32_t  temp;
15550        sc = (struct bxe_softc *)arg1;
15551
15552        BLOGI(sc, "... dumping driver state ...\n");
15553        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15554        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15555    }
15556
15557    return (error);
15558}
15559
15560static int
15561bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15562{
15563    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15564    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15565    uint32_t *offset;
15566    uint64_t value = 0;
15567    int index = (int)arg2;
15568
15569    if (index >= BXE_NUM_ETH_STATS) {
15570        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15571        return (-1);
15572    }
15573
15574    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15575
15576    switch (bxe_eth_stats_arr[index].size) {
15577    case 4:
15578        value = (uint64_t)*offset;
15579        break;
15580    case 8:
15581        value = HILO_U64(*offset, *(offset + 1));
15582        break;
15583    default:
15584        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15585              index, bxe_eth_stats_arr[index].size);
15586        return (-1);
15587    }
15588
15589    return (sysctl_handle_64(oidp, &value, 0, req));
15590}
15591
15592static int
15593bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15594{
15595    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15596    uint32_t *eth_stats;
15597    uint32_t *offset;
15598    uint64_t value = 0;
15599    uint32_t q_stat = (uint32_t)arg2;
15600    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15601    uint32_t index = (q_stat & 0xffff);
15602
15603    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15604
15605    if (index >= BXE_NUM_ETH_Q_STATS) {
15606        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15607        return (-1);
15608    }
15609
15610    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15611
15612    switch (bxe_eth_q_stats_arr[index].size) {
15613    case 4:
15614        value = (uint64_t)*offset;
15615        break;
15616    case 8:
15617        value = HILO_U64(*offset, *(offset + 1));
15618        break;
15619    default:
15620        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15621              index, bxe_eth_q_stats_arr[index].size);
15622        return (-1);
15623    }
15624
15625    return (sysctl_handle_64(oidp, &value, 0, req));
15626}
15627
15628static void bxe_force_link_reset(struct bxe_softc *sc)
15629{
15630
15631        bxe_acquire_phy_lock(sc);
15632        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15633        bxe_release_phy_lock(sc);
15634}
15635
15636static int
15637bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15638{
15639        struct bxe_softc *sc = (struct bxe_softc *)arg1;;
15640        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15641        int rc = 0;
15642        int error;
15643        int result;
15644
15645
15646        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15647
15648        if (error || !req->newptr) {
15649                return (error);
15650        }
15651        if ((sc->bxe_pause_param < 0) ||  (sc->bxe_pause_param > 8)) {
15652                BLOGW(sc, "invalid pause param (%d) - use intergers between 1 & 8\n",sc->bxe_pause_param);
15653                sc->bxe_pause_param = 8;
15654        }
15655
15656        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15657
15658
15659        if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg))  {
15660                        BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15661                        return -EINVAL;
15662        }
15663
15664        if(IS_MF(sc))
15665                return 0;
15666       sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15667        if(result & ELINK_FLOW_CTRL_RX)
15668                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15669
15670        if(result & ELINK_FLOW_CTRL_TX)
15671                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15672        if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15673                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15674
15675        if(result & 0x400) {
15676                if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15677                        sc->link_params.req_flow_ctrl[cfg_idx] =
15678                                ELINK_FLOW_CTRL_AUTO;
15679                }
15680                sc->link_params.req_fc_auto_adv = 0;
15681                if (result & ELINK_FLOW_CTRL_RX)
15682                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15683
15684                if (result & ELINK_FLOW_CTRL_TX)
15685                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15686                if (!sc->link_params.req_fc_auto_adv)
15687                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15688        }
15689         if (IS_PF(sc)) {
15690                        if (sc->link_vars.link_up) {
15691                                bxe_stats_handle(sc, STATS_EVENT_STOP);
15692                        }
15693			if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15694                        bxe_force_link_reset(sc);
15695                        bxe_acquire_phy_lock(sc);
15696
15697                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15698
15699                        bxe_release_phy_lock(sc);
15700
15701                        bxe_calc_fc_adv(sc);
15702                        }
15703        }
15704        return rc;
15705}
15706
15707
15708static void
15709bxe_add_sysctls(struct bxe_softc *sc)
15710{
15711    struct sysctl_ctx_list *ctx;
15712    struct sysctl_oid_list *children;
15713    struct sysctl_oid *queue_top, *queue;
15714    struct sysctl_oid_list *queue_top_children, *queue_children;
15715    char queue_num_buf[32];
15716    uint32_t q_stat;
15717    int i, j;
15718
15719    ctx = device_get_sysctl_ctx(sc->dev);
15720    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15721
15722    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15723                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15724                      "version");
15725
15726    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15727             BCM_5710_FW_MAJOR_VERSION,
15728             BCM_5710_FW_MINOR_VERSION,
15729             BCM_5710_FW_REVISION_VERSION,
15730             BCM_5710_FW_ENGINEERING_VERSION);
15731
15732    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15733        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15734         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15735         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15736         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15737                                                                "Unknown"));
15738    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15739                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15740                    "multifunction vnics per port");
15741
15742    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15743        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15744         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15745         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15746                                              "???GT/s"),
15747        sc->devinfo.pcie_link_width);
15748
15749    sc->debug = bxe_debug;
15750
15751#if __FreeBSD_version >= 900000
15752    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15753                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15754                      "bootcode version");
15755    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15756                      CTLFLAG_RD, sc->fw_ver_str, 0,
15757                      "firmware version");
15758    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15759                      CTLFLAG_RD, sc->mf_mode_str, 0,
15760                      "multifunction mode");
15761    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15762                      CTLFLAG_RD, sc->mac_addr_str, 0,
15763                      "mac address");
15764    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15765                      CTLFLAG_RD, sc->pci_link_str, 0,
15766                      "pci link status");
15767    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15768                    CTLFLAG_RW, &sc->debug,
15769                    "debug logging mode");
15770#else
15771    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15772                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15773                      "bootcode version");
15774    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15775                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15776                      "firmware version");
15777    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15778                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15779                      "multifunction mode");
15780    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15781                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15782                      "mac address");
15783    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15784                      CTLFLAG_RD, &sc->pci_link_str, 0,
15785                      "pci link status");
15786    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15787                    CTLFLAG_RW, &sc->debug, 0,
15788                    "debug logging mode");
15789#endif /* #if __FreeBSD_version >= 900000 */
15790
15791    sc->trigger_grcdump = 0;
15792    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15793                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15794                   "trigger grcdump should be invoked"
15795                   "  before collecting grcdump");
15796
15797    sc->grcdump_started = 0;
15798    sc->grcdump_done = 0;
15799    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15800                   CTLFLAG_RD, &sc->grcdump_done, 0,
15801                   "set by driver when grcdump is done");
15802
15803    sc->rx_budget = bxe_rx_budget;
15804    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15805                    CTLFLAG_RW, &sc->rx_budget, 0,
15806                    "rx processing budget");
15807
15808   SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
15809                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15810                    bxe_sysctl_pauseparam, "IU",
15811                    "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
15812
15813
15814    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15815                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15816                    bxe_sysctl_state, "IU", "dump driver state");
15817
15818    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15819        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15820                        bxe_eth_stats_arr[i].string,
15821                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15822                        bxe_sysctl_eth_stat, "LU",
15823                        bxe_eth_stats_arr[i].string);
15824    }
15825
15826    /* add a new parent node for all queues "dev.bxe.#.queue" */
15827    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15828                                CTLFLAG_RD, NULL, "queue");
15829    queue_top_children = SYSCTL_CHILDREN(queue_top);
15830
15831    for (i = 0; i < sc->num_queues; i++) {
15832        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15833        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15834        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15835                                queue_num_buf, CTLFLAG_RD, NULL,
15836                                "single queue");
15837        queue_children = SYSCTL_CHILDREN(queue);
15838
15839        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15840            q_stat = ((i << 16) | j);
15841            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15842                            bxe_eth_q_stats_arr[j].string,
15843                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15844                            bxe_sysctl_eth_q_stat, "LU",
15845                            bxe_eth_q_stats_arr[j].string);
15846        }
15847    }
15848}
15849
15850static int
15851bxe_alloc_buf_rings(struct bxe_softc *sc)
15852{
15853#if __FreeBSD_version >= 901504
15854
15855    int i;
15856    struct bxe_fastpath *fp;
15857
15858    for (i = 0; i < sc->num_queues; i++) {
15859
15860        fp = &sc->fp[i];
15861
15862        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15863                                   M_NOWAIT, &fp->tx_mtx);
15864        if (fp->tx_br == NULL)
15865            return (-1);
15866    }
15867#endif
15868    return (0);
15869}
15870
15871static void
15872bxe_free_buf_rings(struct bxe_softc *sc)
15873{
15874#if __FreeBSD_version >= 901504
15875
15876    int i;
15877    struct bxe_fastpath *fp;
15878
15879    for (i = 0; i < sc->num_queues; i++) {
15880
15881        fp = &sc->fp[i];
15882
15883        if (fp->tx_br) {
15884            buf_ring_free(fp->tx_br, M_DEVBUF);
15885            fp->tx_br = NULL;
15886        }
15887    }
15888
15889#endif
15890}
15891
15892static void
15893bxe_init_fp_mutexs(struct bxe_softc *sc)
15894{
15895    int i;
15896    struct bxe_fastpath *fp;
15897
15898    for (i = 0; i < sc->num_queues; i++) {
15899
15900        fp = &sc->fp[i];
15901
15902        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15903            "bxe%d_fp%d_tx_lock", sc->unit, i);
15904        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15905
15906        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15907            "bxe%d_fp%d_rx_lock", sc->unit, i);
15908        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15909    }
15910}
15911
15912static void
15913bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15914{
15915    int i;
15916    struct bxe_fastpath *fp;
15917
15918    for (i = 0; i < sc->num_queues; i++) {
15919
15920        fp = &sc->fp[i];
15921
15922        if (mtx_initialized(&fp->tx_mtx)) {
15923            mtx_destroy(&fp->tx_mtx);
15924        }
15925
15926        if (mtx_initialized(&fp->rx_mtx)) {
15927            mtx_destroy(&fp->rx_mtx);
15928        }
15929    }
15930}
15931
15932
15933/*
15934 * Device attach function.
15935 *
15936 * Allocates device resources, performs secondary chip identification, and
15937 * initializes driver instance variables. This function is called from driver
15938 * load after a successful probe.
15939 *
15940 * Returns:
15941 *   0 = Success, >0 = Failure
15942 */
15943static int
15944bxe_attach(device_t dev)
15945{
15946    struct bxe_softc *sc;
15947
15948    sc = device_get_softc(dev);
15949
15950    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15951
15952    sc->state = BXE_STATE_CLOSED;
15953
15954    sc->dev  = dev;
15955    sc->unit = device_get_unit(dev);
15956
15957    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15958
15959    sc->pcie_bus    = pci_get_bus(dev);
15960    sc->pcie_device = pci_get_slot(dev);
15961    sc->pcie_func   = pci_get_function(dev);
15962
15963    /* enable bus master capability */
15964    pci_enable_busmaster(dev);
15965
15966    /* get the BARs */
15967    if (bxe_allocate_bars(sc) != 0) {
15968        return (ENXIO);
15969    }
15970
15971    /* initialize the mutexes */
15972    bxe_init_mutexes(sc);
15973
15974    /* prepare the periodic callout */
15975    callout_init(&sc->periodic_callout, 0);
15976
15977    /* prepare the chip taskqueue */
15978    sc->chip_tq_flags = CHIP_TQ_NONE;
15979    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15980             "bxe%d_chip_tq", sc->unit);
15981    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15982    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15983                                   taskqueue_thread_enqueue,
15984                                   &sc->chip_tq);
15985    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15986                            "%s", sc->chip_tq_name);
15987
15988    /* get device info and set params */
15989    if (bxe_get_device_info(sc) != 0) {
15990        BLOGE(sc, "getting device info\n");
15991        bxe_deallocate_bars(sc);
15992        pci_disable_busmaster(dev);
15993        return (ENXIO);
15994    }
15995
15996    /* get final misc params */
15997    bxe_get_params(sc);
15998
15999    /* set the default MTU (changed via ifconfig) */
16000    sc->mtu = ETHERMTU;
16001
16002    bxe_set_modes_bitmap(sc);
16003
16004    /* XXX
16005     * If in AFEX mode and the function is configured for FCoE
16006     * then bail... no L2 allowed.
16007     */
16008
16009    /* get phy settings from shmem and 'and' against admin settings */
16010    bxe_get_phy_info(sc);
16011
16012    /* initialize the FreeBSD ifnet interface */
16013    if (bxe_init_ifnet(sc) != 0) {
16014        bxe_release_mutexes(sc);
16015        bxe_deallocate_bars(sc);
16016        pci_disable_busmaster(dev);
16017        return (ENXIO);
16018    }
16019
16020    if (bxe_add_cdev(sc) != 0) {
16021        if (sc->ifp != NULL) {
16022            ether_ifdetach(sc->ifp);
16023        }
16024        ifmedia_removeall(&sc->ifmedia);
16025        bxe_release_mutexes(sc);
16026        bxe_deallocate_bars(sc);
16027        pci_disable_busmaster(dev);
16028        return (ENXIO);
16029    }
16030
16031    /* allocate device interrupts */
16032    if (bxe_interrupt_alloc(sc) != 0) {
16033        bxe_del_cdev(sc);
16034        if (sc->ifp != NULL) {
16035            ether_ifdetach(sc->ifp);
16036        }
16037        ifmedia_removeall(&sc->ifmedia);
16038        bxe_release_mutexes(sc);
16039        bxe_deallocate_bars(sc);
16040        pci_disable_busmaster(dev);
16041        return (ENXIO);
16042    }
16043
16044    bxe_init_fp_mutexs(sc);
16045
16046    if (bxe_alloc_buf_rings(sc) != 0) {
16047	bxe_free_buf_rings(sc);
16048        bxe_interrupt_free(sc);
16049        bxe_del_cdev(sc);
16050        if (sc->ifp != NULL) {
16051            ether_ifdetach(sc->ifp);
16052        }
16053        ifmedia_removeall(&sc->ifmedia);
16054        bxe_release_mutexes(sc);
16055        bxe_deallocate_bars(sc);
16056        pci_disable_busmaster(dev);
16057        return (ENXIO);
16058    }
16059
16060    /* allocate ilt */
16061    if (bxe_alloc_ilt_mem(sc) != 0) {
16062	bxe_free_buf_rings(sc);
16063        bxe_interrupt_free(sc);
16064        bxe_del_cdev(sc);
16065        if (sc->ifp != NULL) {
16066            ether_ifdetach(sc->ifp);
16067        }
16068        ifmedia_removeall(&sc->ifmedia);
16069        bxe_release_mutexes(sc);
16070        bxe_deallocate_bars(sc);
16071        pci_disable_busmaster(dev);
16072        return (ENXIO);
16073    }
16074
16075    /* allocate the host hardware/software hsi structures */
16076    if (bxe_alloc_hsi_mem(sc) != 0) {
16077        bxe_free_ilt_mem(sc);
16078	bxe_free_buf_rings(sc);
16079        bxe_interrupt_free(sc);
16080        bxe_del_cdev(sc);
16081        if (sc->ifp != NULL) {
16082            ether_ifdetach(sc->ifp);
16083        }
16084        ifmedia_removeall(&sc->ifmedia);
16085        bxe_release_mutexes(sc);
16086        bxe_deallocate_bars(sc);
16087        pci_disable_busmaster(dev);
16088        return (ENXIO);
16089    }
16090
16091    /* need to reset chip if UNDI was active */
16092    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16093        /* init fw_seq */
16094        sc->fw_seq =
16095            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16096             DRV_MSG_SEQ_NUMBER_MASK);
16097        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16098        bxe_prev_unload(sc);
16099    }
16100
16101#if 1
16102    /* XXX */
16103    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16104#else
16105    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16106        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16107        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16108        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16109        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16110        bxe_dcbx_init_params(sc);
16111    } else {
16112        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16113    }
16114#endif
16115
16116    /* calculate qm_cid_count */
16117    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16118    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16119
16120    sc->max_cos = 1;
16121    bxe_init_multi_cos(sc);
16122
16123    bxe_add_sysctls(sc);
16124
16125    return (0);
16126}
16127
16128/*
16129 * Device detach function.
16130 *
16131 * Stops the controller, resets the controller, and releases resources.
16132 *
16133 * Returns:
16134 *   0 = Success, >0 = Failure
16135 */
16136static int
16137bxe_detach(device_t dev)
16138{
16139    struct bxe_softc *sc;
16140    if_t ifp;
16141
16142    sc = device_get_softc(dev);
16143
16144    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16145
16146    ifp = sc->ifp;
16147    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16148        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16149        return(EBUSY);
16150    }
16151
16152    bxe_del_cdev(sc);
16153
16154    /* stop the periodic callout */
16155    bxe_periodic_stop(sc);
16156
16157    /* stop the chip taskqueue */
16158    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16159    if (sc->chip_tq) {
16160        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16161        taskqueue_free(sc->chip_tq);
16162        sc->chip_tq = NULL;
16163    }
16164
16165    /* stop and reset the controller if it was open */
16166    if (sc->state != BXE_STATE_CLOSED) {
16167        BXE_CORE_LOCK(sc);
16168        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16169        sc->state = BXE_STATE_DISABLED;
16170        BXE_CORE_UNLOCK(sc);
16171    }
16172
16173    /* release the network interface */
16174    if (ifp != NULL) {
16175        ether_ifdetach(ifp);
16176    }
16177    ifmedia_removeall(&sc->ifmedia);
16178
16179    /* XXX do the following based on driver state... */
16180
16181    /* free the host hardware/software hsi structures */
16182    bxe_free_hsi_mem(sc);
16183
16184    /* free ilt */
16185    bxe_free_ilt_mem(sc);
16186
16187    bxe_free_buf_rings(sc);
16188
16189    /* release the interrupts */
16190    bxe_interrupt_free(sc);
16191
16192    /* Release the mutexes*/
16193    bxe_destroy_fp_mutexs(sc);
16194    bxe_release_mutexes(sc);
16195
16196
16197    /* Release the PCIe BAR mapped memory */
16198    bxe_deallocate_bars(sc);
16199
16200    /* Release the FreeBSD interface. */
16201    if (sc->ifp != NULL) {
16202        if_free(sc->ifp);
16203    }
16204
16205    pci_disable_busmaster(dev);
16206
16207    return (0);
16208}
16209
16210/*
16211 * Device shutdown function.
16212 *
16213 * Stops and resets the controller.
16214 *
16215 * Returns:
16216 *   Nothing
16217 */
16218static int
16219bxe_shutdown(device_t dev)
16220{
16221    struct bxe_softc *sc;
16222
16223    sc = device_get_softc(dev);
16224
16225    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16226
16227    /* stop the periodic callout */
16228    bxe_periodic_stop(sc);
16229
16230    BXE_CORE_LOCK(sc);
16231    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16232    BXE_CORE_UNLOCK(sc);
16233
16234    return (0);
16235}
16236
16237void
16238bxe_igu_ack_sb(struct bxe_softc *sc,
16239               uint8_t          igu_sb_id,
16240               uint8_t          segment,
16241               uint16_t         index,
16242               uint8_t          op,
16243               uint8_t          update)
16244{
16245    uint32_t igu_addr = sc->igu_base_addr;
16246    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16247    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16248}
16249
16250static void
16251bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16252                     uint8_t          func,
16253                     uint8_t          idu_sb_id,
16254                     uint8_t          is_pf)
16255{
16256    uint32_t data, ctl, cnt = 100;
16257    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16258    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16259    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16260    uint32_t sb_bit =  1 << (idu_sb_id%32);
16261    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16262    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16263
16264    /* Not supported in BC mode */
16265    if (CHIP_INT_MODE_IS_BC(sc)) {
16266        return;
16267    }
16268
16269    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16270             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16271            IGU_REGULAR_CLEANUP_SET |
16272            IGU_REGULAR_BCLEANUP);
16273
16274    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16275           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16276           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16277
16278    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16279            data, igu_addr_data);
16280    REG_WR(sc, igu_addr_data, data);
16281
16282    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16283                      BUS_SPACE_BARRIER_WRITE);
16284    mb();
16285
16286    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16287            ctl, igu_addr_ctl);
16288    REG_WR(sc, igu_addr_ctl, ctl);
16289
16290    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16291                      BUS_SPACE_BARRIER_WRITE);
16292    mb();
16293
16294    /* wait for clean up to finish */
16295    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16296        DELAY(20000);
16297    }
16298
16299    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16300        BLOGD(sc, DBG_LOAD,
16301              "Unable to finish IGU cleanup: "
16302              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16303              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16304    }
16305}
16306
16307static void
16308bxe_igu_clear_sb(struct bxe_softc *sc,
16309                 uint8_t          idu_sb_id)
16310{
16311    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16312}
16313
16314
16315
16316
16317
16318
16319
16320/*******************/
16321/* ECORE CALLBACKS */
16322/*******************/
16323
16324static void
16325bxe_reset_common(struct bxe_softc *sc)
16326{
16327    uint32_t val = 0x1400;
16328
16329    /* reset_common */
16330    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16331
16332    if (CHIP_IS_E3(sc)) {
16333        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16334        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16335    }
16336
16337    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16338}
16339
16340static void
16341bxe_common_init_phy(struct bxe_softc *sc)
16342{
16343    uint32_t shmem_base[2];
16344    uint32_t shmem2_base[2];
16345
16346    /* Avoid common init in case MFW supports LFA */
16347    if (SHMEM2_RD(sc, size) >
16348        (uint32_t)offsetof(struct shmem2_region,
16349                           lfa_host_addr[SC_PORT(sc)])) {
16350        return;
16351    }
16352
16353    shmem_base[0]  = sc->devinfo.shmem_base;
16354    shmem2_base[0] = sc->devinfo.shmem2_base;
16355
16356    if (!CHIP_IS_E1x(sc)) {
16357        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16358        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16359    }
16360
16361    bxe_acquire_phy_lock(sc);
16362    elink_common_init_phy(sc, shmem_base, shmem2_base,
16363                          sc->devinfo.chip_id, 0);
16364    bxe_release_phy_lock(sc);
16365}
16366
16367static void
16368bxe_pf_disable(struct bxe_softc *sc)
16369{
16370    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16371
16372    val &= ~IGU_PF_CONF_FUNC_EN;
16373
16374    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16375    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16376    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16377}
16378
16379static void
16380bxe_init_pxp(struct bxe_softc *sc)
16381{
16382    uint16_t devctl;
16383    int r_order, w_order;
16384
16385    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16386
16387    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16388
16389    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16390
16391    if (sc->mrrs == -1) {
16392        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16393    } else {
16394        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16395        r_order = sc->mrrs;
16396    }
16397
16398    ecore_init_pxp_arb(sc, r_order, w_order);
16399}
16400
16401static uint32_t
16402bxe_get_pretend_reg(struct bxe_softc *sc)
16403{
16404    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16405    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16406    return (base + (SC_ABS_FUNC(sc)) * stride);
16407}
16408
16409/*
16410 * Called only on E1H or E2.
16411 * When pretending to be PF, the pretend value is the function number 0..7.
16412 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16413 * combination.
16414 */
16415static int
16416bxe_pretend_func(struct bxe_softc *sc,
16417                 uint16_t         pretend_func_val)
16418{
16419    uint32_t pretend_reg;
16420
16421    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16422        return (-1);
16423    }
16424
16425    /* get my own pretend register */
16426    pretend_reg = bxe_get_pretend_reg(sc);
16427    REG_WR(sc, pretend_reg, pretend_func_val);
16428    REG_RD(sc, pretend_reg);
16429    return (0);
16430}
16431
16432static void
16433bxe_iov_init_dmae(struct bxe_softc *sc)
16434{
16435    return;
16436}
16437
16438static void
16439bxe_iov_init_dq(struct bxe_softc *sc)
16440{
16441    return;
16442}
16443
16444/* send a NIG loopback debug packet */
16445static void
16446bxe_lb_pckt(struct bxe_softc *sc)
16447{
16448    uint32_t wb_write[3];
16449
16450    /* Ethernet source and destination addresses */
16451    wb_write[0] = 0x55555555;
16452    wb_write[1] = 0x55555555;
16453    wb_write[2] = 0x20;     /* SOP */
16454    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16455
16456    /* NON-IP protocol */
16457    wb_write[0] = 0x09000000;
16458    wb_write[1] = 0x55555555;
16459    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16460    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16461}
16462
16463/*
16464 * Some of the internal memories are not directly readable from the driver.
16465 * To test them we send debug packets.
16466 */
16467static int
16468bxe_int_mem_test(struct bxe_softc *sc)
16469{
16470    int factor;
16471    int count, i;
16472    uint32_t val = 0;
16473
16474    if (CHIP_REV_IS_FPGA(sc)) {
16475        factor = 120;
16476    } else if (CHIP_REV_IS_EMUL(sc)) {
16477        factor = 200;
16478    } else {
16479        factor = 1;
16480    }
16481
16482    /* disable inputs of parser neighbor blocks */
16483    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16484    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16485    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16486    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16487
16488    /*  write 0 to parser credits for CFC search request */
16489    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16490
16491    /* send Ethernet packet */
16492    bxe_lb_pckt(sc);
16493
16494    /* TODO do i reset NIG statistic? */
16495    /* Wait until NIG register shows 1 packet of size 0x10 */
16496    count = 1000 * factor;
16497    while (count) {
16498        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16499        val = *BXE_SP(sc, wb_data[0]);
16500        if (val == 0x10) {
16501            break;
16502        }
16503
16504        DELAY(10000);
16505        count--;
16506    }
16507
16508    if (val != 0x10) {
16509        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16510        return (-1);
16511    }
16512
16513    /* wait until PRS register shows 1 packet */
16514    count = (1000 * factor);
16515    while (count) {
16516        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16517        if (val == 1) {
16518            break;
16519        }
16520
16521        DELAY(10000);
16522        count--;
16523    }
16524
16525    if (val != 0x1) {
16526        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16527        return (-2);
16528    }
16529
16530    /* Reset and init BRB, PRS */
16531    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16532    DELAY(50000);
16533    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16534    DELAY(50000);
16535    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16536    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16537
16538    /* Disable inputs of parser neighbor blocks */
16539    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16540    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16541    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16542    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16543
16544    /* Write 0 to parser credits for CFC search request */
16545    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16546
16547    /* send 10 Ethernet packets */
16548    for (i = 0; i < 10; i++) {
16549        bxe_lb_pckt(sc);
16550    }
16551
16552    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16553    count = (1000 * factor);
16554    while (count) {
16555        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16556        val = *BXE_SP(sc, wb_data[0]);
16557        if (val == 0xb0) {
16558            break;
16559        }
16560
16561        DELAY(10000);
16562        count--;
16563    }
16564
16565    if (val != 0xb0) {
16566        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16567        return (-3);
16568    }
16569
16570    /* Wait until PRS register shows 2 packets */
16571    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16572    if (val != 2) {
16573        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16574    }
16575
16576    /* Write 1 to parser credits for CFC search request */
16577    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16578
16579    /* Wait until PRS register shows 3 packets */
16580    DELAY(10000 * factor);
16581
16582    /* Wait until NIG register shows 1 packet of size 0x10 */
16583    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16584    if (val != 3) {
16585        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16586    }
16587
16588    /* clear NIG EOP FIFO */
16589    for (i = 0; i < 11; i++) {
16590        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16591    }
16592
16593    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16594    if (val != 1) {
16595        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16596        return (-4);
16597    }
16598
16599    /* Reset and init BRB, PRS, NIG */
16600    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16601    DELAY(50000);
16602    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16603    DELAY(50000);
16604    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16605    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16606    if (!CNIC_SUPPORT(sc)) {
16607        /* set NIC mode */
16608        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16609    }
16610
16611    /* Enable inputs of parser neighbor blocks */
16612    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16613    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16614    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16615    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16616
16617    return (0);
16618}
16619
16620static void
16621bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16622{
16623    int is_required;
16624    uint32_t val;
16625    int port;
16626
16627    is_required = 0;
16628    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16629           SHARED_HW_CFG_FAN_FAILURE_MASK);
16630
16631    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16632        is_required = 1;
16633    }
16634    /*
16635     * The fan failure mechanism is usually related to the PHY type since
16636     * the power consumption of the board is affected by the PHY. Currently,
16637     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16638     */
16639    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16640        for (port = PORT_0; port < PORT_MAX; port++) {
16641            is_required |= elink_fan_failure_det_req(sc,
16642                                                     sc->devinfo.shmem_base,
16643                                                     sc->devinfo.shmem2_base,
16644                                                     port);
16645        }
16646    }
16647
16648    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16649
16650    if (is_required == 0) {
16651        return;
16652    }
16653
16654    /* Fan failure is indicated by SPIO 5 */
16655    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16656
16657    /* set to active low mode */
16658    val = REG_RD(sc, MISC_REG_SPIO_INT);
16659    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16660    REG_WR(sc, MISC_REG_SPIO_INT, val);
16661
16662    /* enable interrupt to signal the IGU */
16663    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16664    val |= MISC_SPIO_SPIO5;
16665    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16666}
16667
16668static void
16669bxe_enable_blocks_attention(struct bxe_softc *sc)
16670{
16671    uint32_t val;
16672
16673    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16674    if (!CHIP_IS_E1x(sc)) {
16675        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16676    } else {
16677        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16678    }
16679    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16680    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16681    /*
16682     * mask read length error interrupts in brb for parser
16683     * (parsing unit and 'checksum and crc' unit)
16684     * these errors are legal (PU reads fixed length and CAC can cause
16685     * read length error on truncated packets)
16686     */
16687    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16688    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16689    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16690    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16691    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16692    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16693/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16694/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16695    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16696    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16697    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16698/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16699/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16700    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16701    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16702    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16703    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16704/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16705/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16706
16707    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16708           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16709           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16710    if (!CHIP_IS_E1x(sc)) {
16711        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16712                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16713    }
16714    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16715
16716    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16717    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16718    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16719/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16720
16721    if (!CHIP_IS_E1x(sc)) {
16722        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16723        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16724    }
16725
16726    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16727    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16728/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16729    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16730}
16731
16732/**
16733 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16734 *
16735 * @sc:     driver handle
16736 */
16737static int
16738bxe_init_hw_common(struct bxe_softc *sc)
16739{
16740    uint8_t abs_func_id;
16741    uint32_t val;
16742
16743    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16744          SC_ABS_FUNC(sc));
16745
16746    /*
16747     * take the RESET lock to protect undi_unload flow from accessing
16748     * registers while we are resetting the chip
16749     */
16750    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16751
16752    bxe_reset_common(sc);
16753
16754    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16755
16756    val = 0xfffc;
16757    if (CHIP_IS_E3(sc)) {
16758        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16759        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16760    }
16761
16762    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16763
16764    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16765
16766    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16767    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16768
16769    if (!CHIP_IS_E1x(sc)) {
16770        /*
16771         * 4-port mode or 2-port mode we need to turn off master-enable for
16772         * everyone. After that we turn it back on for self. So, we disregard
16773         * multi-function, and always disable all functions on the given path,
16774         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
16775         */
16776        for (abs_func_id = SC_PATH(sc);
16777             abs_func_id < (E2_FUNC_MAX * 2);
16778             abs_func_id += 2) {
16779            if (abs_func_id == SC_ABS_FUNC(sc)) {
16780                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16781                continue;
16782            }
16783
16784            bxe_pretend_func(sc, abs_func_id);
16785
16786            /* clear pf enable */
16787            bxe_pf_disable(sc);
16788
16789            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16790        }
16791    }
16792
16793    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16794
16795    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16796
16797    if (CHIP_IS_E1(sc)) {
16798        /*
16799         * enable HW interrupt from PXP on USDM overflow
16800         * bit 16 on INT_MASK_0
16801         */
16802        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16803    }
16804
16805    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16806    bxe_init_pxp(sc);
16807
16808#ifdef __BIG_ENDIAN
16809    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16810    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16811    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16812    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16813    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16814    /* make sure this value is 0 */
16815    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16816
16817    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16818    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16819    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16820    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16821    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16822#endif
16823
16824    ecore_ilt_init_page_size(sc, INITOP_SET);
16825
16826    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16827        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16828    }
16829
16830    /* let the HW do it's magic... */
16831    DELAY(100000);
16832
16833    /* finish PXP init */
16834    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16835    if (val != 1) {
16836        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16837            val);
16838        return (-1);
16839    }
16840    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16841    if (val != 1) {
16842        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16843        return (-1);
16844    }
16845
16846    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16847
16848    /*
16849     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16850     * entries with value "0" and valid bit on. This needs to be done by the
16851     * first PF that is loaded in a path (i.e. common phase)
16852     */
16853    if (!CHIP_IS_E1x(sc)) {
16854/*
16855 * In E2 there is a bug in the timers block that can cause function 6 / 7
16856 * (i.e. vnic3) to start even if it is marked as "scan-off".
16857 * This occurs when a different function (func2,3) is being marked
16858 * as "scan-off". Real-life scenario for example: if a driver is being
16859 * load-unloaded while func6,7 are down. This will cause the timer to access
16860 * the ilt, translate to a logical address and send a request to read/write.
16861 * Since the ilt for the function that is down is not valid, this will cause
16862 * a translation error which is unrecoverable.
16863 * The Workaround is intended to make sure that when this happens nothing
16864 * fatal will occur. The workaround:
16865 *  1.  First PF driver which loads on a path will:
16866 *      a.  After taking the chip out of reset, by using pretend,
16867 *          it will write "0" to the following registers of
16868 *          the other vnics.
16869 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16870 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16871 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16872 *          And for itself it will write '1' to
16873 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16874 *          dmae-operations (writing to pram for example.)
16875 *          note: can be done for only function 6,7 but cleaner this
16876 *            way.
16877 *      b.  Write zero+valid to the entire ILT.
16878 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16879 *          VNIC3 (of that port). The range allocated will be the
16880 *          entire ILT. This is needed to prevent  ILT range error.
16881 *  2.  Any PF driver load flow:
16882 *      a.  ILT update with the physical addresses of the allocated
16883 *          logical pages.
16884 *      b.  Wait 20msec. - note that this timeout is needed to make
16885 *          sure there are no requests in one of the PXP internal
16886 *          queues with "old" ILT addresses.
16887 *      c.  PF enable in the PGLC.
16888 *      d.  Clear the was_error of the PF in the PGLC. (could have
16889 *          occurred while driver was down)
16890 *      e.  PF enable in the CFC (WEAK + STRONG)
16891 *      f.  Timers scan enable
16892 *  3.  PF driver unload flow:
16893 *      a.  Clear the Timers scan_en.
16894 *      b.  Polling for scan_on=0 for that PF.
16895 *      c.  Clear the PF enable bit in the PXP.
16896 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16897 *      e.  Write zero+valid to all ILT entries (The valid bit must
16898 *          stay set)
16899 *      f.  If this is VNIC 3 of a port then also init
16900 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16901 *          to the last enrty in the ILT.
16902 *
16903 *      Notes:
16904 *      Currently the PF error in the PGLC is non recoverable.
16905 *      In the future the there will be a recovery routine for this error.
16906 *      Currently attention is masked.
16907 *      Having an MCP lock on the load/unload process does not guarantee that
16908 *      there is no Timer disable during Func6/7 enable. This is because the
16909 *      Timers scan is currently being cleared by the MCP on FLR.
16910 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16911 *      there is error before clearing it. But the flow above is simpler and
16912 *      more general.
16913 *      All ILT entries are written by zero+valid and not just PF6/7
16914 *      ILT entries since in the future the ILT entries allocation for
16915 *      PF-s might be dynamic.
16916 */
16917        struct ilt_client_info ilt_cli;
16918        struct ecore_ilt ilt;
16919
16920        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16921        memset(&ilt, 0, sizeof(struct ecore_ilt));
16922
16923        /* initialize dummy TM client */
16924        ilt_cli.start      = 0;
16925        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16926        ilt_cli.client_num = ILT_CLIENT_TM;
16927
16928        /*
16929         * Step 1: set zeroes to all ilt page entries with valid bit on
16930         * Step 2: set the timers first/last ilt entry to point
16931         * to the entire range to prevent ILT range error for 3rd/4th
16932         * vnic (this code assumes existence of the vnic)
16933         *
16934         * both steps performed by call to ecore_ilt_client_init_op()
16935         * with dummy TM client
16936         *
16937         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16938         * and his brother are split registers
16939         */
16940
16941        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16942        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16943        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16944
16945        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16946        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16947        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16948    }
16949
16950    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16951    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16952
16953    if (!CHIP_IS_E1x(sc)) {
16954        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16955                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16956
16957        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16958        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16959
16960        /* let the HW do it's magic... */
16961        do {
16962            DELAY(200000);
16963            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16964        } while (factor-- && (val != 1));
16965
16966        if (val != 1) {
16967            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16968            return (-1);
16969        }
16970    }
16971
16972    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16973
16974    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16975
16976    bxe_iov_init_dmae(sc);
16977
16978    /* clean the DMAE memory */
16979    sc->dmae_ready = 1;
16980    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16981
16982    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16983
16984    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16985
16986    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16987
16988    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16989
16990    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16991    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16992    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16993    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16994
16995    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16996
16997    /* QM queues pointers table */
16998    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16999
17000    /* soft reset pulse */
17001    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17002    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17003
17004    if (CNIC_SUPPORT(sc))
17005        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17006
17007    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17008    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17009    if (!CHIP_REV_IS_SLOW(sc)) {
17010        /* enable hw interrupt from doorbell Q */
17011        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17012    }
17013
17014    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17015
17016    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17017    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17018
17019    if (!CHIP_IS_E1(sc)) {
17020        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17021    }
17022
17023    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17024        if (IS_MF_AFEX(sc)) {
17025            /*
17026             * configure that AFEX and VLAN headers must be
17027             * received in AFEX mode
17028             */
17029            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17030            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17031            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17032            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17033            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17034        } else {
17035            /*
17036             * Bit-map indicating which L2 hdrs may appear
17037             * after the basic Ethernet header
17038             */
17039            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17040                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17041        }
17042    }
17043
17044    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17045    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17046    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17047    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17048
17049    if (!CHIP_IS_E1x(sc)) {
17050        /* reset VFC memories */
17051        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17052               VFC_MEMORIES_RST_REG_CAM_RST |
17053               VFC_MEMORIES_RST_REG_RAM_RST);
17054        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17055               VFC_MEMORIES_RST_REG_CAM_RST |
17056               VFC_MEMORIES_RST_REG_RAM_RST);
17057
17058        DELAY(20000);
17059    }
17060
17061    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17062    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17063    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17064    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17065
17066    /* sync semi rtc */
17067    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17068           0x80000000);
17069    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17070           0x80000000);
17071
17072    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17073    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17074    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17075
17076    if (!CHIP_IS_E1x(sc)) {
17077        if (IS_MF_AFEX(sc)) {
17078            /*
17079             * configure that AFEX and VLAN headers must be
17080             * sent in AFEX mode
17081             */
17082            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17083            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17084            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17085            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17086            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17087        } else {
17088            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17089                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17090        }
17091    }
17092
17093    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17094
17095    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17096
17097    if (CNIC_SUPPORT(sc)) {
17098        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17099        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17100        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17101        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17102        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17103        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17104        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17105        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17106        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17107        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17108    }
17109    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17110
17111    if (sizeof(union cdu_context) != 1024) {
17112        /* we currently assume that a context is 1024 bytes */
17113        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17114              (long)sizeof(union cdu_context));
17115    }
17116
17117    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17118    val = (4 << 24) + (0 << 12) + 1024;
17119    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17120
17121    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17122
17123    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17124    /* enable context validation interrupt from CFC */
17125    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17126
17127    /* set the thresholds to prevent CFC/CDU race */
17128    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17129    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17130
17131    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17132        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17133    }
17134
17135    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17136    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17137
17138    /* Reset PCIE errors for debug */
17139    REG_WR(sc, 0x2814, 0xffffffff);
17140    REG_WR(sc, 0x3820, 0xffffffff);
17141
17142    if (!CHIP_IS_E1x(sc)) {
17143        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17144               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17145                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17146        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17147               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17148                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17149                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17150        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17151               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17152                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17153                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17154    }
17155
17156    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17157
17158    if (!CHIP_IS_E1(sc)) {
17159        /* in E3 this done in per-port section */
17160        if (!CHIP_IS_E3(sc))
17161            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17162    }
17163
17164    if (CHIP_IS_E1H(sc)) {
17165        /* not applicable for E2 (and above ...) */
17166        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17167    }
17168
17169    if (CHIP_REV_IS_SLOW(sc)) {
17170        DELAY(200000);
17171    }
17172
17173    /* finish CFC init */
17174    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17175    if (val != 1) {
17176        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17177        return (-1);
17178    }
17179    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17180    if (val != 1) {
17181        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17182        return (-1);
17183    }
17184    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17185    if (val != 1) {
17186        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17187        return (-1);
17188    }
17189    REG_WR(sc, CFC_REG_DEBUG0, 0);
17190
17191    if (CHIP_IS_E1(sc)) {
17192        /* read NIG statistic to see if this is our first up since powerup */
17193        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17194        val = *BXE_SP(sc, wb_data[0]);
17195
17196        /* do internal memory self test */
17197        if ((val == 0) && bxe_int_mem_test(sc)) {
17198            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17199            return (-1);
17200        }
17201    }
17202
17203    bxe_setup_fan_failure_detection(sc);
17204
17205    /* clear PXP2 attentions */
17206    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17207
17208    bxe_enable_blocks_attention(sc);
17209
17210    if (!CHIP_REV_IS_SLOW(sc)) {
17211        ecore_enable_blocks_parity(sc);
17212    }
17213
17214    if (!BXE_NOMCP(sc)) {
17215        if (CHIP_IS_E1x(sc)) {
17216            bxe_common_init_phy(sc);
17217        }
17218    }
17219
17220    return (0);
17221}
17222
17223/**
17224 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17225 *
17226 * @sc:     driver handle
17227 */
17228static int
17229bxe_init_hw_common_chip(struct bxe_softc *sc)
17230{
17231    int rc = bxe_init_hw_common(sc);
17232
17233    if (rc) {
17234        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17235        return (rc);
17236    }
17237
17238    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17239    if (!BXE_NOMCP(sc)) {
17240        bxe_common_init_phy(sc);
17241    }
17242
17243    return (0);
17244}
17245
17246static int
17247bxe_init_hw_port(struct bxe_softc *sc)
17248{
17249    int port = SC_PORT(sc);
17250    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17251    uint32_t low, high;
17252    uint32_t val;
17253
17254    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17255
17256    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17257
17258    ecore_init_block(sc, BLOCK_MISC, init_phase);
17259    ecore_init_block(sc, BLOCK_PXP, init_phase);
17260    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17261
17262    /*
17263     * Timers bug workaround: disables the pf_master bit in pglue at
17264     * common phase, we need to enable it here before any dmae access are
17265     * attempted. Therefore we manually added the enable-master to the
17266     * port phase (it also happens in the function phase)
17267     */
17268    if (!CHIP_IS_E1x(sc)) {
17269        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17270    }
17271
17272    ecore_init_block(sc, BLOCK_ATC, init_phase);
17273    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17274    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17275    ecore_init_block(sc, BLOCK_QM, init_phase);
17276
17277    ecore_init_block(sc, BLOCK_TCM, init_phase);
17278    ecore_init_block(sc, BLOCK_UCM, init_phase);
17279    ecore_init_block(sc, BLOCK_CCM, init_phase);
17280    ecore_init_block(sc, BLOCK_XCM, init_phase);
17281
17282    /* QM cid (connection) count */
17283    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17284
17285    if (CNIC_SUPPORT(sc)) {
17286        ecore_init_block(sc, BLOCK_TM, init_phase);
17287        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17288        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17289    }
17290
17291    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17292
17293    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17294
17295    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17296        if (IS_MF(sc)) {
17297            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17298        } else if (sc->mtu > 4096) {
17299            if (BXE_ONE_PORT(sc)) {
17300                low = 160;
17301            } else {
17302                val = sc->mtu;
17303                /* (24*1024 + val*4)/256 */
17304                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17305            }
17306        } else {
17307            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17308        }
17309        high = (low + 56); /* 14*1024/256 */
17310        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17311        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17312    }
17313
17314    if (CHIP_IS_MODE_4_PORT(sc)) {
17315        REG_WR(sc, SC_PORT(sc) ?
17316               BRB1_REG_MAC_GUARANTIED_1 :
17317               BRB1_REG_MAC_GUARANTIED_0, 40);
17318    }
17319
17320    ecore_init_block(sc, BLOCK_PRS, init_phase);
17321    if (CHIP_IS_E3B0(sc)) {
17322        if (IS_MF_AFEX(sc)) {
17323            /* configure headers for AFEX mode */
17324            REG_WR(sc, SC_PORT(sc) ?
17325                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17326                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17327            REG_WR(sc, SC_PORT(sc) ?
17328                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17329                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17330            REG_WR(sc, SC_PORT(sc) ?
17331                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17332                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17333        } else {
17334            /* Ovlan exists only if we are in multi-function +
17335             * switch-dependent mode, in switch-independent there
17336             * is no ovlan headers
17337             */
17338            REG_WR(sc, SC_PORT(sc) ?
17339                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17340                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17341                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17342        }
17343    }
17344
17345    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17346    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17347    ecore_init_block(sc, BLOCK_USDM, init_phase);
17348    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17349
17350    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17351    ecore_init_block(sc, BLOCK_USEM, init_phase);
17352    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17353    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17354
17355    ecore_init_block(sc, BLOCK_UPB, init_phase);
17356    ecore_init_block(sc, BLOCK_XPB, init_phase);
17357
17358    ecore_init_block(sc, BLOCK_PBF, init_phase);
17359
17360    if (CHIP_IS_E1x(sc)) {
17361        /* configure PBF to work without PAUSE mtu 9000 */
17362        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17363
17364        /* update threshold */
17365        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17366        /* update init credit */
17367        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17368
17369        /* probe changes */
17370        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17371        DELAY(50);
17372        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17373    }
17374
17375    if (CNIC_SUPPORT(sc)) {
17376        ecore_init_block(sc, BLOCK_SRC, init_phase);
17377    }
17378
17379    ecore_init_block(sc, BLOCK_CDU, init_phase);
17380    ecore_init_block(sc, BLOCK_CFC, init_phase);
17381
17382    if (CHIP_IS_E1(sc)) {
17383        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17384        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17385    }
17386    ecore_init_block(sc, BLOCK_HC, init_phase);
17387
17388    ecore_init_block(sc, BLOCK_IGU, init_phase);
17389
17390    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17391    /* init aeu_mask_attn_func_0/1:
17392     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17393     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17394     *             bits 4-7 are used for "per vn group attention" */
17395    val = IS_MF(sc) ? 0xF7 : 0x7;
17396    /* Enable DCBX attention for all but E1 */
17397    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17398    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17399
17400    ecore_init_block(sc, BLOCK_NIG, init_phase);
17401
17402    if (!CHIP_IS_E1x(sc)) {
17403        /* Bit-map indicating which L2 hdrs may appear after the
17404         * basic Ethernet header
17405         */
17406        if (IS_MF_AFEX(sc)) {
17407            REG_WR(sc, SC_PORT(sc) ?
17408                   NIG_REG_P1_HDRS_AFTER_BASIC :
17409                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17410        } else {
17411            REG_WR(sc, SC_PORT(sc) ?
17412                   NIG_REG_P1_HDRS_AFTER_BASIC :
17413                   NIG_REG_P0_HDRS_AFTER_BASIC,
17414                   IS_MF_SD(sc) ? 7 : 6);
17415        }
17416
17417        if (CHIP_IS_E3(sc)) {
17418            REG_WR(sc, SC_PORT(sc) ?
17419                   NIG_REG_LLH1_MF_MODE :
17420                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17421        }
17422    }
17423    if (!CHIP_IS_E3(sc)) {
17424        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17425    }
17426
17427    if (!CHIP_IS_E1(sc)) {
17428        /* 0x2 disable mf_ov, 0x1 enable */
17429        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17430               (IS_MF_SD(sc) ? 0x1 : 0x2));
17431
17432        if (!CHIP_IS_E1x(sc)) {
17433            val = 0;
17434            switch (sc->devinfo.mf_info.mf_mode) {
17435            case MULTI_FUNCTION_SD:
17436                val = 1;
17437                break;
17438            case MULTI_FUNCTION_SI:
17439            case MULTI_FUNCTION_AFEX:
17440                val = 2;
17441                break;
17442            }
17443
17444            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17445                        NIG_REG_LLH0_CLS_TYPE), val);
17446        }
17447        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17448        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17449        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17450    }
17451
17452    /* If SPIO5 is set to generate interrupts, enable it for this port */
17453    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17454    if (val & MISC_SPIO_SPIO5) {
17455        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17456                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17457        val = REG_RD(sc, reg_addr);
17458        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17459        REG_WR(sc, reg_addr, val);
17460    }
17461
17462    return (0);
17463}
17464
17465static uint32_t
17466bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17467                       uint32_t         reg,
17468                       uint32_t         expected,
17469                       uint32_t         poll_count)
17470{
17471    uint32_t cur_cnt = poll_count;
17472    uint32_t val;
17473
17474    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17475        DELAY(FLR_WAIT_INTERVAL);
17476    }
17477
17478    return (val);
17479}
17480
17481static int
17482bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17483                              uint32_t         reg,
17484                              char             *msg,
17485                              uint32_t         poll_cnt)
17486{
17487    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17488
17489    if (val != 0) {
17490        BLOGE(sc, "%s usage count=%d\n", msg, val);
17491        return (1);
17492    }
17493
17494    return (0);
17495}
17496
17497/* Common routines with VF FLR cleanup */
17498static uint32_t
17499bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17500{
17501    /* adjust polling timeout */
17502    if (CHIP_REV_IS_EMUL(sc)) {
17503        return (FLR_POLL_CNT * 2000);
17504    }
17505
17506    if (CHIP_REV_IS_FPGA(sc)) {
17507        return (FLR_POLL_CNT * 120);
17508    }
17509
17510    return (FLR_POLL_CNT);
17511}
17512
17513static int
17514bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17515                           uint32_t         poll_cnt)
17516{
17517    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17518    if (bxe_flr_clnup_poll_hw_counter(sc,
17519                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17520                                      "CFC PF usage counter timed out",
17521                                      poll_cnt)) {
17522        return (1);
17523    }
17524
17525    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17526    if (bxe_flr_clnup_poll_hw_counter(sc,
17527                                      DORQ_REG_PF_USAGE_CNT,
17528                                      "DQ PF usage counter timed out",
17529                                      poll_cnt)) {
17530        return (1);
17531    }
17532
17533    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17534    if (bxe_flr_clnup_poll_hw_counter(sc,
17535                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17536                                      "QM PF usage counter timed out",
17537                                      poll_cnt)) {
17538        return (1);
17539    }
17540
17541    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17542    if (bxe_flr_clnup_poll_hw_counter(sc,
17543                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17544                                      "Timers VNIC usage counter timed out",
17545                                      poll_cnt)) {
17546        return (1);
17547    }
17548
17549    if (bxe_flr_clnup_poll_hw_counter(sc,
17550                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17551                                      "Timers NUM_SCANS usage counter timed out",
17552                                      poll_cnt)) {
17553        return (1);
17554    }
17555
17556    /* Wait DMAE PF usage counter to zero */
17557    if (bxe_flr_clnup_poll_hw_counter(sc,
17558                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17559                                      "DMAE dommand register timed out",
17560                                      poll_cnt)) {
17561        return (1);
17562    }
17563
17564    return (0);
17565}
17566
17567#define OP_GEN_PARAM(param)                                            \
17568    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17569#define OP_GEN_TYPE(type)                                           \
17570    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17571#define OP_GEN_AGG_VECT(index)                                             \
17572    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17573
17574static int
17575bxe_send_final_clnup(struct bxe_softc *sc,
17576                     uint8_t          clnup_func,
17577                     uint32_t         poll_cnt)
17578{
17579    uint32_t op_gen_command = 0;
17580    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17581                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17582    int ret = 0;
17583
17584    if (REG_RD(sc, comp_addr)) {
17585        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17586        return (1);
17587    }
17588
17589    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17590    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17591    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17592    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17593
17594    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17595    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17596
17597    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17598        BLOGE(sc, "FW final cleanup did not succeed\n");
17599        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17600              (REG_RD(sc, comp_addr)));
17601        bxe_panic(sc, ("FLR cleanup failed\n"));
17602        return (1);
17603    }
17604
17605    /* Zero completion for nxt FLR */
17606    REG_WR(sc, comp_addr, 0);
17607
17608    return (ret);
17609}
17610
17611static void
17612bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17613                       struct pbf_pN_buf_regs *regs,
17614                       uint32_t               poll_count)
17615{
17616    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17617    uint32_t cur_cnt = poll_count;
17618
17619    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17620    crd = crd_start = REG_RD(sc, regs->crd);
17621    init_crd = REG_RD(sc, regs->init_crd);
17622
17623    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17624    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17625    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17626
17627    while ((crd != init_crd) &&
17628           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17629            (init_crd - crd_start))) {
17630        if (cur_cnt--) {
17631            DELAY(FLR_WAIT_INTERVAL);
17632            crd = REG_RD(sc, regs->crd);
17633            crd_freed = REG_RD(sc, regs->crd_freed);
17634        } else {
17635            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17636            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17637            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17638            break;
17639        }
17640    }
17641
17642    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17643          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17644}
17645
17646static void
17647bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17648                       struct pbf_pN_cmd_regs *regs,
17649                       uint32_t               poll_count)
17650{
17651    uint32_t occup, to_free, freed, freed_start;
17652    uint32_t cur_cnt = poll_count;
17653
17654    occup = to_free = REG_RD(sc, regs->lines_occup);
17655    freed = freed_start = REG_RD(sc, regs->lines_freed);
17656
17657    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17658    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17659
17660    while (occup &&
17661           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17662        if (cur_cnt--) {
17663            DELAY(FLR_WAIT_INTERVAL);
17664            occup = REG_RD(sc, regs->lines_occup);
17665            freed = REG_RD(sc, regs->lines_freed);
17666        } else {
17667            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17668            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17669            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17670            break;
17671        }
17672    }
17673
17674    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17675          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17676}
17677
17678static void
17679bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17680{
17681    struct pbf_pN_cmd_regs cmd_regs[] = {
17682        {0, (CHIP_IS_E3B0(sc)) ?
17683            PBF_REG_TQ_OCCUPANCY_Q0 :
17684            PBF_REG_P0_TQ_OCCUPANCY,
17685            (CHIP_IS_E3B0(sc)) ?
17686            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17687            PBF_REG_P0_TQ_LINES_FREED_CNT},
17688        {1, (CHIP_IS_E3B0(sc)) ?
17689            PBF_REG_TQ_OCCUPANCY_Q1 :
17690            PBF_REG_P1_TQ_OCCUPANCY,
17691            (CHIP_IS_E3B0(sc)) ?
17692            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17693            PBF_REG_P1_TQ_LINES_FREED_CNT},
17694        {4, (CHIP_IS_E3B0(sc)) ?
17695            PBF_REG_TQ_OCCUPANCY_LB_Q :
17696            PBF_REG_P4_TQ_OCCUPANCY,
17697            (CHIP_IS_E3B0(sc)) ?
17698            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17699            PBF_REG_P4_TQ_LINES_FREED_CNT}
17700    };
17701
17702    struct pbf_pN_buf_regs buf_regs[] = {
17703        {0, (CHIP_IS_E3B0(sc)) ?
17704            PBF_REG_INIT_CRD_Q0 :
17705            PBF_REG_P0_INIT_CRD ,
17706            (CHIP_IS_E3B0(sc)) ?
17707            PBF_REG_CREDIT_Q0 :
17708            PBF_REG_P0_CREDIT,
17709            (CHIP_IS_E3B0(sc)) ?
17710            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17711            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17712        {1, (CHIP_IS_E3B0(sc)) ?
17713            PBF_REG_INIT_CRD_Q1 :
17714            PBF_REG_P1_INIT_CRD,
17715            (CHIP_IS_E3B0(sc)) ?
17716            PBF_REG_CREDIT_Q1 :
17717            PBF_REG_P1_CREDIT,
17718            (CHIP_IS_E3B0(sc)) ?
17719            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17720            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17721        {4, (CHIP_IS_E3B0(sc)) ?
17722            PBF_REG_INIT_CRD_LB_Q :
17723            PBF_REG_P4_INIT_CRD,
17724            (CHIP_IS_E3B0(sc)) ?
17725            PBF_REG_CREDIT_LB_Q :
17726            PBF_REG_P4_CREDIT,
17727            (CHIP_IS_E3B0(sc)) ?
17728            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17729            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17730    };
17731
17732    int i;
17733
17734    /* Verify the command queues are flushed P0, P1, P4 */
17735    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17736        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17737    }
17738
17739    /* Verify the transmission buffers are flushed P0, P1, P4 */
17740    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17741        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17742    }
17743}
17744
17745static void
17746bxe_hw_enable_status(struct bxe_softc *sc)
17747{
17748    uint32_t val;
17749
17750    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17751    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17752
17753    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17754    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17755
17756    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17757    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17758
17759    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17760    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17761
17762    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17763    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17764
17765    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17766    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17767
17768    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17769    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17770
17771    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17772    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17773}
17774
17775static int
17776bxe_pf_flr_clnup(struct bxe_softc *sc)
17777{
17778    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17779
17780    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17781
17782    /* Re-enable PF target read access */
17783    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17784
17785    /* Poll HW usage counters */
17786    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17787    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17788        return (-1);
17789    }
17790
17791    /* Zero the igu 'trailing edge' and 'leading edge' */
17792
17793    /* Send the FW cleanup command */
17794    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17795        return (-1);
17796    }
17797
17798    /* ATC cleanup */
17799
17800    /* Verify TX hw is flushed */
17801    bxe_tx_hw_flushed(sc, poll_cnt);
17802
17803    /* Wait 100ms (not adjusted according to platform) */
17804    DELAY(100000);
17805
17806    /* Verify no pending pci transactions */
17807    if (bxe_is_pcie_pending(sc)) {
17808        BLOGE(sc, "PCIE Transactions still pending\n");
17809    }
17810
17811    /* Debug */
17812    bxe_hw_enable_status(sc);
17813
17814    /*
17815     * Master enable - Due to WB DMAE writes performed before this
17816     * register is re-initialized as part of the regular function init
17817     */
17818    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17819
17820    return (0);
17821}
17822
17823static int
17824bxe_init_hw_func(struct bxe_softc *sc)
17825{
17826    int port = SC_PORT(sc);
17827    int func = SC_FUNC(sc);
17828    int init_phase = PHASE_PF0 + func;
17829    struct ecore_ilt *ilt = sc->ilt;
17830    uint16_t cdu_ilt_start;
17831    uint32_t addr, val;
17832    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17833    int i, main_mem_width, rc;
17834
17835    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17836
17837    /* FLR cleanup */
17838    if (!CHIP_IS_E1x(sc)) {
17839        rc = bxe_pf_flr_clnup(sc);
17840        if (rc) {
17841            BLOGE(sc, "FLR cleanup failed!\n");
17842            // XXX bxe_fw_dump(sc);
17843            // XXX bxe_idle_chk(sc);
17844            return (rc);
17845        }
17846    }
17847
17848    /* set MSI reconfigure capability */
17849    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17850        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17851        val = REG_RD(sc, addr);
17852        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17853        REG_WR(sc, addr, val);
17854    }
17855
17856    ecore_init_block(sc, BLOCK_PXP, init_phase);
17857    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17858
17859    ilt = sc->ilt;
17860    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17861
17862    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17863        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17864        ilt->lines[cdu_ilt_start + i].page_mapping =
17865            sc->context[i].vcxt_dma.paddr;
17866        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17867    }
17868    ecore_ilt_init_op(sc, INITOP_SET);
17869
17870    /* Set NIC mode */
17871    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17872    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17873
17874    if (!CHIP_IS_E1x(sc)) {
17875        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17876
17877        /* Turn on a single ISR mode in IGU if driver is going to use
17878         * INT#x or MSI
17879         */
17880        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17881            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17882        }
17883
17884        /*
17885         * Timers workaround bug: function init part.
17886         * Need to wait 20msec after initializing ILT,
17887         * needed to make sure there are no requests in
17888         * one of the PXP internal queues with "old" ILT addresses
17889         */
17890        DELAY(20000);
17891
17892        /*
17893         * Master enable - Due to WB DMAE writes performed before this
17894         * register is re-initialized as part of the regular function
17895         * init
17896         */
17897        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17898        /* Enable the function in IGU */
17899        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17900    }
17901
17902    sc->dmae_ready = 1;
17903
17904    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17905
17906    if (!CHIP_IS_E1x(sc))
17907        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17908
17909    ecore_init_block(sc, BLOCK_ATC, init_phase);
17910    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17911    ecore_init_block(sc, BLOCK_NIG, init_phase);
17912    ecore_init_block(sc, BLOCK_SRC, init_phase);
17913    ecore_init_block(sc, BLOCK_MISC, init_phase);
17914    ecore_init_block(sc, BLOCK_TCM, init_phase);
17915    ecore_init_block(sc, BLOCK_UCM, init_phase);
17916    ecore_init_block(sc, BLOCK_CCM, init_phase);
17917    ecore_init_block(sc, BLOCK_XCM, init_phase);
17918    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17919    ecore_init_block(sc, BLOCK_USEM, init_phase);
17920    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17921    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17922
17923    if (!CHIP_IS_E1x(sc))
17924        REG_WR(sc, QM_REG_PF_EN, 1);
17925
17926    if (!CHIP_IS_E1x(sc)) {
17927        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17928        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17929        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17930        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17931    }
17932    ecore_init_block(sc, BLOCK_QM, init_phase);
17933
17934    ecore_init_block(sc, BLOCK_TM, init_phase);
17935    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17936
17937    bxe_iov_init_dq(sc);
17938
17939    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17940    ecore_init_block(sc, BLOCK_PRS, init_phase);
17941    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17942    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17943    ecore_init_block(sc, BLOCK_USDM, init_phase);
17944    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17945    ecore_init_block(sc, BLOCK_UPB, init_phase);
17946    ecore_init_block(sc, BLOCK_XPB, init_phase);
17947    ecore_init_block(sc, BLOCK_PBF, init_phase);
17948    if (!CHIP_IS_E1x(sc))
17949        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17950
17951    ecore_init_block(sc, BLOCK_CDU, init_phase);
17952
17953    ecore_init_block(sc, BLOCK_CFC, init_phase);
17954
17955    if (!CHIP_IS_E1x(sc))
17956        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17957
17958    if (IS_MF(sc)) {
17959        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17960        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17961    }
17962
17963    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17964
17965    /* HC init per function */
17966    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17967        if (CHIP_IS_E1H(sc)) {
17968            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17969
17970            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17971            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17972        }
17973        ecore_init_block(sc, BLOCK_HC, init_phase);
17974
17975    } else {
17976        int num_segs, sb_idx, prod_offset;
17977
17978        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17979
17980        if (!CHIP_IS_E1x(sc)) {
17981            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17982            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17983        }
17984
17985        ecore_init_block(sc, BLOCK_IGU, init_phase);
17986
17987        if (!CHIP_IS_E1x(sc)) {
17988            int dsb_idx = 0;
17989            /**
17990             * Producer memory:
17991             * E2 mode: address 0-135 match to the mapping memory;
17992             * 136 - PF0 default prod; 137 - PF1 default prod;
17993             * 138 - PF2 default prod; 139 - PF3 default prod;
17994             * 140 - PF0 attn prod;    141 - PF1 attn prod;
17995             * 142 - PF2 attn prod;    143 - PF3 attn prod;
17996             * 144-147 reserved.
17997             *
17998             * E1.5 mode - In backward compatible mode;
17999             * for non default SB; each even line in the memory
18000             * holds the U producer and each odd line hold
18001             * the C producer. The first 128 producers are for
18002             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18003             * producers are for the DSB for each PF.
18004             * Each PF has five segments: (the order inside each
18005             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18006             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18007             * 144-147 attn prods;
18008             */
18009            /* non-default-status-blocks */
18010            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18011                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18012            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18013                prod_offset = (sc->igu_base_sb + sb_idx) *
18014                    num_segs;
18015
18016                for (i = 0; i < num_segs; i++) {
18017                    addr = IGU_REG_PROD_CONS_MEMORY +
18018                            (prod_offset + i) * 4;
18019                    REG_WR(sc, addr, 0);
18020                }
18021                /* send consumer update with value 0 */
18022                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18023                           USTORM_ID, 0, IGU_INT_NOP, 1);
18024                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18025            }
18026
18027            /* default-status-blocks */
18028            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18029                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18030
18031            if (CHIP_IS_MODE_4_PORT(sc))
18032                dsb_idx = SC_FUNC(sc);
18033            else
18034                dsb_idx = SC_VN(sc);
18035
18036            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18037                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18038                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18039
18040            /*
18041             * igu prods come in chunks of E1HVN_MAX (4) -
18042             * does not matters what is the current chip mode
18043             */
18044            for (i = 0; i < (num_segs * E1HVN_MAX);
18045                 i += E1HVN_MAX) {
18046                addr = IGU_REG_PROD_CONS_MEMORY +
18047                            (prod_offset + i)*4;
18048                REG_WR(sc, addr, 0);
18049            }
18050            /* send consumer update with 0 */
18051            if (CHIP_INT_MODE_IS_BC(sc)) {
18052                bxe_ack_sb(sc, sc->igu_dsb_id,
18053                           USTORM_ID, 0, IGU_INT_NOP, 1);
18054                bxe_ack_sb(sc, sc->igu_dsb_id,
18055                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18056                bxe_ack_sb(sc, sc->igu_dsb_id,
18057                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18058                bxe_ack_sb(sc, sc->igu_dsb_id,
18059                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18060                bxe_ack_sb(sc, sc->igu_dsb_id,
18061                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18062            } else {
18063                bxe_ack_sb(sc, sc->igu_dsb_id,
18064                           USTORM_ID, 0, IGU_INT_NOP, 1);
18065                bxe_ack_sb(sc, sc->igu_dsb_id,
18066                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18067            }
18068            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18069
18070            /* !!! these should become driver const once
18071               rf-tool supports split-68 const */
18072            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18073            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18074            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18075            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18076            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18077            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18078        }
18079    }
18080
18081    /* Reset PCIE errors for debug */
18082    REG_WR(sc, 0x2114, 0xffffffff);
18083    REG_WR(sc, 0x2120, 0xffffffff);
18084
18085    if (CHIP_IS_E1x(sc)) {
18086        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18087        main_mem_base = HC_REG_MAIN_MEMORY +
18088                SC_PORT(sc) * (main_mem_size * 4);
18089        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18090        main_mem_width = 8;
18091
18092        val = REG_RD(sc, main_mem_prty_clr);
18093        if (val) {
18094            BLOGD(sc, DBG_LOAD,
18095                  "Parity errors in HC block during function init (0x%x)!\n",
18096                  val);
18097        }
18098
18099        /* Clear "false" parity errors in MSI-X table */
18100        for (i = main_mem_base;
18101             i < main_mem_base + main_mem_size * 4;
18102             i += main_mem_width) {
18103            bxe_read_dmae(sc, i, main_mem_width / 4);
18104            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18105                           i, main_mem_width / 4);
18106        }
18107        /* Clear HC parity attention */
18108        REG_RD(sc, main_mem_prty_clr);
18109    }
18110
18111#if 1
18112    /* Enable STORMs SP logging */
18113    REG_WR8(sc, BAR_USTRORM_INTMEM +
18114           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18115    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18116           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18117    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18118           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18119    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18120           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18121#endif
18122
18123    elink_phy_probe(&sc->link_params);
18124
18125    return (0);
18126}
18127
18128static void
18129bxe_link_reset(struct bxe_softc *sc)
18130{
18131    if (!BXE_NOMCP(sc)) {
18132	bxe_acquire_phy_lock(sc);
18133        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18134	bxe_release_phy_lock(sc);
18135    } else {
18136        if (!CHIP_REV_IS_SLOW(sc)) {
18137            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18138        }
18139    }
18140}
18141
18142static void
18143bxe_reset_port(struct bxe_softc *sc)
18144{
18145    int port = SC_PORT(sc);
18146    uint32_t val;
18147
18148	ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18149    /* reset physical Link */
18150    bxe_link_reset(sc);
18151
18152    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18153
18154    /* Do not rcv packets to BRB */
18155    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18156    /* Do not direct rcv packets that are not for MCP to the BRB */
18157    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18158               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18159
18160    /* Configure AEU */
18161    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18162
18163    DELAY(100000);
18164
18165    /* Check for BRB port occupancy */
18166    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18167    if (val) {
18168        BLOGD(sc, DBG_LOAD,
18169              "BRB1 is not empty, %d blocks are occupied\n", val);
18170    }
18171
18172    /* TODO: Close Doorbell port? */
18173}
18174
18175static void
18176bxe_ilt_wr(struct bxe_softc *sc,
18177           uint32_t         index,
18178           bus_addr_t       addr)
18179{
18180    int reg;
18181    uint32_t wb_write[2];
18182
18183    if (CHIP_IS_E1(sc)) {
18184        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18185    } else {
18186        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18187    }
18188
18189    wb_write[0] = ONCHIP_ADDR1(addr);
18190    wb_write[1] = ONCHIP_ADDR2(addr);
18191    REG_WR_DMAE(sc, reg, wb_write, 2);
18192}
18193
18194static void
18195bxe_clear_func_ilt(struct bxe_softc *sc,
18196                   uint32_t         func)
18197{
18198    uint32_t i, base = FUNC_ILT_BASE(func);
18199    for (i = base; i < base + ILT_PER_FUNC; i++) {
18200        bxe_ilt_wr(sc, i, 0);
18201    }
18202}
18203
18204static void
18205bxe_reset_func(struct bxe_softc *sc)
18206{
18207    struct bxe_fastpath *fp;
18208    int port = SC_PORT(sc);
18209    int func = SC_FUNC(sc);
18210    int i;
18211
18212    /* Disable the function in the FW */
18213    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18214    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18215    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18216    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18217
18218    /* FP SBs */
18219    FOR_EACH_ETH_QUEUE(sc, i) {
18220        fp = &sc->fp[i];
18221        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18222                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18223                SB_DISABLED);
18224    }
18225
18226    /* SP SB */
18227    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18228            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18229            SB_DISABLED);
18230
18231    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18232        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18233    }
18234
18235    /* Configure IGU */
18236    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18237        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18238        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18239    } else {
18240        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18241        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18242    }
18243
18244    if (CNIC_LOADED(sc)) {
18245        /* Disable Timer scan */
18246        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18247        /*
18248         * Wait for at least 10ms and up to 2 second for the timers
18249         * scan to complete
18250         */
18251        for (i = 0; i < 200; i++) {
18252            DELAY(10000);
18253            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18254                break;
18255        }
18256    }
18257
18258    /* Clear ILT */
18259    bxe_clear_func_ilt(sc, func);
18260
18261    /*
18262     * Timers workaround bug for E2: if this is vnic-3,
18263     * we need to set the entire ilt range for this timers.
18264     */
18265    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18266        struct ilt_client_info ilt_cli;
18267        /* use dummy TM client */
18268        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18269        ilt_cli.start = 0;
18270        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18271        ilt_cli.client_num = ILT_CLIENT_TM;
18272
18273        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18274    }
18275
18276    /* this assumes that reset_port() called before reset_func()*/
18277    if (!CHIP_IS_E1x(sc)) {
18278        bxe_pf_disable(sc);
18279    }
18280
18281    sc->dmae_ready = 0;
18282}
18283
18284static int
18285bxe_gunzip_init(struct bxe_softc *sc)
18286{
18287    return (0);
18288}
18289
18290static void
18291bxe_gunzip_end(struct bxe_softc *sc)
18292{
18293    return;
18294}
18295
18296static int
18297bxe_init_firmware(struct bxe_softc *sc)
18298{
18299    if (CHIP_IS_E1(sc)) {
18300        ecore_init_e1_firmware(sc);
18301        sc->iro_array = e1_iro_arr;
18302    } else if (CHIP_IS_E1H(sc)) {
18303        ecore_init_e1h_firmware(sc);
18304        sc->iro_array = e1h_iro_arr;
18305    } else if (!CHIP_IS_E1x(sc)) {
18306        ecore_init_e2_firmware(sc);
18307        sc->iro_array = e2_iro_arr;
18308    } else {
18309        BLOGE(sc, "Unsupported chip revision\n");
18310        return (-1);
18311    }
18312
18313    return (0);
18314}
18315
18316static void
18317bxe_release_firmware(struct bxe_softc *sc)
18318{
18319    /* Do nothing */
18320    return;
18321}
18322
18323static int
18324ecore_gunzip(struct bxe_softc *sc,
18325             const uint8_t    *zbuf,
18326             int              len)
18327{
18328    /* XXX : Implement... */
18329    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18330    return (FALSE);
18331}
18332
18333static void
18334ecore_reg_wr_ind(struct bxe_softc *sc,
18335                 uint32_t         addr,
18336                 uint32_t         val)
18337{
18338    bxe_reg_wr_ind(sc, addr, val);
18339}
18340
18341static void
18342ecore_write_dmae_phys_len(struct bxe_softc *sc,
18343                          bus_addr_t       phys_addr,
18344                          uint32_t         addr,
18345                          uint32_t         len)
18346{
18347    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18348}
18349
18350void
18351ecore_storm_memset_struct(struct bxe_softc *sc,
18352                          uint32_t         addr,
18353                          size_t           size,
18354                          uint32_t         *data)
18355{
18356    uint8_t i;
18357    for (i = 0; i < size/4; i++) {
18358        REG_WR(sc, addr + (i * 4), data[i]);
18359    }
18360}
18361
18362
18363/*
18364 * character device - ioctl interface definitions
18365 */
18366
18367
18368#include "bxe_dump.h"
18369#include "bxe_ioctl.h"
18370#include <sys/conf.h>
18371
18372static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18373                struct thread *td);
18374
18375static struct cdevsw bxe_cdevsw = {
18376    .d_version = D_VERSION,
18377    .d_ioctl = bxe_eioctl,
18378    .d_name = "bxecnic",
18379};
18380
18381#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18382
18383
18384#define DUMP_ALL_PRESETS        0x1FFF
18385#define DUMP_MAX_PRESETS        13
18386#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18387#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18388#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18389#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18390#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18391
18392#define IS_REG_IN_PRESET(presets, idx)  \
18393                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18394
18395
18396static int
18397bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18398{
18399    if (CHIP_IS_E1(sc))
18400        return dump_num_registers[0][preset-1];
18401    else if (CHIP_IS_E1H(sc))
18402        return dump_num_registers[1][preset-1];
18403    else if (CHIP_IS_E2(sc))
18404        return dump_num_registers[2][preset-1];
18405    else if (CHIP_IS_E3A0(sc))
18406        return dump_num_registers[3][preset-1];
18407    else if (CHIP_IS_E3B0(sc))
18408        return dump_num_registers[4][preset-1];
18409    else
18410        return 0;
18411}
18412
18413static int
18414bxe_get_total_regs_len32(struct bxe_softc *sc)
18415{
18416    uint32_t preset_idx;
18417    int regdump_len32 = 0;
18418
18419
18420    /* Calculate the total preset regs length */
18421    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18422        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18423    }
18424
18425    return regdump_len32;
18426}
18427
18428static const uint32_t *
18429__bxe_get_page_addr_ar(struct bxe_softc *sc)
18430{
18431    if (CHIP_IS_E2(sc))
18432        return page_vals_e2;
18433    else if (CHIP_IS_E3(sc))
18434        return page_vals_e3;
18435    else
18436        return NULL;
18437}
18438
18439static uint32_t
18440__bxe_get_page_reg_num(struct bxe_softc *sc)
18441{
18442    if (CHIP_IS_E2(sc))
18443        return PAGE_MODE_VALUES_E2;
18444    else if (CHIP_IS_E3(sc))
18445        return PAGE_MODE_VALUES_E3;
18446    else
18447        return 0;
18448}
18449
18450static const uint32_t *
18451__bxe_get_page_write_ar(struct bxe_softc *sc)
18452{
18453    if (CHIP_IS_E2(sc))
18454        return page_write_regs_e2;
18455    else if (CHIP_IS_E3(sc))
18456        return page_write_regs_e3;
18457    else
18458        return NULL;
18459}
18460
18461static uint32_t
18462__bxe_get_page_write_num(struct bxe_softc *sc)
18463{
18464    if (CHIP_IS_E2(sc))
18465        return PAGE_WRITE_REGS_E2;
18466    else if (CHIP_IS_E3(sc))
18467        return PAGE_WRITE_REGS_E3;
18468    else
18469        return 0;
18470}
18471
18472static const struct reg_addr *
18473__bxe_get_page_read_ar(struct bxe_softc *sc)
18474{
18475    if (CHIP_IS_E2(sc))
18476        return page_read_regs_e2;
18477    else if (CHIP_IS_E3(sc))
18478        return page_read_regs_e3;
18479    else
18480        return NULL;
18481}
18482
18483static uint32_t
18484__bxe_get_page_read_num(struct bxe_softc *sc)
18485{
18486    if (CHIP_IS_E2(sc))
18487        return PAGE_READ_REGS_E2;
18488    else if (CHIP_IS_E3(sc))
18489        return PAGE_READ_REGS_E3;
18490    else
18491        return 0;
18492}
18493
18494static bool
18495bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18496{
18497    if (CHIP_IS_E1(sc))
18498        return IS_E1_REG(reg_info->chips);
18499    else if (CHIP_IS_E1H(sc))
18500        return IS_E1H_REG(reg_info->chips);
18501    else if (CHIP_IS_E2(sc))
18502        return IS_E2_REG(reg_info->chips);
18503    else if (CHIP_IS_E3A0(sc))
18504        return IS_E3A0_REG(reg_info->chips);
18505    else if (CHIP_IS_E3B0(sc))
18506        return IS_E3B0_REG(reg_info->chips);
18507    else
18508        return 0;
18509}
18510
18511static bool
18512bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18513{
18514    if (CHIP_IS_E1(sc))
18515        return IS_E1_REG(wreg_info->chips);
18516    else if (CHIP_IS_E1H(sc))
18517        return IS_E1H_REG(wreg_info->chips);
18518    else if (CHIP_IS_E2(sc))
18519        return IS_E2_REG(wreg_info->chips);
18520    else if (CHIP_IS_E3A0(sc))
18521        return IS_E3A0_REG(wreg_info->chips);
18522    else if (CHIP_IS_E3B0(sc))
18523        return IS_E3B0_REG(wreg_info->chips);
18524    else
18525        return 0;
18526}
18527
18528/**
18529 * bxe_read_pages_regs - read "paged" registers
18530 *
18531 * @bp          device handle
18532 * @p           output buffer
18533 *
18534 * Reads "paged" memories: memories that may only be read by first writing to a
18535 * specific address ("write address") and then reading from a specific address
18536 * ("read address"). There may be more than one write address per "page" and
18537 * more than one read address per write address.
18538 */
18539static void
18540bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18541{
18542    uint32_t i, j, k, n;
18543
18544    /* addresses of the paged registers */
18545    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18546    /* number of paged registers */
18547    int num_pages = __bxe_get_page_reg_num(sc);
18548    /* write addresses */
18549    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18550    /* number of write addresses */
18551    int write_num = __bxe_get_page_write_num(sc);
18552    /* read addresses info */
18553    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18554    /* number of read addresses */
18555    int read_num = __bxe_get_page_read_num(sc);
18556    uint32_t addr, size;
18557
18558    for (i = 0; i < num_pages; i++) {
18559        for (j = 0; j < write_num; j++) {
18560            REG_WR(sc, write_addr[j], page_addr[i]);
18561
18562            for (k = 0; k < read_num; k++) {
18563                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18564                    size = read_addr[k].size;
18565                    for (n = 0; n < size; n++) {
18566                        addr = read_addr[k].addr + n*4;
18567                        *p++ = REG_RD(sc, addr);
18568                    }
18569                }
18570            }
18571        }
18572    }
18573    return;
18574}
18575
18576
18577static int
18578bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18579{
18580    uint32_t i, j, addr;
18581    const struct wreg_addr *wreg_addr_p = NULL;
18582
18583    if (CHIP_IS_E1(sc))
18584        wreg_addr_p = &wreg_addr_e1;
18585    else if (CHIP_IS_E1H(sc))
18586        wreg_addr_p = &wreg_addr_e1h;
18587    else if (CHIP_IS_E2(sc))
18588        wreg_addr_p = &wreg_addr_e2;
18589    else if (CHIP_IS_E3A0(sc))
18590        wreg_addr_p = &wreg_addr_e3;
18591    else if (CHIP_IS_E3B0(sc))
18592        wreg_addr_p = &wreg_addr_e3b0;
18593    else
18594        return (-1);
18595
18596    /* Read the idle_chk registers */
18597    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18598        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18599            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18600            for (j = 0; j < idle_reg_addrs[i].size; j++)
18601                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18602        }
18603    }
18604
18605    /* Read the regular registers */
18606    for (i = 0; i < REGS_COUNT; i++) {
18607        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18608            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18609            for (j = 0; j < reg_addrs[i].size; j++)
18610                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18611        }
18612    }
18613
18614    /* Read the CAM registers */
18615    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18616        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18617        for (i = 0; i < wreg_addr_p->size; i++) {
18618            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18619
18620            /* In case of wreg_addr register, read additional
18621               registers from read_regs array
18622             */
18623            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18624                addr = *(wreg_addr_p->read_regs);
18625                *p++ = REG_RD(sc, addr + j*4);
18626            }
18627        }
18628    }
18629
18630    /* Paged registers are supported in E2 & E3 only */
18631    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18632        /* Read "paged" registers */
18633        bxe_read_pages_regs(sc, p, preset);
18634    }
18635
18636    return 0;
18637}
18638
18639int
18640bxe_grc_dump(struct bxe_softc *sc)
18641{
18642    int rval = 0;
18643    uint32_t preset_idx;
18644    uint8_t *buf;
18645    uint32_t size;
18646    struct  dump_header *d_hdr;
18647    uint32_t i;
18648    uint32_t reg_val;
18649    uint32_t reg_addr;
18650    uint32_t cmd_offset;
18651    struct ecore_ilt *ilt = SC_ILT(sc);
18652    struct bxe_fastpath *fp;
18653    struct ilt_client_info *ilt_cli;
18654    int grc_dump_size;
18655
18656
18657    if (sc->grcdump_done || sc->grcdump_started)
18658	return (rval);
18659
18660    sc->grcdump_started = 1;
18661    BLOGI(sc, "Started collecting grcdump\n");
18662
18663    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18664                sizeof(struct  dump_header);
18665
18666    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18667
18668    if (sc->grc_dump == NULL) {
18669        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18670        return(ENOMEM);
18671    }
18672
18673
18674
18675    /* Disable parity attentions as long as following dump may
18676     * cause false alarms by reading never written registers. We
18677     * will re-enable parity attentions right after the dump.
18678     */
18679
18680    /* Disable parity on path 0 */
18681    bxe_pretend_func(sc, 0);
18682
18683    ecore_disable_blocks_parity(sc);
18684
18685    /* Disable parity on path 1 */
18686    bxe_pretend_func(sc, 1);
18687    ecore_disable_blocks_parity(sc);
18688
18689    /* Return to current function */
18690    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18691
18692    buf = sc->grc_dump;
18693    d_hdr = sc->grc_dump;
18694
18695    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18696    d_hdr->version = BNX2X_DUMP_VERSION;
18697    d_hdr->preset = DUMP_ALL_PRESETS;
18698
18699    if (CHIP_IS_E1(sc)) {
18700        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18701    } else if (CHIP_IS_E1H(sc)) {
18702        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18703    } else if (CHIP_IS_E2(sc)) {
18704        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18705                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18706    } else if (CHIP_IS_E3A0(sc)) {
18707        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18708                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18709    } else if (CHIP_IS_E3B0(sc)) {
18710        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18711                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18712    }
18713
18714    buf += sizeof(struct  dump_header);
18715
18716    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18717
18718        /* Skip presets with IOR */
18719        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18720            (preset_idx == 11))
18721            continue;
18722
18723        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18724
18725	if (rval)
18726            break;
18727
18728        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18729
18730        buf += size;
18731    }
18732
18733    bxe_pretend_func(sc, 0);
18734    ecore_clear_blocks_parity(sc);
18735    ecore_enable_blocks_parity(sc);
18736
18737    bxe_pretend_func(sc, 1);
18738    ecore_clear_blocks_parity(sc);
18739    ecore_enable_blocks_parity(sc);
18740
18741    /* Return to current function */
18742    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18743
18744
18745
18746    if(sc->state == BXE_STATE_OPEN) {
18747        if(sc->fw_stats_req  != NULL) {
18748    		BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18749        			(uintmax_t)sc->fw_stats_req_mapping,
18750        			(uintmax_t)sc->fw_stats_data_mapping,
18751        			sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18752		}
18753		if(sc->def_sb != NULL) {
18754			BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18755        			(void *)sc->def_sb_dma.paddr, sc->def_sb,
18756        			sizeof(struct host_sp_status_block));
18757		}
18758		if(sc->eq_dma.vaddr != NULL) {
18759    		BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18760        			(uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18761		}
18762		if(sc->sp_dma.vaddr != NULL) {
18763    		BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18764        			(uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18765        			sizeof(struct bxe_slowpath));
18766		}
18767		if(sc->spq_dma.vaddr != NULL) {
18768    		BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18769        			(uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18770		}
18771		if(sc->gz_buf_dma.vaddr != NULL) {
18772    		BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18773        			(uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18774        			FW_BUF_SIZE);
18775		}
18776    	for (i = 0; i < sc->num_queues; i++) {
18777        	fp = &sc->fp[i];
18778			if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
18779                        fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
18780                        fp->rx_sge_dma.vaddr != NULL) {
18781
18782				BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18783            			(uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18784            			sizeof(union bxe_host_hc_status_block));
18785				BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18786            			(uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18787            			(BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18788        		BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18789            			(uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18790            			(BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18791        		BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18792            			(uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18793            			(BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18794        		BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18795            			(uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18796            			(BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18797    		}
18798		}
18799		if(ilt != NULL ) {
18800    		ilt_cli = &ilt->clients[1];
18801			if(ilt->lines != NULL) {
18802    		for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18803        		BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18804            			(uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18805            			((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18806    		}
18807			}
18808		}
18809
18810
18811    	cmd_offset = DMAE_REG_CMD_MEM;
18812    	for (i = 0; i < 224; i++) {
18813        	reg_addr = (cmd_offset +(i * 4));
18814        	reg_val = REG_RD(sc, reg_addr);
18815        	BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
18816            			reg_addr, reg_val);
18817    	}
18818	}
18819
18820    BLOGI(sc, "Collection of grcdump done\n");
18821    sc->grcdump_done = 1;
18822    return(rval);
18823}
18824
18825static int
18826bxe_add_cdev(struct bxe_softc *sc)
18827{
18828    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18829
18830    if (sc->eeprom == NULL) {
18831        BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
18832        return (-1);
18833    }
18834
18835    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18836                            sc->ifp->if_dunit,
18837                            UID_ROOT,
18838                            GID_WHEEL,
18839                            0600,
18840                            "%s",
18841                            if_name(sc->ifp));
18842
18843    if (sc->ioctl_dev == NULL) {
18844        free(sc->eeprom, M_DEVBUF);
18845        sc->eeprom = NULL;
18846        return (-1);
18847    }
18848
18849    sc->ioctl_dev->si_drv1 = sc;
18850
18851    return (0);
18852}
18853
18854static void
18855bxe_del_cdev(struct bxe_softc *sc)
18856{
18857    if (sc->ioctl_dev != NULL)
18858        destroy_dev(sc->ioctl_dev);
18859
18860    if (sc->eeprom != NULL) {
18861        free(sc->eeprom, M_DEVBUF);
18862        sc->eeprom = NULL;
18863    }
18864    sc->ioctl_dev = NULL;
18865
18866    return;
18867}
18868
18869static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18870{
18871
18872    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
18873        return FALSE;
18874
18875    return TRUE;
18876}
18877
18878
18879static int
18880bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18881{
18882    int rval = 0;
18883
18884    if(!bxe_is_nvram_accessible(sc)) {
18885        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18886        return (-EAGAIN);
18887    }
18888    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18889
18890
18891   return (rval);
18892}
18893
18894static int
18895bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18896{
18897    int rval = 0;
18898
18899    if(!bxe_is_nvram_accessible(sc)) {
18900        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18901        return (-EAGAIN);
18902    }
18903    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18904
18905   return (rval);
18906}
18907
18908static int
18909bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18910{
18911    int rval = 0;
18912
18913    switch (eeprom->eeprom_cmd) {
18914
18915    case BXE_EEPROM_CMD_SET_EEPROM:
18916
18917        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18918                       eeprom->eeprom_data_len);
18919
18920        if (rval)
18921            break;
18922
18923        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18924                       eeprom->eeprom_data_len);
18925        break;
18926
18927    case BXE_EEPROM_CMD_GET_EEPROM:
18928
18929        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18930                       eeprom->eeprom_data_len);
18931
18932        if (rval) {
18933            break;
18934        }
18935
18936        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18937                       eeprom->eeprom_data_len);
18938        break;
18939
18940    default:
18941            rval = EINVAL;
18942            break;
18943    }
18944
18945    if (rval) {
18946        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
18947    }
18948
18949    return (rval);
18950}
18951
18952static int
18953bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18954{
18955    uint32_t ext_phy_config;
18956    int port = SC_PORT(sc);
18957    int cfg_idx = bxe_get_link_cfg_idx(sc);
18958
18959    dev_p->supported = sc->port.supported[cfg_idx] |
18960            (sc->port.supported[cfg_idx ^ 1] &
18961            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
18962    dev_p->advertising = sc->port.advertising[cfg_idx];
18963    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
18964        ELINK_ETH_PHY_SFP_1G_FIBER) {
18965        dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full);
18966        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
18967    }
18968    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
18969        !(sc->flags & BXE_MF_FUNC_DIS)) {
18970        dev_p->duplex = sc->link_vars.duplex;
18971        if (IS_MF(sc) && !BXE_NOMCP(sc))
18972            dev_p->speed = bxe_get_mf_speed(sc);
18973        else
18974            dev_p->speed = sc->link_vars.line_speed;
18975    } else {
18976        dev_p->duplex = DUPLEX_UNKNOWN;
18977        dev_p->speed = SPEED_UNKNOWN;
18978    }
18979
18980    dev_p->port = bxe_media_detect(sc);
18981
18982    ext_phy_config = SHMEM_RD(sc,
18983                         dev_info.port_hw_config[port].external_phy_config);
18984    if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
18985        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
18986        dev_p->phy_address =  sc->port.phy_addr;
18987    else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18988            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
18989        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18990            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
18991        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
18992    else
18993        dev_p->phy_address = 0;
18994
18995    if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
18996        dev_p->autoneg = AUTONEG_ENABLE;
18997    else
18998       dev_p->autoneg = AUTONEG_DISABLE;
18999
19000
19001    return 0;
19002}
19003
19004static int
19005bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19006        struct thread *td)
19007{
19008    struct bxe_softc    *sc;
19009    int                 rval = 0;
19010    device_t            pci_dev;
19011    bxe_grcdump_t       *dump = NULL;
19012    int grc_dump_size;
19013    bxe_drvinfo_t   *drv_infop = NULL;
19014    bxe_dev_setting_t  *dev_p;
19015    bxe_dev_setting_t  dev_set;
19016    bxe_get_regs_t  *reg_p;
19017    bxe_reg_rdw_t *reg_rdw_p;
19018    bxe_pcicfg_rdw_t *cfg_rdw_p;
19019    bxe_perm_mac_addr_t *mac_addr_p;
19020
19021
19022    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19023        return ENXIO;
19024
19025    pci_dev= sc->dev;
19026
19027    dump = (bxe_grcdump_t *)data;
19028
19029    switch(cmd) {
19030
19031        case BXE_GRC_DUMP_SIZE:
19032            dump->pci_func = sc->pcie_func;
19033            dump->grcdump_size =
19034                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19035                     sizeof(struct  dump_header);
19036            break;
19037
19038        case BXE_GRC_DUMP:
19039
19040            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19041                                sizeof(struct  dump_header);
19042            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19043                (dump->grcdump_size < grc_dump_size)) {
19044                rval = EINVAL;
19045                break;
19046            }
19047
19048            if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19049                (!sc->grcdump_started)) {
19050                rval =  bxe_grc_dump(sc);
19051            }
19052
19053            if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19054                (sc->grc_dump != NULL))  {
19055                dump->grcdump_dwords = grc_dump_size >> 2;
19056                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19057                free(sc->grc_dump, M_DEVBUF);
19058                sc->grc_dump = NULL;
19059                sc->grcdump_started = 0;
19060                sc->grcdump_done = 0;
19061            }
19062
19063            break;
19064
19065        case BXE_DRV_INFO:
19066            drv_infop = (bxe_drvinfo_t *)data;
19067            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19068            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19069                BXE_DRIVER_VERSION);
19070            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19071                sc->devinfo.bc_ver_str);
19072            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19073                "%s", sc->fw_ver_str);
19074            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19075            drv_infop->reg_dump_len =
19076                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19077                    + sizeof(struct  dump_header);
19078            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19079                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19080            break;
19081
19082        case BXE_DEV_SETTING:
19083            dev_p = (bxe_dev_setting_t *)data;
19084            bxe_get_settings(sc, &dev_set);
19085            dev_p->supported = dev_set.supported;
19086            dev_p->advertising = dev_set.advertising;
19087            dev_p->speed = dev_set.speed;
19088            dev_p->duplex = dev_set.duplex;
19089            dev_p->port = dev_set.port;
19090            dev_p->phy_address = dev_set.phy_address;
19091            dev_p->autoneg = dev_set.autoneg;
19092
19093            break;
19094
19095        case BXE_GET_REGS:
19096
19097            reg_p = (bxe_get_regs_t *)data;
19098            grc_dump_size = reg_p->reg_buf_len;
19099
19100            if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19101                bxe_grc_dump(sc);
19102            }
19103            if((sc->grcdump_done) && (sc->grcdump_started) &&
19104                (sc->grc_dump != NULL))  {
19105                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19106                free(sc->grc_dump, M_DEVBUF);
19107                sc->grc_dump = NULL;
19108                sc->grcdump_started = 0;
19109                sc->grcdump_done = 0;
19110            }
19111
19112            break;
19113
19114        case BXE_RDW_REG:
19115            reg_rdw_p = (bxe_reg_rdw_t *)data;
19116            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19117                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19118                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19119
19120            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19121                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19122                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19123
19124            break;
19125
19126        case BXE_RDW_PCICFG:
19127            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19128            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19129
19130                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19131                                         cfg_rdw_p->cfg_width);
19132
19133            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19134                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19135                            cfg_rdw_p->cfg_width);
19136            } else {
19137                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19138            }
19139            break;
19140
19141        case BXE_MAC_ADDR:
19142            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19143            snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19144                sc->mac_addr_str);
19145            break;
19146
19147        case BXE_EEPROM:
19148            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19149            break;
19150
19151
19152        default:
19153            break;
19154    }
19155
19156    return (rval);
19157}
19158