bxe.c revision 337510
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 337510 2018-08-09 00:39:39Z davidcs $");
29
30#define BXE_DRIVER_VERSION "1.78.91"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero(0) for older kernels that don't include this changeset
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this at pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        QLOGIC_VENDORID,
171        CHIP_NUM_57840_4_10,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 4x10GbE"
174    },
175    {
176        BRCM_VENDORID,
177        CHIP_NUM_57840_2_20,
178        PCI_ANY_ID, PCI_ANY_ID,
179        "QLogic NetXtreme II BCM57840 2x20GbE"
180    },
181    {
182        BRCM_VENDORID,
183        CHIP_NUM_57840_MF,
184        PCI_ANY_ID, PCI_ANY_ID,
185        "QLogic NetXtreme II BCM57840 MF 10GbE"
186    },
187    {
188        0, 0, 0, 0, NULL
189    }
190};
191
192MALLOC_DECLARE(M_BXE_ILT);
193MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
194
195/*
196 * FreeBSD device entry points.
197 */
198static int bxe_probe(device_t);
199static int bxe_attach(device_t);
200static int bxe_detach(device_t);
201static int bxe_shutdown(device_t);
202
203/*
204 * FreeBSD KLD module/device interface event handler method.
205 */
206static device_method_t bxe_methods[] = {
207    /* Device interface (device_if.h) */
208    DEVMETHOD(device_probe,     bxe_probe),
209    DEVMETHOD(device_attach,    bxe_attach),
210    DEVMETHOD(device_detach,    bxe_detach),
211    DEVMETHOD(device_shutdown,  bxe_shutdown),
212    /* Bus interface (bus_if.h) */
213    DEVMETHOD(bus_print_child,  bus_generic_print_child),
214    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
215    KOBJMETHOD_END
216};
217
218/*
219 * FreeBSD KLD Module data declaration
220 */
221static driver_t bxe_driver = {
222    "bxe",                   /* module name */
223    bxe_methods,             /* event handler */
224    sizeof(struct bxe_softc) /* extra data */
225};
226
227/*
228 * FreeBSD dev class is needed to manage dev instances and
229 * to associate with a bus type
230 */
231static devclass_t bxe_devclass;
232
233MODULE_DEPEND(bxe, pci, 1, 1, 1);
234MODULE_DEPEND(bxe, ether, 1, 1, 1);
235DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
236
237/* resources needed for unloading a previously loaded device */
238
239#define BXE_PREV_WAIT_NEEDED 1
240struct mtx bxe_prev_mtx;
241MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
242struct bxe_prev_list_node {
243    LIST_ENTRY(bxe_prev_list_node) node;
244    uint8_t bus;
245    uint8_t slot;
246    uint8_t path;
247    uint8_t aer; /* XXX automatic error recovery */
248    uint8_t undi;
249};
250static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
251
252static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
253
254/* Tunable device values... */
255
256SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
257
258/* Debug */
259unsigned long bxe_debug = 0;
260SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
261             &bxe_debug, 0, "Debug logging mode");
262
263/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
264static int bxe_interrupt_mode = INTR_MODE_MSIX;
265SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
266           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
267
268/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
269static int bxe_queue_count = 4;
270SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
271           &bxe_queue_count, 0, "Multi-Queue queue count");
272
273/* max number of buffers per queue (default RX_BD_USABLE) */
274static int bxe_max_rx_bufs = 0;
275SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
276           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
277
278/* Host interrupt coalescing RX tick timer (usecs) */
279static int bxe_hc_rx_ticks = 25;
280SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
281           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
282
283/* Host interrupt coalescing TX tick timer (usecs) */
284static int bxe_hc_tx_ticks = 50;
285SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
286           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
287
288/* Maximum number of Rx packets to process at a time */
289static int bxe_rx_budget = 0xffffffff;
290SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
291           &bxe_rx_budget, 0, "Rx processing budget");
292
293/* Maximum LRO aggregation size */
294static int bxe_max_aggregation_size = 0;
295SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
296           &bxe_max_aggregation_size, 0, "max aggregation size");
297
298/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
299static int bxe_mrrs = -1;
300SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
301           &bxe_mrrs, 0, "PCIe maximum read request size");
302
303/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
304static int bxe_autogreeen = 0;
305SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
306           &bxe_autogreeen, 0, "AutoGrEEEn support");
307
308/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
309static int bxe_udp_rss = 0;
310SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
311           &bxe_udp_rss, 0, "UDP RSS support");
312
313
314#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
315
316#define STATS_OFFSET32(stat_name)                   \
317    (offsetof(struct bxe_eth_stats, stat_name) / 4)
318
319#define Q_STATS_OFFSET32(stat_name)                   \
320    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
321
322static const struct {
323    uint32_t offset;
324    uint32_t size;
325    uint32_t flags;
326#define STATS_FLAGS_PORT  1
327#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
328#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
329    char string[STAT_NAME_LEN];
330} bxe_eth_stats_arr[] = {
331    { STATS_OFFSET32(total_bytes_received_hi),
332                8, STATS_FLAGS_BOTH, "rx_bytes" },
333    { STATS_OFFSET32(error_bytes_received_hi),
334                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
335    { STATS_OFFSET32(total_unicast_packets_received_hi),
336                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
337    { STATS_OFFSET32(total_multicast_packets_received_hi),
338                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
339    { STATS_OFFSET32(total_broadcast_packets_received_hi),
340                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
341    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
342                8, STATS_FLAGS_PORT, "rx_crc_errors" },
343    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
344                8, STATS_FLAGS_PORT, "rx_align_errors" },
345    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
346                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
347    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
348                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
349    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
350                8, STATS_FLAGS_PORT, "rx_fragments" },
351    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
352                8, STATS_FLAGS_PORT, "rx_jabbers" },
353    { STATS_OFFSET32(no_buff_discard_hi),
354                8, STATS_FLAGS_BOTH, "rx_discards" },
355    { STATS_OFFSET32(mac_filter_discard),
356                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
357    { STATS_OFFSET32(mf_tag_discard),
358                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
359    { STATS_OFFSET32(pfc_frames_received_hi),
360                8, STATS_FLAGS_PORT, "pfc_frames_received" },
361    { STATS_OFFSET32(pfc_frames_sent_hi),
362                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
363    { STATS_OFFSET32(brb_drop_hi),
364                8, STATS_FLAGS_PORT, "rx_brb_discard" },
365    { STATS_OFFSET32(brb_truncate_hi),
366                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
367    { STATS_OFFSET32(pause_frames_received_hi),
368                8, STATS_FLAGS_PORT, "rx_pause_frames" },
369    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
370                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
371    { STATS_OFFSET32(nig_timer_max),
372                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
373    { STATS_OFFSET32(total_bytes_transmitted_hi),
374                8, STATS_FLAGS_BOTH, "tx_bytes" },
375    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
376                8, STATS_FLAGS_PORT, "tx_error_bytes" },
377    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
378                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
379    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
380                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
381    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
382                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
383    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
384                8, STATS_FLAGS_PORT, "tx_mac_errors" },
385    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
386                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
387    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
388                8, STATS_FLAGS_PORT, "tx_single_collisions" },
389    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
390                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
391    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
392                8, STATS_FLAGS_PORT, "tx_deferred" },
393    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
394                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
395    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
396                8, STATS_FLAGS_PORT, "tx_late_collisions" },
397    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
398                8, STATS_FLAGS_PORT, "tx_total_collisions" },
399    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
400                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
401    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
402                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
403    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
404                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
405    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
406                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
407    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
408                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
409    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
410                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
411    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
412                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
413    { STATS_OFFSET32(pause_frames_sent_hi),
414                8, STATS_FLAGS_PORT, "tx_pause_frames" },
415    { STATS_OFFSET32(total_tpa_aggregations_hi),
416                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
417    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
418                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
419    { STATS_OFFSET32(total_tpa_bytes_hi),
420                8, STATS_FLAGS_FUNC, "tpa_bytes"},
421    { STATS_OFFSET32(eee_tx_lpi),
422                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
423    { STATS_OFFSET32(rx_calls),
424                4, STATS_FLAGS_FUNC, "rx_calls"},
425    { STATS_OFFSET32(rx_pkts),
426                4, STATS_FLAGS_FUNC, "rx_pkts"},
427    { STATS_OFFSET32(rx_tpa_pkts),
428                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
429    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
430                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
431    { STATS_OFFSET32(rx_bxe_service_rxsgl),
432                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
433    { STATS_OFFSET32(rx_jumbo_sge_pkts),
434                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
435    { STATS_OFFSET32(rx_soft_errors),
436                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
437    { STATS_OFFSET32(rx_hw_csum_errors),
438                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
439    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
440                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
441    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
442                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
443    { STATS_OFFSET32(rx_budget_reached),
444                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
445    { STATS_OFFSET32(tx_pkts),
446                4, STATS_FLAGS_FUNC, "tx_pkts"},
447    { STATS_OFFSET32(tx_soft_errors),
448                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
449    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
450                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
451    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
452                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
453    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
454                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
455    { STATS_OFFSET32(tx_ofld_frames_lso),
456                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
457    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
458                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
459    { STATS_OFFSET32(tx_encap_failures),
460                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
461    { STATS_OFFSET32(tx_hw_queue_full),
462                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
463    { STATS_OFFSET32(tx_hw_max_queue_depth),
464                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
465    { STATS_OFFSET32(tx_dma_mapping_failure),
466                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
467    { STATS_OFFSET32(tx_max_drbr_queue_depth),
468                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
469    { STATS_OFFSET32(tx_window_violation_std),
470                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
471    { STATS_OFFSET32(tx_window_violation_tso),
472                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
473    { STATS_OFFSET32(tx_chain_lost_mbuf),
474                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
475    { STATS_OFFSET32(tx_frames_deferred),
476                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
477    { STATS_OFFSET32(tx_queue_xoff),
478                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
479    { STATS_OFFSET32(mbuf_defrag_attempts),
480                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
481    { STATS_OFFSET32(mbuf_defrag_failures),
482                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
483    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
485    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
486                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
487    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
488                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
489    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
490                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
491    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
492                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
493    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
494                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
495    { STATS_OFFSET32(mbuf_alloc_tx),
496                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
497    { STATS_OFFSET32(mbuf_alloc_rx),
498                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
499    { STATS_OFFSET32(mbuf_alloc_sge),
500                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
501    { STATS_OFFSET32(mbuf_alloc_tpa),
502                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
503    { STATS_OFFSET32(tx_queue_full_return),
504                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
505    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
506                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
507    { STATS_OFFSET32(tx_request_link_down_failures),
508                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
509    { STATS_OFFSET32(bd_avail_too_less_failures),
510                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
511    { STATS_OFFSET32(tx_mq_not_empty),
512                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
513    { STATS_OFFSET32(nsegs_path1_errors),
514                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
515    { STATS_OFFSET32(nsegs_path2_errors),
516                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
517
518
519};
520
521static const struct {
522    uint32_t offset;
523    uint32_t size;
524    char string[STAT_NAME_LEN];
525} bxe_eth_q_stats_arr[] = {
526    { Q_STATS_OFFSET32(total_bytes_received_hi),
527                8, "rx_bytes" },
528    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
529                8, "rx_ucast_packets" },
530    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
531                8, "rx_mcast_packets" },
532    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
533                8, "rx_bcast_packets" },
534    { Q_STATS_OFFSET32(no_buff_discard_hi),
535                8, "rx_discards" },
536    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
537                8, "tx_bytes" },
538    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
539                8, "tx_ucast_packets" },
540    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
541                8, "tx_mcast_packets" },
542    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
543                8, "tx_bcast_packets" },
544    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
545                8, "tpa_aggregations" },
546    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
547                8, "tpa_aggregated_frames"},
548    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
549                8, "tpa_bytes"},
550    { Q_STATS_OFFSET32(rx_calls),
551                4, "rx_calls"},
552    { Q_STATS_OFFSET32(rx_pkts),
553                4, "rx_pkts"},
554    { Q_STATS_OFFSET32(rx_tpa_pkts),
555                4, "rx_tpa_pkts"},
556    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
557                4, "rx_erroneous_jumbo_sge_pkts"},
558    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
559                4, "rx_bxe_service_rxsgl"},
560    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
561                4, "rx_jumbo_sge_pkts"},
562    { Q_STATS_OFFSET32(rx_soft_errors),
563                4, "rx_soft_errors"},
564    { Q_STATS_OFFSET32(rx_hw_csum_errors),
565                4, "rx_hw_csum_errors"},
566    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
567                4, "rx_ofld_frames_csum_ip"},
568    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
569                4, "rx_ofld_frames_csum_tcp_udp"},
570    { Q_STATS_OFFSET32(rx_budget_reached),
571                4, "rx_budget_reached"},
572    { Q_STATS_OFFSET32(tx_pkts),
573                4, "tx_pkts"},
574    { Q_STATS_OFFSET32(tx_soft_errors),
575                4, "tx_soft_errors"},
576    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
577                4, "tx_ofld_frames_csum_ip"},
578    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
579                4, "tx_ofld_frames_csum_tcp"},
580    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
581                4, "tx_ofld_frames_csum_udp"},
582    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
583                4, "tx_ofld_frames_lso"},
584    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
585                4, "tx_ofld_frames_lso_hdr_splits"},
586    { Q_STATS_OFFSET32(tx_encap_failures),
587                4, "tx_encap_failures"},
588    { Q_STATS_OFFSET32(tx_hw_queue_full),
589                4, "tx_hw_queue_full"},
590    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
591                4, "tx_hw_max_queue_depth"},
592    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
593                4, "tx_dma_mapping_failure"},
594    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
595                4, "tx_max_drbr_queue_depth"},
596    { Q_STATS_OFFSET32(tx_window_violation_std),
597                4, "tx_window_violation_std"},
598    { Q_STATS_OFFSET32(tx_window_violation_tso),
599                4, "tx_window_violation_tso"},
600    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
601                4, "tx_chain_lost_mbuf"},
602    { Q_STATS_OFFSET32(tx_frames_deferred),
603                4, "tx_frames_deferred"},
604    { Q_STATS_OFFSET32(tx_queue_xoff),
605                4, "tx_queue_xoff"},
606    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
607                4, "mbuf_defrag_attempts"},
608    { Q_STATS_OFFSET32(mbuf_defrag_failures),
609                4, "mbuf_defrag_failures"},
610    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
611                4, "mbuf_rx_bd_alloc_failed"},
612    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
613                4, "mbuf_rx_bd_mapping_failed"},
614    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
615                4, "mbuf_rx_tpa_alloc_failed"},
616    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
617                4, "mbuf_rx_tpa_mapping_failed"},
618    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
619                4, "mbuf_rx_sge_alloc_failed"},
620    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
621                4, "mbuf_rx_sge_mapping_failed"},
622    { Q_STATS_OFFSET32(mbuf_alloc_tx),
623                4, "mbuf_alloc_tx"},
624    { Q_STATS_OFFSET32(mbuf_alloc_rx),
625                4, "mbuf_alloc_rx"},
626    { Q_STATS_OFFSET32(mbuf_alloc_sge),
627                4, "mbuf_alloc_sge"},
628    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
629                4, "mbuf_alloc_tpa"},
630    { Q_STATS_OFFSET32(tx_queue_full_return),
631                4, "tx_queue_full_return"},
632    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
633                4, "bxe_tx_mq_sc_state_failures"},
634    { Q_STATS_OFFSET32(tx_request_link_down_failures),
635                4, "tx_request_link_down_failures"},
636    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
637                4, "bd_avail_too_less_failures"},
638    { Q_STATS_OFFSET32(tx_mq_not_empty),
639                4, "tx_mq_not_empty"},
640    { Q_STATS_OFFSET32(nsegs_path1_errors),
641                4, "nsegs_path1_errors"},
642    { Q_STATS_OFFSET32(nsegs_path2_errors),
643                4, "nsegs_path2_errors"}
644
645
646};
647
648#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
649#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
650
651
652static void    bxe_cmng_fns_init(struct bxe_softc *sc,
653                                 uint8_t          read_cfg,
654                                 uint8_t          cmng_type);
655static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
656static void    storm_memset_cmng(struct bxe_softc *sc,
657                                 struct cmng_init *cmng,
658                                 uint8_t          port);
659static void    bxe_set_reset_global(struct bxe_softc *sc);
660static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
661static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
662                                 int              engine);
663static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
664static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
665                                   uint8_t          *global,
666                                   uint8_t          print);
667static void    bxe_int_disable(struct bxe_softc *sc);
668static int     bxe_release_leader_lock(struct bxe_softc *sc);
669static void    bxe_pf_disable(struct bxe_softc *sc);
670static void    bxe_free_fp_buffers(struct bxe_softc *sc);
671static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
672                                      struct bxe_fastpath *fp,
673                                      uint16_t            rx_bd_prod,
674                                      uint16_t            rx_cq_prod,
675                                      uint16_t            rx_sge_prod);
676static void    bxe_link_report_locked(struct bxe_softc *sc);
677static void    bxe_link_report(struct bxe_softc *sc);
678static void    bxe_link_status_update(struct bxe_softc *sc);
679static void    bxe_periodic_callout_func(void *xsc);
680static void    bxe_periodic_start(struct bxe_softc *sc);
681static void    bxe_periodic_stop(struct bxe_softc *sc);
682static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
683                                    uint16_t prev_index,
684                                    uint16_t index);
685static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
686                                     int                 queue);
687static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
688                                     uint16_t            index);
689static uint8_t bxe_txeof(struct bxe_softc *sc,
690                         struct bxe_fastpath *fp);
691static void    bxe_task_fp(struct bxe_fastpath *fp);
692static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
693                                     struct mbuf      *m,
694                                     uint8_t          contents);
695static int     bxe_alloc_mem(struct bxe_softc *sc);
696static void    bxe_free_mem(struct bxe_softc *sc);
697static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
698static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
699static int     bxe_interrupt_attach(struct bxe_softc *sc);
700static void    bxe_interrupt_detach(struct bxe_softc *sc);
701static void    bxe_set_rx_mode(struct bxe_softc *sc);
702static int     bxe_init_locked(struct bxe_softc *sc);
703static int     bxe_stop_locked(struct bxe_softc *sc);
704static __noinline int bxe_nic_load(struct bxe_softc *sc,
705                                   int              load_mode);
706static __noinline int bxe_nic_unload(struct bxe_softc *sc,
707                                     uint32_t         unload_mode,
708                                     uint8_t          keep_link);
709
710static void bxe_handle_sp_tq(void *context, int pending);
711static void bxe_handle_fp_tq(void *context, int pending);
712
713static int bxe_add_cdev(struct bxe_softc *sc);
714static void bxe_del_cdev(struct bxe_softc *sc);
715int bxe_grc_dump(struct bxe_softc *sc);
716static int bxe_alloc_buf_rings(struct bxe_softc *sc);
717static void bxe_free_buf_rings(struct bxe_softc *sc);
718
719/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
720uint32_t
721calc_crc32(uint8_t  *crc32_packet,
722           uint32_t crc32_length,
723           uint32_t crc32_seed,
724           uint8_t  complement)
725{
726   uint32_t byte         = 0;
727   uint32_t bit          = 0;
728   uint8_t  msb          = 0;
729   uint32_t temp         = 0;
730   uint32_t shft         = 0;
731   uint8_t  current_byte = 0;
732   uint32_t crc32_result = crc32_seed;
733   const uint32_t CRC32_POLY = 0x1edc6f41;
734
735   if ((crc32_packet == NULL) ||
736       (crc32_length == 0) ||
737       ((crc32_length % 8) != 0))
738    {
739        return (crc32_result);
740    }
741
742    for (byte = 0; byte < crc32_length; byte = byte + 1)
743    {
744        current_byte = crc32_packet[byte];
745        for (bit = 0; bit < 8; bit = bit + 1)
746        {
747            /* msb = crc32_result[31]; */
748            msb = (uint8_t)(crc32_result >> 31);
749
750            crc32_result = crc32_result << 1;
751
752            /* it (msb != current_byte[bit]) */
753            if (msb != (0x1 & (current_byte >> bit)))
754            {
755                crc32_result = crc32_result ^ CRC32_POLY;
756                /* crc32_result[0] = 1 */
757                crc32_result |= 1;
758            }
759        }
760    }
761
762    /* Last step is to:
763     * 1. "mirror" every bit
764     * 2. swap the 4 bytes
765     * 3. complement each bit
766     */
767
768    /* Mirror */
769    temp = crc32_result;
770    shft = sizeof(crc32_result) * 8 - 1;
771
772    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
773    {
774        temp <<= 1;
775        temp |= crc32_result & 1;
776        shft-- ;
777    }
778
779    /* temp[31-bit] = crc32_result[bit] */
780    temp <<= shft;
781
782    /* Swap */
783    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
784    {
785        uint32_t t0, t1, t2, t3;
786        t0 = (0x000000ff & (temp >> 24));
787        t1 = (0x0000ff00 & (temp >> 8));
788        t2 = (0x00ff0000 & (temp << 8));
789        t3 = (0xff000000 & (temp << 24));
790        crc32_result = t0 | t1 | t2 | t3;
791    }
792
793    /* Complement */
794    if (complement)
795    {
796        crc32_result = ~crc32_result;
797    }
798
799    return (crc32_result);
800}
801
802int
803bxe_test_bit(int                    nr,
804             volatile unsigned long *addr)
805{
806    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
807}
808
809void
810bxe_set_bit(unsigned int           nr,
811            volatile unsigned long *addr)
812{
813    atomic_set_acq_long(addr, (1 << nr));
814}
815
816void
817bxe_clear_bit(int                    nr,
818              volatile unsigned long *addr)
819{
820    atomic_clear_acq_long(addr, (1 << nr));
821}
822
823int
824bxe_test_and_set_bit(int                    nr,
825                       volatile unsigned long *addr)
826{
827    unsigned long x;
828    nr = (1 << nr);
829    do {
830        x = *addr;
831    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
832    // if (x & nr) bit_was_set; else bit_was_not_set;
833    return (x & nr);
834}
835
836int
837bxe_test_and_clear_bit(int                    nr,
838                       volatile unsigned long *addr)
839{
840    unsigned long x;
841    nr = (1 << nr);
842    do {
843        x = *addr;
844    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
845    // if (x & nr) bit_was_set; else bit_was_not_set;
846    return (x & nr);
847}
848
849int
850bxe_cmpxchg(volatile int *addr,
851            int          old,
852            int          new)
853{
854    int x;
855    do {
856        x = *addr;
857    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
858    return (x);
859}
860
861/*
862 * Get DMA memory from the OS.
863 *
864 * Validates that the OS has provided DMA buffers in response to a
865 * bus_dmamap_load call and saves the physical address of those buffers.
866 * When the callback is used the OS will return 0 for the mapping function
867 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
868 * failures back to the caller.
869 *
870 * Returns:
871 *   Nothing.
872 */
873static void
874bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
875{
876    struct bxe_dma *dma = arg;
877
878    if (error) {
879        dma->paddr = 0;
880        dma->nseg  = 0;
881        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
882    } else {
883        dma->paddr = segs->ds_addr;
884        dma->nseg  = nseg;
885    }
886}
887
888/*
889 * Allocate a block of memory and map it for DMA. No partial completions
890 * allowed and release any resources acquired if we can't acquire all
891 * resources.
892 *
893 * Returns:
894 *   0 = Success, !0 = Failure
895 */
896int
897bxe_dma_alloc(struct bxe_softc *sc,
898              bus_size_t       size,
899              struct bxe_dma   *dma,
900              const char       *msg)
901{
902    int rc;
903
904    if (dma->size > 0) {
905        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
906              (unsigned long)dma->size);
907        return (1);
908    }
909
910    memset(dma, 0, sizeof(*dma)); /* sanity */
911    dma->sc   = sc;
912    dma->size = size;
913    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
914
915    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
916                            BCM_PAGE_SIZE,      /* alignment */
917                            0,                  /* boundary limit */
918                            BUS_SPACE_MAXADDR,  /* restricted low */
919                            BUS_SPACE_MAXADDR,  /* restricted hi */
920                            NULL,               /* addr filter() */
921                            NULL,               /* addr filter() arg */
922                            size,               /* max map size */
923                            1,                  /* num discontinuous */
924                            size,               /* max seg size */
925                            BUS_DMA_ALLOCNOW,   /* flags */
926                            NULL,               /* lock() */
927                            NULL,               /* lock() arg */
928                            &dma->tag);         /* returned dma tag */
929    if (rc != 0) {
930        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
931        memset(dma, 0, sizeof(*dma));
932        return (1);
933    }
934
935    rc = bus_dmamem_alloc(dma->tag,
936                          (void **)&dma->vaddr,
937                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
938                          &dma->map);
939    if (rc != 0) {
940        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
941        bus_dma_tag_destroy(dma->tag);
942        memset(dma, 0, sizeof(*dma));
943        return (1);
944    }
945
946    rc = bus_dmamap_load(dma->tag,
947                         dma->map,
948                         dma->vaddr,
949                         size,
950                         bxe_dma_map_addr, /* BLOGD in here */
951                         dma,
952                         BUS_DMA_NOWAIT);
953    if (rc != 0) {
954        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
955        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
956        bus_dma_tag_destroy(dma->tag);
957        memset(dma, 0, sizeof(*dma));
958        return (1);
959    }
960
961    return (0);
962}
963
964void
965bxe_dma_free(struct bxe_softc *sc,
966             struct bxe_dma   *dma)
967{
968    if (dma->size > 0) {
969        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
970
971        bus_dmamap_sync(dma->tag, dma->map,
972                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
973        bus_dmamap_unload(dma->tag, dma->map);
974        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
975        bus_dma_tag_destroy(dma->tag);
976    }
977
978    memset(dma, 0, sizeof(*dma));
979}
980
981/*
982 * These indirect read and write routines are only during init.
983 * The locking is handled by the MCP.
984 */
985
986void
987bxe_reg_wr_ind(struct bxe_softc *sc,
988               uint32_t         addr,
989               uint32_t         val)
990{
991    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
992    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
993    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
994}
995
996uint32_t
997bxe_reg_rd_ind(struct bxe_softc *sc,
998               uint32_t         addr)
999{
1000    uint32_t val;
1001
1002    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1003    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1004    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1005
1006    return (val);
1007}
1008
1009static int
1010bxe_acquire_hw_lock(struct bxe_softc *sc,
1011                    uint32_t         resource)
1012{
1013    uint32_t lock_status;
1014    uint32_t resource_bit = (1 << resource);
1015    int func = SC_FUNC(sc);
1016    uint32_t hw_lock_control_reg;
1017    int cnt;
1018
1019    /* validate the resource is within range */
1020    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1021        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1022            " resource_bit 0x%x\n", resource, resource_bit);
1023        return (-1);
1024    }
1025
1026    if (func <= 5) {
1027        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1028    } else {
1029        hw_lock_control_reg =
1030                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1031    }
1032
1033    /* validate the resource is not already taken */
1034    lock_status = REG_RD(sc, hw_lock_control_reg);
1035    if (lock_status & resource_bit) {
1036        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1037              resource, lock_status, resource_bit);
1038        return (-1);
1039    }
1040
1041    /* try every 5ms for 5 seconds */
1042    for (cnt = 0; cnt < 1000; cnt++) {
1043        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1044        lock_status = REG_RD(sc, hw_lock_control_reg);
1045        if (lock_status & resource_bit) {
1046            return (0);
1047        }
1048        DELAY(5000);
1049    }
1050
1051    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1052        resource, resource_bit);
1053    return (-1);
1054}
1055
1056static int
1057bxe_release_hw_lock(struct bxe_softc *sc,
1058                    uint32_t         resource)
1059{
1060    uint32_t lock_status;
1061    uint32_t resource_bit = (1 << resource);
1062    int func = SC_FUNC(sc);
1063    uint32_t hw_lock_control_reg;
1064
1065    /* validate the resource is within range */
1066    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1067        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1068            " resource_bit 0x%x\n", resource, resource_bit);
1069        return (-1);
1070    }
1071
1072    if (func <= 5) {
1073        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1074    } else {
1075        hw_lock_control_reg =
1076                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1077    }
1078
1079    /* validate the resource is currently taken */
1080    lock_status = REG_RD(sc, hw_lock_control_reg);
1081    if (!(lock_status & resource_bit)) {
1082        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1083              resource, lock_status, resource_bit);
1084        return (-1);
1085    }
1086
1087    REG_WR(sc, hw_lock_control_reg, resource_bit);
1088    return (0);
1089}
1090static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1091{
1092	BXE_PHY_LOCK(sc);
1093	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1094}
1095
1096static void bxe_release_phy_lock(struct bxe_softc *sc)
1097{
1098	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1099	BXE_PHY_UNLOCK(sc);
1100}
1101/*
1102 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1103 * had we done things the other way around, if two pfs from the same port
1104 * would attempt to access nvram at the same time, we could run into a
1105 * scenario such as:
1106 * pf A takes the port lock.
1107 * pf B succeeds in taking the same lock since they are from the same port.
1108 * pf A takes the per pf misc lock. Performs eeprom access.
1109 * pf A finishes. Unlocks the per pf misc lock.
1110 * Pf B takes the lock and proceeds to perform it's own access.
1111 * pf A unlocks the per port lock, while pf B is still working (!).
1112 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1113 * access corrupted by pf B).*
1114 */
1115static int
1116bxe_acquire_nvram_lock(struct bxe_softc *sc)
1117{
1118    int port = SC_PORT(sc);
1119    int count, i;
1120    uint32_t val = 0;
1121
1122    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1123    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1124
1125    /* adjust timeout for emulation/FPGA */
1126    count = NVRAM_TIMEOUT_COUNT;
1127    if (CHIP_REV_IS_SLOW(sc)) {
1128        count *= 100;
1129    }
1130
1131    /* request access to nvram interface */
1132    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1133           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1134
1135    for (i = 0; i < count*10; i++) {
1136        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1137        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1138            break;
1139        }
1140
1141        DELAY(5);
1142    }
1143
1144    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1145        BLOGE(sc, "Cannot get access to nvram interface "
1146            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1147            port, val);
1148        return (-1);
1149    }
1150
1151    return (0);
1152}
1153
1154static int
1155bxe_release_nvram_lock(struct bxe_softc *sc)
1156{
1157    int port = SC_PORT(sc);
1158    int count, i;
1159    uint32_t val = 0;
1160
1161    /* adjust timeout for emulation/FPGA */
1162    count = NVRAM_TIMEOUT_COUNT;
1163    if (CHIP_REV_IS_SLOW(sc)) {
1164        count *= 100;
1165    }
1166
1167    /* relinquish nvram interface */
1168    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1169           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1170
1171    for (i = 0; i < count*10; i++) {
1172        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1173        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1174            break;
1175        }
1176
1177        DELAY(5);
1178    }
1179
1180    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1181        BLOGE(sc, "Cannot free access to nvram interface "
1182            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1183            port, val);
1184        return (-1);
1185    }
1186
1187    /* release HW lock: protect against other PFs in PF Direct Assignment */
1188    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1189
1190    return (0);
1191}
1192
1193static void
1194bxe_enable_nvram_access(struct bxe_softc *sc)
1195{
1196    uint32_t val;
1197
1198    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1199
1200    /* enable both bits, even on read */
1201    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1202           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1203}
1204
1205static void
1206bxe_disable_nvram_access(struct bxe_softc *sc)
1207{
1208    uint32_t val;
1209
1210    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1211
1212    /* disable both bits, even after read */
1213    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1214           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1215                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1216}
1217
1218static int
1219bxe_nvram_read_dword(struct bxe_softc *sc,
1220                     uint32_t         offset,
1221                     uint32_t         *ret_val,
1222                     uint32_t         cmd_flags)
1223{
1224    int count, i, rc;
1225    uint32_t val;
1226
1227    /* build the command word */
1228    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1229
1230    /* need to clear DONE bit separately */
1231    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1232
1233    /* address of the NVRAM to read from */
1234    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1235           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1236
1237    /* issue a read command */
1238    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1239
1240    /* adjust timeout for emulation/FPGA */
1241    count = NVRAM_TIMEOUT_COUNT;
1242    if (CHIP_REV_IS_SLOW(sc)) {
1243        count *= 100;
1244    }
1245
1246    /* wait for completion */
1247    *ret_val = 0;
1248    rc = -1;
1249    for (i = 0; i < count; i++) {
1250        DELAY(5);
1251        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1252
1253        if (val & MCPR_NVM_COMMAND_DONE) {
1254            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1255            /* we read nvram data in cpu order
1256             * but ethtool sees it as an array of bytes
1257             * converting to big-endian will do the work
1258             */
1259            *ret_val = htobe32(val);
1260            rc = 0;
1261            break;
1262        }
1263    }
1264
1265    if (rc == -1) {
1266        BLOGE(sc, "nvram read timeout expired "
1267            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1268            offset, cmd_flags, val);
1269    }
1270
1271    return (rc);
1272}
1273
1274static int
1275bxe_nvram_read(struct bxe_softc *sc,
1276               uint32_t         offset,
1277               uint8_t          *ret_buf,
1278               int              buf_size)
1279{
1280    uint32_t cmd_flags;
1281    uint32_t val;
1282    int rc;
1283
1284    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1285        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1286              offset, buf_size);
1287        return (-1);
1288    }
1289
1290    if ((offset + buf_size) > sc->devinfo.flash_size) {
1291        BLOGE(sc, "Invalid parameter, "
1292                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1293              offset, buf_size, sc->devinfo.flash_size);
1294        return (-1);
1295    }
1296
1297    /* request access to nvram interface */
1298    rc = bxe_acquire_nvram_lock(sc);
1299    if (rc) {
1300        return (rc);
1301    }
1302
1303    /* enable access to nvram interface */
1304    bxe_enable_nvram_access(sc);
1305
1306    /* read the first word(s) */
1307    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1308    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1309        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1310        memcpy(ret_buf, &val, 4);
1311
1312        /* advance to the next dword */
1313        offset += sizeof(uint32_t);
1314        ret_buf += sizeof(uint32_t);
1315        buf_size -= sizeof(uint32_t);
1316        cmd_flags = 0;
1317    }
1318
1319    if (rc == 0) {
1320        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1321        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1322        memcpy(ret_buf, &val, 4);
1323    }
1324
1325    /* disable access to nvram interface */
1326    bxe_disable_nvram_access(sc);
1327    bxe_release_nvram_lock(sc);
1328
1329    return (rc);
1330}
1331
1332static int
1333bxe_nvram_write_dword(struct bxe_softc *sc,
1334                      uint32_t         offset,
1335                      uint32_t         val,
1336                      uint32_t         cmd_flags)
1337{
1338    int count, i, rc;
1339
1340    /* build the command word */
1341    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1342
1343    /* need to clear DONE bit separately */
1344    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1345
1346    /* write the data */
1347    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1348
1349    /* address of the NVRAM to write to */
1350    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1351           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1352
1353    /* issue the write command */
1354    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1355
1356    /* adjust timeout for emulation/FPGA */
1357    count = NVRAM_TIMEOUT_COUNT;
1358    if (CHIP_REV_IS_SLOW(sc)) {
1359        count *= 100;
1360    }
1361
1362    /* wait for completion */
1363    rc = -1;
1364    for (i = 0; i < count; i++) {
1365        DELAY(5);
1366        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1367        if (val & MCPR_NVM_COMMAND_DONE) {
1368            rc = 0;
1369            break;
1370        }
1371    }
1372
1373    if (rc == -1) {
1374        BLOGE(sc, "nvram write timeout expired "
1375            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1376            offset, cmd_flags, val);
1377    }
1378
1379    return (rc);
1380}
1381
1382#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1383
1384static int
1385bxe_nvram_write1(struct bxe_softc *sc,
1386                 uint32_t         offset,
1387                 uint8_t          *data_buf,
1388                 int              buf_size)
1389{
1390    uint32_t cmd_flags;
1391    uint32_t align_offset;
1392    uint32_t val;
1393    int rc;
1394
1395    if ((offset + buf_size) > sc->devinfo.flash_size) {
1396        BLOGE(sc, "Invalid parameter, "
1397                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1398              offset, buf_size, sc->devinfo.flash_size);
1399        return (-1);
1400    }
1401
1402    /* request access to nvram interface */
1403    rc = bxe_acquire_nvram_lock(sc);
1404    if (rc) {
1405        return (rc);
1406    }
1407
1408    /* enable access to nvram interface */
1409    bxe_enable_nvram_access(sc);
1410
1411    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1412    align_offset = (offset & ~0x03);
1413    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1414
1415    if (rc == 0) {
1416        val &= ~(0xff << BYTE_OFFSET(offset));
1417        val |= (*data_buf << BYTE_OFFSET(offset));
1418
1419        /* nvram data is returned as an array of bytes
1420         * convert it back to cpu order
1421         */
1422        val = be32toh(val);
1423
1424        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1425    }
1426
1427    /* disable access to nvram interface */
1428    bxe_disable_nvram_access(sc);
1429    bxe_release_nvram_lock(sc);
1430
1431    return (rc);
1432}
1433
1434static int
1435bxe_nvram_write(struct bxe_softc *sc,
1436                uint32_t         offset,
1437                uint8_t          *data_buf,
1438                int              buf_size)
1439{
1440    uint32_t cmd_flags;
1441    uint32_t val;
1442    uint32_t written_so_far;
1443    int rc;
1444
1445    if (buf_size == 1) {
1446        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1447    }
1448
1449    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1450        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1451              offset, buf_size);
1452        return (-1);
1453    }
1454
1455    if (buf_size == 0) {
1456        return (0); /* nothing to do */
1457    }
1458
1459    if ((offset + buf_size) > sc->devinfo.flash_size) {
1460        BLOGE(sc, "Invalid parameter, "
1461                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1462              offset, buf_size, sc->devinfo.flash_size);
1463        return (-1);
1464    }
1465
1466    /* request access to nvram interface */
1467    rc = bxe_acquire_nvram_lock(sc);
1468    if (rc) {
1469        return (rc);
1470    }
1471
1472    /* enable access to nvram interface */
1473    bxe_enable_nvram_access(sc);
1474
1475    written_so_far = 0;
1476    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1477    while ((written_so_far < buf_size) && (rc == 0)) {
1478        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1479            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1480        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1481            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1482        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1483            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1484        }
1485
1486        memcpy(&val, data_buf, 4);
1487
1488        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1489
1490        /* advance to the next dword */
1491        offset += sizeof(uint32_t);
1492        data_buf += sizeof(uint32_t);
1493        written_so_far += sizeof(uint32_t);
1494        cmd_flags = 0;
1495    }
1496
1497    /* disable access to nvram interface */
1498    bxe_disable_nvram_access(sc);
1499    bxe_release_nvram_lock(sc);
1500
1501    return (rc);
1502}
1503
1504/* copy command into DMAE command memory and set DMAE command Go */
1505void
1506bxe_post_dmae(struct bxe_softc    *sc,
1507              struct dmae_cmd *dmae,
1508              int                 idx)
1509{
1510    uint32_t cmd_offset;
1511    int i;
1512
1513    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1514    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1515        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1516    }
1517
1518    REG_WR(sc, dmae_reg_go_c[idx], 1);
1519}
1520
1521uint32_t
1522bxe_dmae_opcode_add_comp(uint32_t opcode,
1523                         uint8_t  comp_type)
1524{
1525    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1526                      DMAE_CMD_C_TYPE_ENABLE));
1527}
1528
1529uint32_t
1530bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1531{
1532    return (opcode & ~DMAE_CMD_SRC_RESET);
1533}
1534
1535uint32_t
1536bxe_dmae_opcode(struct bxe_softc *sc,
1537                uint8_t          src_type,
1538                uint8_t          dst_type,
1539                uint8_t          with_comp,
1540                uint8_t          comp_type)
1541{
1542    uint32_t opcode = 0;
1543
1544    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1545               (dst_type << DMAE_CMD_DST_SHIFT));
1546
1547    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1548
1549    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1550
1551    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1552               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1553
1554    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1555
1556#ifdef __BIG_ENDIAN
1557    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1558#else
1559    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1560#endif
1561
1562    if (with_comp) {
1563        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1564    }
1565
1566    return (opcode);
1567}
1568
1569static void
1570bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1571                        struct dmae_cmd *dmae,
1572                        uint8_t             src_type,
1573                        uint8_t             dst_type)
1574{
1575    memset(dmae, 0, sizeof(struct dmae_cmd));
1576
1577    /* set the opcode */
1578    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1579                                   TRUE, DMAE_COMP_PCI);
1580
1581    /* fill in the completion parameters */
1582    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1583    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1584    dmae->comp_val     = DMAE_COMP_VAL;
1585}
1586
1587/* issue a DMAE command over the init channel and wait for completion */
1588static int
1589bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1590                         struct dmae_cmd *dmae)
1591{
1592    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1593    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1594
1595    BXE_DMAE_LOCK(sc);
1596
1597    /* reset completion */
1598    *wb_comp = 0;
1599
1600    /* post the command on the channel used for initializations */
1601    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1602
1603    /* wait for completion */
1604    DELAY(5);
1605
1606    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1607        if (!timeout ||
1608            (sc->recovery_state != BXE_RECOVERY_DONE &&
1609             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1610            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1611                *wb_comp, sc->recovery_state);
1612            BXE_DMAE_UNLOCK(sc);
1613            return (DMAE_TIMEOUT);
1614        }
1615
1616        timeout--;
1617        DELAY(50);
1618    }
1619
1620    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1621        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1622                *wb_comp, sc->recovery_state);
1623        BXE_DMAE_UNLOCK(sc);
1624        return (DMAE_PCI_ERROR);
1625    }
1626
1627    BXE_DMAE_UNLOCK(sc);
1628    return (0);
1629}
1630
1631void
1632bxe_read_dmae(struct bxe_softc *sc,
1633              uint32_t         src_addr,
1634              uint32_t         len32)
1635{
1636    struct dmae_cmd dmae;
1637    uint32_t *data;
1638    int i, rc;
1639
1640    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1641
1642    if (!sc->dmae_ready) {
1643        data = BXE_SP(sc, wb_data[0]);
1644
1645        for (i = 0; i < len32; i++) {
1646            data[i] = (CHIP_IS_E1(sc)) ?
1647                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1648                          REG_RD(sc, (src_addr + (i * 4)));
1649        }
1650
1651        return;
1652    }
1653
1654    /* set opcode and fixed command fields */
1655    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1656
1657    /* fill in addresses and len */
1658    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1659    dmae.src_addr_hi = 0;
1660    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1661    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1662    dmae.len         = len32;
1663
1664    /* issue the command and wait for completion */
1665    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1666        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1667    }
1668}
1669
1670void
1671bxe_write_dmae(struct bxe_softc *sc,
1672               bus_addr_t       dma_addr,
1673               uint32_t         dst_addr,
1674               uint32_t         len32)
1675{
1676    struct dmae_cmd dmae;
1677    int rc;
1678
1679    if (!sc->dmae_ready) {
1680        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1681
1682        if (CHIP_IS_E1(sc)) {
1683            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1684        } else {
1685            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1686        }
1687
1688        return;
1689    }
1690
1691    /* set opcode and fixed command fields */
1692    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1693
1694    /* fill in addresses and len */
1695    dmae.src_addr_lo = U64_LO(dma_addr);
1696    dmae.src_addr_hi = U64_HI(dma_addr);
1697    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1698    dmae.dst_addr_hi = 0;
1699    dmae.len         = len32;
1700
1701    /* issue the command and wait for completion */
1702    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1703        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1704    }
1705}
1706
1707void
1708bxe_write_dmae_phys_len(struct bxe_softc *sc,
1709                        bus_addr_t       phys_addr,
1710                        uint32_t         addr,
1711                        uint32_t         len)
1712{
1713    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1714    int offset = 0;
1715
1716    while (len > dmae_wr_max) {
1717        bxe_write_dmae(sc,
1718                       (phys_addr + offset), /* src DMA address */
1719                       (addr + offset),      /* dst GRC address */
1720                       dmae_wr_max);
1721        offset += (dmae_wr_max * 4);
1722        len -= dmae_wr_max;
1723    }
1724
1725    bxe_write_dmae(sc,
1726                   (phys_addr + offset), /* src DMA address */
1727                   (addr + offset),      /* dst GRC address */
1728                   len);
1729}
1730
1731void
1732bxe_set_ctx_validation(struct bxe_softc   *sc,
1733                       struct eth_context *cxt,
1734                       uint32_t           cid)
1735{
1736    /* ustorm cxt validation */
1737    cxt->ustorm_ag_context.cdu_usage =
1738        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1739            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1740    /* xcontext validation */
1741    cxt->xstorm_ag_context.cdu_reserved =
1742        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1743            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1744}
1745
1746static void
1747bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1748                            uint8_t          port,
1749                            uint8_t          fw_sb_id,
1750                            uint8_t          sb_index,
1751                            uint8_t          ticks)
1752{
1753    uint32_t addr =
1754        (BAR_CSTRORM_INTMEM +
1755         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1756
1757    REG_WR8(sc, addr, ticks);
1758
1759    BLOGD(sc, DBG_LOAD,
1760          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1761          port, fw_sb_id, sb_index, ticks);
1762}
1763
1764static void
1765bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1766                            uint8_t          port,
1767                            uint16_t         fw_sb_id,
1768                            uint8_t          sb_index,
1769                            uint8_t          disable)
1770{
1771    uint32_t enable_flag =
1772        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1773    uint32_t addr =
1774        (BAR_CSTRORM_INTMEM +
1775         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1776    uint8_t flags;
1777
1778    /* clear and set */
1779    flags = REG_RD8(sc, addr);
1780    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1781    flags |= enable_flag;
1782    REG_WR8(sc, addr, flags);
1783
1784    BLOGD(sc, DBG_LOAD,
1785          "port %d fw_sb_id %d sb_index %d disable %d\n",
1786          port, fw_sb_id, sb_index, disable);
1787}
1788
1789void
1790bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1791                             uint8_t          fw_sb_id,
1792                             uint8_t          sb_index,
1793                             uint8_t          disable,
1794                             uint16_t         usec)
1795{
1796    int port = SC_PORT(sc);
1797    uint8_t ticks = (usec / 4); /* XXX ??? */
1798
1799    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1800
1801    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1802    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1803}
1804
1805void
1806elink_cb_udelay(struct bxe_softc *sc,
1807                uint32_t         usecs)
1808{
1809    DELAY(usecs);
1810}
1811
1812uint32_t
1813elink_cb_reg_read(struct bxe_softc *sc,
1814                  uint32_t         reg_addr)
1815{
1816    return (REG_RD(sc, reg_addr));
1817}
1818
1819void
1820elink_cb_reg_write(struct bxe_softc *sc,
1821                   uint32_t         reg_addr,
1822                   uint32_t         val)
1823{
1824    REG_WR(sc, reg_addr, val);
1825}
1826
1827void
1828elink_cb_reg_wb_write(struct bxe_softc *sc,
1829                      uint32_t         offset,
1830                      uint32_t         *wb_write,
1831                      uint16_t         len)
1832{
1833    REG_WR_DMAE(sc, offset, wb_write, len);
1834}
1835
1836void
1837elink_cb_reg_wb_read(struct bxe_softc *sc,
1838                     uint32_t         offset,
1839                     uint32_t         *wb_write,
1840                     uint16_t         len)
1841{
1842    REG_RD_DMAE(sc, offset, wb_write, len);
1843}
1844
1845uint8_t
1846elink_cb_path_id(struct bxe_softc *sc)
1847{
1848    return (SC_PATH(sc));
1849}
1850
1851void
1852elink_cb_event_log(struct bxe_softc     *sc,
1853                   const elink_log_id_t elink_log_id,
1854                   ...)
1855{
1856    /* XXX */
1857    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1858}
1859
1860static int
1861bxe_set_spio(struct bxe_softc *sc,
1862             int              spio,
1863             uint32_t         mode)
1864{
1865    uint32_t spio_reg;
1866
1867    /* Only 2 SPIOs are configurable */
1868    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1869        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1870        return (-1);
1871    }
1872
1873    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1874
1875    /* read SPIO and mask except the float bits */
1876    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1877
1878    switch (mode) {
1879    case MISC_SPIO_OUTPUT_LOW:
1880        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1881        /* clear FLOAT and set CLR */
1882        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1883        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1884        break;
1885
1886    case MISC_SPIO_OUTPUT_HIGH:
1887        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1888        /* clear FLOAT and set SET */
1889        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1890        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1891        break;
1892
1893    case MISC_SPIO_INPUT_HI_Z:
1894        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1895        /* set FLOAT */
1896        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1897        break;
1898
1899    default:
1900        break;
1901    }
1902
1903    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1904    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1905
1906    return (0);
1907}
1908
1909static int
1910bxe_gpio_read(struct bxe_softc *sc,
1911              int              gpio_num,
1912              uint8_t          port)
1913{
1914    /* The GPIO should be swapped if swap register is set and active */
1915    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1916                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1917    int gpio_shift = (gpio_num +
1918                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1919    uint32_t gpio_mask = (1 << gpio_shift);
1920    uint32_t gpio_reg;
1921
1922    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1923        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1924            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1925            gpio_mask);
1926        return (-1);
1927    }
1928
1929    /* read GPIO value */
1930    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1931
1932    /* get the requested pin value */
1933    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1934}
1935
1936static int
1937bxe_gpio_write(struct bxe_softc *sc,
1938               int              gpio_num,
1939               uint32_t         mode,
1940               uint8_t          port)
1941{
1942    /* The GPIO should be swapped if swap register is set and active */
1943    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1944                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1945    int gpio_shift = (gpio_num +
1946                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1947    uint32_t gpio_mask = (1 << gpio_shift);
1948    uint32_t gpio_reg;
1949
1950    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1951        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1952            " gpio_shift %d gpio_mask 0x%x\n",
1953            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1954        return (-1);
1955    }
1956
1957    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1958
1959    /* read GPIO and mask except the float bits */
1960    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1961
1962    switch (mode) {
1963    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1964        BLOGD(sc, DBG_PHY,
1965              "Set GPIO %d (shift %d) -> output low\n",
1966              gpio_num, gpio_shift);
1967        /* clear FLOAT and set CLR */
1968        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1969        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1970        break;
1971
1972    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1973        BLOGD(sc, DBG_PHY,
1974              "Set GPIO %d (shift %d) -> output high\n",
1975              gpio_num, gpio_shift);
1976        /* clear FLOAT and set SET */
1977        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1978        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1979        break;
1980
1981    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1982        BLOGD(sc, DBG_PHY,
1983              "Set GPIO %d (shift %d) -> input\n",
1984              gpio_num, gpio_shift);
1985        /* set FLOAT */
1986        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1987        break;
1988
1989    default:
1990        break;
1991    }
1992
1993    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1994    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1995
1996    return (0);
1997}
1998
1999static int
2000bxe_gpio_mult_write(struct bxe_softc *sc,
2001                    uint8_t          pins,
2002                    uint32_t         mode)
2003{
2004    uint32_t gpio_reg;
2005
2006    /* any port swapping should be handled by caller */
2007
2008    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2009
2010    /* read GPIO and mask except the float bits */
2011    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2012    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2013    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2014    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2015
2016    switch (mode) {
2017    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2018        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2019        /* set CLR */
2020        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2021        break;
2022
2023    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2024        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2025        /* set SET */
2026        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2027        break;
2028
2029    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2030        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2031        /* set FLOAT */
2032        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2033        break;
2034
2035    default:
2036        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2037            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2038        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2039        return (-1);
2040    }
2041
2042    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2043    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2044
2045    return (0);
2046}
2047
2048static int
2049bxe_gpio_int_write(struct bxe_softc *sc,
2050                   int              gpio_num,
2051                   uint32_t         mode,
2052                   uint8_t          port)
2053{
2054    /* The GPIO should be swapped if swap register is set and active */
2055    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2056                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2057    int gpio_shift = (gpio_num +
2058                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2059    uint32_t gpio_mask = (1 << gpio_shift);
2060    uint32_t gpio_reg;
2061
2062    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2063        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2064            " gpio_shift %d gpio_mask 0x%x\n",
2065            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2066        return (-1);
2067    }
2068
2069    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2070
2071    /* read GPIO int */
2072    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2073
2074    switch (mode) {
2075    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2076        BLOGD(sc, DBG_PHY,
2077              "Clear GPIO INT %d (shift %d) -> output low\n",
2078              gpio_num, gpio_shift);
2079        /* clear SET and set CLR */
2080        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2081        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2082        break;
2083
2084    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2085        BLOGD(sc, DBG_PHY,
2086              "Set GPIO INT %d (shift %d) -> output high\n",
2087              gpio_num, gpio_shift);
2088        /* clear CLR and set SET */
2089        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2090        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2091        break;
2092
2093    default:
2094        break;
2095    }
2096
2097    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2098    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2099
2100    return (0);
2101}
2102
2103uint32_t
2104elink_cb_gpio_read(struct bxe_softc *sc,
2105                   uint16_t         gpio_num,
2106                   uint8_t          port)
2107{
2108    return (bxe_gpio_read(sc, gpio_num, port));
2109}
2110
2111uint8_t
2112elink_cb_gpio_write(struct bxe_softc *sc,
2113                    uint16_t         gpio_num,
2114                    uint8_t          mode, /* 0=low 1=high */
2115                    uint8_t          port)
2116{
2117    return (bxe_gpio_write(sc, gpio_num, mode, port));
2118}
2119
2120uint8_t
2121elink_cb_gpio_mult_write(struct bxe_softc *sc,
2122                         uint8_t          pins,
2123                         uint8_t          mode) /* 0=low 1=high */
2124{
2125    return (bxe_gpio_mult_write(sc, pins, mode));
2126}
2127
2128uint8_t
2129elink_cb_gpio_int_write(struct bxe_softc *sc,
2130                        uint16_t         gpio_num,
2131                        uint8_t          mode, /* 0=low 1=high */
2132                        uint8_t          port)
2133{
2134    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2135}
2136
2137void
2138elink_cb_notify_link_changed(struct bxe_softc *sc)
2139{
2140    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2141                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2142}
2143
2144/* send the MCP a request, block until there is a reply */
2145uint32_t
2146elink_cb_fw_command(struct bxe_softc *sc,
2147                    uint32_t         command,
2148                    uint32_t         param)
2149{
2150    int mb_idx = SC_FW_MB_IDX(sc);
2151    uint32_t seq;
2152    uint32_t rc = 0;
2153    uint32_t cnt = 1;
2154    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2155
2156    BXE_FWMB_LOCK(sc);
2157
2158    seq = ++sc->fw_seq;
2159    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2160    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2161
2162    BLOGD(sc, DBG_PHY,
2163          "wrote command 0x%08x to FW MB param 0x%08x\n",
2164          (command | seq), param);
2165
2166    /* Let the FW do it's magic. GIve it up to 5 seconds... */
2167    do {
2168        DELAY(delay * 1000);
2169        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2170    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2171
2172    BLOGD(sc, DBG_PHY,
2173          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2174          cnt*delay, rc, seq);
2175
2176    /* is this a reply to our command? */
2177    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2178        rc &= FW_MSG_CODE_MASK;
2179    } else {
2180        /* Ruh-roh! */
2181        BLOGE(sc, "FW failed to respond!\n");
2182        // XXX bxe_fw_dump(sc);
2183        rc = 0;
2184    }
2185
2186    BXE_FWMB_UNLOCK(sc);
2187    return (rc);
2188}
2189
2190static uint32_t
2191bxe_fw_command(struct bxe_softc *sc,
2192               uint32_t         command,
2193               uint32_t         param)
2194{
2195    return (elink_cb_fw_command(sc, command, param));
2196}
2197
2198static void
2199__storm_memset_dma_mapping(struct bxe_softc *sc,
2200                           uint32_t         addr,
2201                           bus_addr_t       mapping)
2202{
2203    REG_WR(sc, addr, U64_LO(mapping));
2204    REG_WR(sc, (addr + 4), U64_HI(mapping));
2205}
2206
2207static void
2208storm_memset_spq_addr(struct bxe_softc *sc,
2209                      bus_addr_t       mapping,
2210                      uint16_t         abs_fid)
2211{
2212    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2213                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2214    __storm_memset_dma_mapping(sc, addr, mapping);
2215}
2216
2217static void
2218storm_memset_vf_to_pf(struct bxe_softc *sc,
2219                      uint16_t         abs_fid,
2220                      uint16_t         pf_id)
2221{
2222    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2223    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2224    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2225    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2226}
2227
2228static void
2229storm_memset_func_en(struct bxe_softc *sc,
2230                     uint16_t         abs_fid,
2231                     uint8_t          enable)
2232{
2233    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2234    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2235    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2236    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2237}
2238
2239static void
2240storm_memset_eq_data(struct bxe_softc       *sc,
2241                     struct event_ring_data *eq_data,
2242                     uint16_t               pfid)
2243{
2244    uint32_t addr;
2245    size_t size;
2246
2247    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2248    size = sizeof(struct event_ring_data);
2249    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2250}
2251
2252static void
2253storm_memset_eq_prod(struct bxe_softc *sc,
2254                     uint16_t         eq_prod,
2255                     uint16_t         pfid)
2256{
2257    uint32_t addr = (BAR_CSTRORM_INTMEM +
2258                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2259    REG_WR16(sc, addr, eq_prod);
2260}
2261
2262/*
2263 * Post a slowpath command.
2264 *
2265 * A slowpath command is used to propagate a configuration change through
2266 * the controller in a controlled manner, allowing each STORM processor and
2267 * other H/W blocks to phase in the change.  The commands sent on the
2268 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2269 * completion of the ramrod will occur in different ways.  Here's a
2270 * breakdown of ramrods and how they complete:
2271 *
2272 * RAMROD_CMD_ID_ETH_PORT_SETUP
2273 *   Used to setup the leading connection on a port.  Completes on the
2274 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2275 *
2276 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2277 *   Used to setup an additional connection on a port.  Completes on the
2278 *   RCQ of the multi-queue/RSS connection being initialized.
2279 *
2280 * RAMROD_CMD_ID_ETH_STAT_QUERY
2281 *   Used to force the storm processors to update the statistics database
2282 *   in host memory.  This ramrod is send on the leading connection CID and
2283 *   completes as an index increment of the CSTORM on the default status
2284 *   block.
2285 *
2286 * RAMROD_CMD_ID_ETH_UPDATE
2287 *   Used to update the state of the leading connection, usually to udpate
2288 *   the RSS indirection table.  Completes on the RCQ of the leading
2289 *   connection. (Not currently used under FreeBSD until OS support becomes
2290 *   available.)
2291 *
2292 * RAMROD_CMD_ID_ETH_HALT
2293 *   Used when tearing down a connection prior to driver unload.  Completes
2294 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2295 *   use this on the leading connection.
2296 *
2297 * RAMROD_CMD_ID_ETH_SET_MAC
2298 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2299 *   the RCQ of the leading connection.
2300 *
2301 * RAMROD_CMD_ID_ETH_CFC_DEL
2302 *   Used when tearing down a conneciton prior to driver unload.  Completes
2303 *   on the RCQ of the leading connection (since the current connection
2304 *   has been completely removed from controller memory).
2305 *
2306 * RAMROD_CMD_ID_ETH_PORT_DEL
2307 *   Used to tear down the leading connection prior to driver unload,
2308 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2309 *   default status block.
2310 *
2311 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2312 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2313 *   RSS connection that is being offloaded.  (Not currently used under
2314 *   FreeBSD.)
2315 *
2316 * There can only be one command pending per function.
2317 *
2318 * Returns:
2319 *   0 = Success, !0 = Failure.
2320 */
2321
2322/* must be called under the spq lock */
2323static inline
2324struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2325{
2326    struct eth_spe *next_spe = sc->spq_prod_bd;
2327
2328    if (sc->spq_prod_bd == sc->spq_last_bd) {
2329        /* wrap back to the first eth_spq */
2330        sc->spq_prod_bd = sc->spq;
2331        sc->spq_prod_idx = 0;
2332    } else {
2333        sc->spq_prod_bd++;
2334        sc->spq_prod_idx++;
2335    }
2336
2337    return (next_spe);
2338}
2339
2340/* must be called under the spq lock */
2341static inline
2342void bxe_sp_prod_update(struct bxe_softc *sc)
2343{
2344    int func = SC_FUNC(sc);
2345
2346    /*
2347     * Make sure that BD data is updated before writing the producer.
2348     * BD data is written to the memory, the producer is read from the
2349     * memory, thus we need a full memory barrier to ensure the ordering.
2350     */
2351    mb();
2352
2353    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2354             sc->spq_prod_idx);
2355
2356    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2357                      BUS_SPACE_BARRIER_WRITE);
2358}
2359
2360/**
2361 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2362 *
2363 * @cmd:      command to check
2364 * @cmd_type: command type
2365 */
2366static inline
2367int bxe_is_contextless_ramrod(int cmd,
2368                              int cmd_type)
2369{
2370    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2371        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2372        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2373        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2374        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2375        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2376        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2377        return (TRUE);
2378    } else {
2379        return (FALSE);
2380    }
2381}
2382
2383/**
2384 * bxe_sp_post - place a single command on an SP ring
2385 *
2386 * @sc:         driver handle
2387 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2388 * @cid:        SW CID the command is related to
2389 * @data_hi:    command private data address (high 32 bits)
2390 * @data_lo:    command private data address (low 32 bits)
2391 * @cmd_type:   command type (e.g. NONE, ETH)
2392 *
2393 * SP data is handled as if it's always an address pair, thus data fields are
2394 * not swapped to little endian in upper functions. Instead this function swaps
2395 * data as if it's two uint32 fields.
2396 */
2397int
2398bxe_sp_post(struct bxe_softc *sc,
2399            int              command,
2400            int              cid,
2401            uint32_t         data_hi,
2402            uint32_t         data_lo,
2403            int              cmd_type)
2404{
2405    struct eth_spe *spe;
2406    uint16_t type;
2407    int common;
2408
2409    common = bxe_is_contextless_ramrod(command, cmd_type);
2410
2411    BXE_SP_LOCK(sc);
2412
2413    if (common) {
2414        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2415            BLOGE(sc, "EQ ring is full!\n");
2416            BXE_SP_UNLOCK(sc);
2417            return (-1);
2418        }
2419    } else {
2420        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2421            BLOGE(sc, "SPQ ring is full!\n");
2422            BXE_SP_UNLOCK(sc);
2423            return (-1);
2424        }
2425    }
2426
2427    spe = bxe_sp_get_next(sc);
2428
2429    /* CID needs port number to be encoded int it */
2430    spe->hdr.conn_and_cmd_data =
2431        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2432
2433    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2434
2435    /* TBD: Check if it works for VFs */
2436    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2437             SPE_HDR_T_FUNCTION_ID);
2438
2439    spe->hdr.type = htole16(type);
2440
2441    spe->data.update_data_addr.hi = htole32(data_hi);
2442    spe->data.update_data_addr.lo = htole32(data_lo);
2443
2444    /*
2445     * It's ok if the actual decrement is issued towards the memory
2446     * somewhere between the lock and unlock. Thus no more explict
2447     * memory barrier is needed.
2448     */
2449    if (common) {
2450        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2451    } else {
2452        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2453    }
2454
2455    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2456    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2457          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2458    BLOGD(sc, DBG_SP,
2459          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2460          sc->spq_prod_idx,
2461          (uint32_t)U64_HI(sc->spq_dma.paddr),
2462          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2463          command,
2464          common,
2465          HW_CID(sc, cid),
2466          data_hi,
2467          data_lo,
2468          type,
2469          atomic_load_acq_long(&sc->cq_spq_left),
2470          atomic_load_acq_long(&sc->eq_spq_left));
2471
2472    bxe_sp_prod_update(sc);
2473
2474    BXE_SP_UNLOCK(sc);
2475    return (0);
2476}
2477
2478/**
2479 * bxe_debug_print_ind_table - prints the indirection table configuration.
2480 *
2481 * @sc: driver hanlde
2482 * @p:  pointer to rss configuration
2483 */
2484
2485/*
2486 * FreeBSD Device probe function.
2487 *
2488 * Compares the device found to the driver's list of supported devices and
2489 * reports back to the bsd loader whether this is the right driver for the device.
2490 * This is the driver entry function called from the "kldload" command.
2491 *
2492 * Returns:
2493 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2494 */
2495static int
2496bxe_probe(device_t dev)
2497{
2498    struct bxe_device_type *t;
2499    char *descbuf;
2500    uint16_t did, sdid, svid, vid;
2501
2502    /* Find our device structure */
2503    t = bxe_devs;
2504
2505    /* Get the data for the device to be probed. */
2506    vid  = pci_get_vendor(dev);
2507    did  = pci_get_device(dev);
2508    svid = pci_get_subvendor(dev);
2509    sdid = pci_get_subdevice(dev);
2510
2511    /* Look through the list of known devices for a match. */
2512    while (t->bxe_name != NULL) {
2513        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2514            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2515            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2516            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2517            if (descbuf == NULL)
2518                return (ENOMEM);
2519
2520            /* Print out the device identity. */
2521            snprintf(descbuf, BXE_DEVDESC_MAX,
2522                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2523                     (((pci_read_config(dev, PCIR_REVID, 4) &
2524                        0xf0) >> 4) + 'A'),
2525                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2526                     BXE_DRIVER_VERSION);
2527
2528            device_set_desc_copy(dev, descbuf);
2529            free(descbuf, M_TEMP);
2530            return (BUS_PROBE_DEFAULT);
2531        }
2532        t++;
2533    }
2534
2535    return (ENXIO);
2536}
2537
2538static void
2539bxe_init_mutexes(struct bxe_softc *sc)
2540{
2541#ifdef BXE_CORE_LOCK_SX
2542    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2543             "bxe%d_core_lock", sc->unit);
2544    sx_init(&sc->core_sx, sc->core_sx_name);
2545#else
2546    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2547             "bxe%d_core_lock", sc->unit);
2548    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2549#endif
2550
2551    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2552             "bxe%d_sp_lock", sc->unit);
2553    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2554
2555    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2556             "bxe%d_dmae_lock", sc->unit);
2557    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2558
2559    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2560             "bxe%d_phy_lock", sc->unit);
2561    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2562
2563    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2564             "bxe%d_fwmb_lock", sc->unit);
2565    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2566
2567    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2568             "bxe%d_print_lock", sc->unit);
2569    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2570
2571    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2572             "bxe%d_stats_lock", sc->unit);
2573    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2574
2575    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2576             "bxe%d_mcast_lock", sc->unit);
2577    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2578}
2579
2580static void
2581bxe_release_mutexes(struct bxe_softc *sc)
2582{
2583#ifdef BXE_CORE_LOCK_SX
2584    sx_destroy(&sc->core_sx);
2585#else
2586    if (mtx_initialized(&sc->core_mtx)) {
2587        mtx_destroy(&sc->core_mtx);
2588    }
2589#endif
2590
2591    if (mtx_initialized(&sc->sp_mtx)) {
2592        mtx_destroy(&sc->sp_mtx);
2593    }
2594
2595    if (mtx_initialized(&sc->dmae_mtx)) {
2596        mtx_destroy(&sc->dmae_mtx);
2597    }
2598
2599    if (mtx_initialized(&sc->port.phy_mtx)) {
2600        mtx_destroy(&sc->port.phy_mtx);
2601    }
2602
2603    if (mtx_initialized(&sc->fwmb_mtx)) {
2604        mtx_destroy(&sc->fwmb_mtx);
2605    }
2606
2607    if (mtx_initialized(&sc->print_mtx)) {
2608        mtx_destroy(&sc->print_mtx);
2609    }
2610
2611    if (mtx_initialized(&sc->stats_mtx)) {
2612        mtx_destroy(&sc->stats_mtx);
2613    }
2614
2615    if (mtx_initialized(&sc->mcast_mtx)) {
2616        mtx_destroy(&sc->mcast_mtx);
2617    }
2618}
2619
2620static void
2621bxe_tx_disable(struct bxe_softc* sc)
2622{
2623    if_t ifp = sc->ifp;
2624
2625    /* tell the stack the driver is stopped and TX queue is full */
2626    if (ifp !=  NULL) {
2627        if_setdrvflags(ifp, 0);
2628    }
2629}
2630
2631static void
2632bxe_drv_pulse(struct bxe_softc *sc)
2633{
2634    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2635             sc->fw_drv_pulse_wr_seq);
2636}
2637
2638static inline uint16_t
2639bxe_tx_avail(struct bxe_softc *sc,
2640             struct bxe_fastpath *fp)
2641{
2642    int16_t  used;
2643    uint16_t prod;
2644    uint16_t cons;
2645
2646    prod = fp->tx_bd_prod;
2647    cons = fp->tx_bd_cons;
2648
2649    used = SUB_S16(prod, cons);
2650
2651    return (int16_t)(sc->tx_ring_size) - used;
2652}
2653
2654static inline int
2655bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2656{
2657    uint16_t hw_cons;
2658
2659    mb(); /* status block fields can change */
2660    hw_cons = le16toh(*fp->tx_cons_sb);
2661    return (hw_cons != fp->tx_pkt_cons);
2662}
2663
2664static inline uint8_t
2665bxe_has_tx_work(struct bxe_fastpath *fp)
2666{
2667    /* expand this for multi-cos if ever supported */
2668    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2669}
2670
2671static inline int
2672bxe_has_rx_work(struct bxe_fastpath *fp)
2673{
2674    uint16_t rx_cq_cons_sb;
2675
2676    mb(); /* status block fields can change */
2677    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2678    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2679        rx_cq_cons_sb++;
2680    return (fp->rx_cq_cons != rx_cq_cons_sb);
2681}
2682
2683static void
2684bxe_sp_event(struct bxe_softc    *sc,
2685             struct bxe_fastpath *fp,
2686             union eth_rx_cqe    *rr_cqe)
2687{
2688    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2689    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2690    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2691    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2692
2693    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2694          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2695
2696    switch (command) {
2697    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2698        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2699        drv_cmd = ECORE_Q_CMD_UPDATE;
2700        break;
2701
2702    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2703        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2704        drv_cmd = ECORE_Q_CMD_SETUP;
2705        break;
2706
2707    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2708        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2709        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2710        break;
2711
2712    case (RAMROD_CMD_ID_ETH_HALT):
2713        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2714        drv_cmd = ECORE_Q_CMD_HALT;
2715        break;
2716
2717    case (RAMROD_CMD_ID_ETH_TERMINATE):
2718        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2719        drv_cmd = ECORE_Q_CMD_TERMINATE;
2720        break;
2721
2722    case (RAMROD_CMD_ID_ETH_EMPTY):
2723        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2724        drv_cmd = ECORE_Q_CMD_EMPTY;
2725        break;
2726
2727    default:
2728        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2729              command, fp->index);
2730        return;
2731    }
2732
2733    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2734        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2735        /*
2736         * q_obj->complete_cmd() failure means that this was
2737         * an unexpected completion.
2738         *
2739         * In this case we don't want to increase the sc->spq_left
2740         * because apparently we haven't sent this command the first
2741         * place.
2742         */
2743        // bxe_panic(sc, ("Unexpected SP completion\n"));
2744        return;
2745    }
2746
2747    atomic_add_acq_long(&sc->cq_spq_left, 1);
2748
2749    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2750          atomic_load_acq_long(&sc->cq_spq_left));
2751}
2752
2753/*
2754 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2755 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2756 * the current aggregation queue as in-progress.
2757 */
2758static void
2759bxe_tpa_start(struct bxe_softc            *sc,
2760              struct bxe_fastpath         *fp,
2761              uint16_t                    queue,
2762              uint16_t                    cons,
2763              uint16_t                    prod,
2764              struct eth_fast_path_rx_cqe *cqe)
2765{
2766    struct bxe_sw_rx_bd tmp_bd;
2767    struct bxe_sw_rx_bd *rx_buf;
2768    struct eth_rx_bd *rx_bd;
2769    int max_agg_queues;
2770    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2771    uint16_t index;
2772
2773    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2774                       "cons=%d prod=%d\n",
2775          fp->index, queue, cons, prod);
2776
2777    max_agg_queues = MAX_AGG_QS(sc);
2778
2779    KASSERT((queue < max_agg_queues),
2780            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2781             fp->index, queue, max_agg_queues));
2782
2783    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2784            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2785             fp->index, queue));
2786
2787    /* copy the existing mbuf and mapping from the TPA pool */
2788    tmp_bd = tpa_info->bd;
2789
2790    if (tmp_bd.m == NULL) {
2791        uint32_t *tmp;
2792
2793        tmp = (uint32_t *)cqe;
2794
2795        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
2796              fp->index, queue, cons, prod);
2797        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2798            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2799
2800        /* XXX Error handling? */
2801        return;
2802    }
2803
2804    /* change the TPA queue to the start state */
2805    tpa_info->state            = BXE_TPA_STATE_START;
2806    tpa_info->placement_offset = cqe->placement_offset;
2807    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2808    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2809    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2810
2811    fp->rx_tpa_queue_used |= (1 << queue);
2812
2813    /*
2814     * If all the buffer descriptors are filled with mbufs then fill in
2815     * the current consumer index with a new BD. Else if a maximum Rx
2816     * buffer limit is imposed then fill in the next producer index.
2817     */
2818    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2819                prod : cons;
2820
2821    /* move the received mbuf and mapping to TPA pool */
2822    tpa_info->bd = fp->rx_mbuf_chain[cons];
2823
2824    /* release any existing RX BD mbuf mappings */
2825    if (cons != index) {
2826        rx_buf = &fp->rx_mbuf_chain[cons];
2827
2828        if (rx_buf->m_map != NULL) {
2829            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2830                            BUS_DMASYNC_POSTREAD);
2831            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2832        }
2833
2834        /*
2835         * We get here when the maximum number of rx buffers is less than
2836         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2837         * it out here without concern of a memory leak.
2838         */
2839        fp->rx_mbuf_chain[cons].m = NULL;
2840    }
2841
2842    /* update the Rx SW BD with the mbuf info from the TPA pool */
2843    fp->rx_mbuf_chain[index] = tmp_bd;
2844
2845    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2846    rx_bd = &fp->rx_chain[index];
2847    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2848    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2849}
2850
2851/*
2852 * When a TPA aggregation is completed, loop through the individual mbufs
2853 * of the aggregation, combining them into a single mbuf which will be sent
2854 * up the stack. Refill all freed SGEs with mbufs as we go along.
2855 */
2856static int
2857bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2858                   struct bxe_fastpath       *fp,
2859                   struct bxe_sw_tpa_info    *tpa_info,
2860                   uint16_t                  queue,
2861                   uint16_t                  pages,
2862                   struct mbuf               *m,
2863			       struct eth_end_agg_rx_cqe *cqe,
2864                   uint16_t                  cqe_idx)
2865{
2866    struct mbuf *m_frag;
2867    uint32_t frag_len, frag_size, i;
2868    uint16_t sge_idx;
2869    int rc = 0;
2870    int j;
2871
2872    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2873
2874    BLOGD(sc, DBG_LRO,
2875          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2876          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2877
2878    /* make sure the aggregated frame is not too big to handle */
2879    if (pages > 8 * PAGES_PER_SGE) {
2880
2881        uint32_t *tmp = (uint32_t *)cqe;
2882
2883        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2884                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2885              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2886              tpa_info->len_on_bd, frag_size);
2887
2888        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2889            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2890
2891        bxe_panic(sc, ("sge page count error\n"));
2892        return (EINVAL);
2893    }
2894
2895    /*
2896     * Scan through the scatter gather list pulling individual mbufs into a
2897     * single mbuf for the host stack.
2898     */
2899    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2900        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2901
2902        /*
2903         * Firmware gives the indices of the SGE as if the ring is an array
2904         * (meaning that the "next" element will consume 2 indices).
2905         */
2906        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2907
2908        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2909                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2910              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2911
2912        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2913
2914        /* allocate a new mbuf for the SGE */
2915        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2916        if (rc) {
2917            /* Leave all remaining SGEs in the ring! */
2918            return (rc);
2919        }
2920
2921        /* update the fragment length */
2922        m_frag->m_len = frag_len;
2923
2924        /* concatenate the fragment to the head mbuf */
2925        m_cat(m, m_frag);
2926        fp->eth_q_stats.mbuf_alloc_sge--;
2927
2928        /* update the TPA mbuf size and remaining fragment size */
2929        m->m_pkthdr.len += frag_len;
2930        frag_size -= frag_len;
2931    }
2932
2933    BLOGD(sc, DBG_LRO,
2934          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2935          fp->index, queue, frag_size);
2936
2937    return (rc);
2938}
2939
2940static inline void
2941bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2942{
2943    int i, j;
2944
2945    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2946        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2947
2948        for (j = 0; j < 2; j++) {
2949            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2950            idx--;
2951        }
2952    }
2953}
2954
2955static inline void
2956bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2957{
2958    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2959    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2960
2961    /*
2962     * Clear the two last indices in the page to 1. These are the indices that
2963     * correspond to the "next" element, hence will never be indicated and
2964     * should be removed from the calculations.
2965     */
2966    bxe_clear_sge_mask_next_elems(fp);
2967}
2968
2969static inline void
2970bxe_update_last_max_sge(struct bxe_fastpath *fp,
2971                        uint16_t            idx)
2972{
2973    uint16_t last_max = fp->last_max_sge;
2974
2975    if (SUB_S16(idx, last_max) > 0) {
2976        fp->last_max_sge = idx;
2977    }
2978}
2979
2980static inline void
2981bxe_update_sge_prod(struct bxe_softc          *sc,
2982                    struct bxe_fastpath       *fp,
2983                    uint16_t                  sge_len,
2984                    union eth_sgl_or_raw_data *cqe)
2985{
2986    uint16_t last_max, last_elem, first_elem;
2987    uint16_t delta = 0;
2988    uint16_t i;
2989
2990    if (!sge_len) {
2991        return;
2992    }
2993
2994    /* first mark all used pages */
2995    for (i = 0; i < sge_len; i++) {
2996        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2997                            RX_SGE(le16toh(cqe->sgl[i])));
2998    }
2999
3000    BLOGD(sc, DBG_LRO,
3001          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3002          fp->index, sge_len - 1,
3003          le16toh(cqe->sgl[sge_len - 1]));
3004
3005    /* assume that the last SGE index is the biggest */
3006    bxe_update_last_max_sge(fp,
3007                            le16toh(cqe->sgl[sge_len - 1]));
3008
3009    last_max = RX_SGE(fp->last_max_sge);
3010    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3011    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3012
3013    /* if ring is not full */
3014    if (last_elem + 1 != first_elem) {
3015        last_elem++;
3016    }
3017
3018    /* now update the prod */
3019    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3020        if (__predict_true(fp->sge_mask[i])) {
3021            break;
3022        }
3023
3024        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3025        delta += BIT_VEC64_ELEM_SZ;
3026    }
3027
3028    if (delta > 0) {
3029        fp->rx_sge_prod += delta;
3030        /* clear page-end entries */
3031        bxe_clear_sge_mask_next_elems(fp);
3032    }
3033
3034    BLOGD(sc, DBG_LRO,
3035          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3036          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3037}
3038
3039/*
3040 * The aggregation on the current TPA queue has completed. Pull the individual
3041 * mbuf fragments together into a single mbuf, perform all necessary checksum
3042 * calculations, and send the resuting mbuf to the stack.
3043 */
3044static void
3045bxe_tpa_stop(struct bxe_softc          *sc,
3046             struct bxe_fastpath       *fp,
3047             struct bxe_sw_tpa_info    *tpa_info,
3048             uint16_t                  queue,
3049             uint16_t                  pages,
3050			 struct eth_end_agg_rx_cqe *cqe,
3051             uint16_t                  cqe_idx)
3052{
3053    if_t ifp = sc->ifp;
3054    struct mbuf *m;
3055    int rc = 0;
3056
3057    BLOGD(sc, DBG_LRO,
3058          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3059          fp->index, queue, tpa_info->placement_offset,
3060          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3061
3062    m = tpa_info->bd.m;
3063
3064    /* allocate a replacement before modifying existing mbuf */
3065    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3066    if (rc) {
3067        /* drop the frame and log an error */
3068        fp->eth_q_stats.rx_soft_errors++;
3069        goto bxe_tpa_stop_exit;
3070    }
3071
3072    /* we have a replacement, fixup the current mbuf */
3073    m_adj(m, tpa_info->placement_offset);
3074    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3075
3076    /* mark the checksums valid (taken care of by the firmware) */
3077    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3078    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3079    m->m_pkthdr.csum_data = 0xffff;
3080    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3081                               CSUM_IP_VALID   |
3082                               CSUM_DATA_VALID |
3083                               CSUM_PSEUDO_HDR);
3084
3085    /* aggregate all of the SGEs into a single mbuf */
3086    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3087    if (rc) {
3088        /* drop the packet and log an error */
3089        fp->eth_q_stats.rx_soft_errors++;
3090        m_freem(m);
3091    } else {
3092        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3093            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3094            m->m_flags |= M_VLANTAG;
3095        }
3096
3097        /* assign packet to this interface interface */
3098        if_setrcvif(m, ifp);
3099
3100#if __FreeBSD_version >= 800000
3101        /* specify what RSS queue was used for this flow */
3102        m->m_pkthdr.flowid = fp->index;
3103        BXE_SET_FLOWID(m);
3104#endif
3105
3106        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3107        fp->eth_q_stats.rx_tpa_pkts++;
3108
3109        /* pass the frame to the stack */
3110        if_input(ifp, m);
3111    }
3112
3113    /* we passed an mbuf up the stack or dropped the frame */
3114    fp->eth_q_stats.mbuf_alloc_tpa--;
3115
3116bxe_tpa_stop_exit:
3117
3118    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3119    fp->rx_tpa_queue_used &= ~(1 << queue);
3120}
3121
3122static uint8_t
3123bxe_service_rxsgl(
3124                 struct bxe_fastpath *fp,
3125                 uint16_t len,
3126                 uint16_t lenonbd,
3127                 struct mbuf *m,
3128                 struct eth_fast_path_rx_cqe *cqe_fp)
3129{
3130    struct mbuf *m_frag;
3131    uint16_t frags, frag_len;
3132    uint16_t sge_idx = 0;
3133    uint16_t j;
3134    uint8_t i, rc = 0;
3135    uint32_t frag_size;
3136
3137    /* adjust the mbuf */
3138    m->m_len = lenonbd;
3139
3140    frag_size =  len - lenonbd;
3141    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3142
3143    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3144        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3145
3146        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3147        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3148        m_frag->m_len = frag_len;
3149
3150       /* allocate a new mbuf for the SGE */
3151        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3152        if (rc) {
3153            /* Leave all remaining SGEs in the ring! */
3154            return (rc);
3155        }
3156        fp->eth_q_stats.mbuf_alloc_sge--;
3157
3158        /* concatenate the fragment to the head mbuf */
3159        m_cat(m, m_frag);
3160
3161        frag_size -= frag_len;
3162    }
3163
3164    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3165
3166    return rc;
3167}
3168
3169static uint8_t
3170bxe_rxeof(struct bxe_softc    *sc,
3171          struct bxe_fastpath *fp)
3172{
3173    if_t ifp = sc->ifp;
3174    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3175    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3176    int rx_pkts = 0;
3177    int rc = 0;
3178
3179    BXE_FP_RX_LOCK(fp);
3180
3181    /* CQ "next element" is of the size of the regular element */
3182    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3183    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3184        hw_cq_cons++;
3185    }
3186
3187    bd_cons = fp->rx_bd_cons;
3188    bd_prod = fp->rx_bd_prod;
3189    bd_prod_fw = bd_prod;
3190    sw_cq_cons = fp->rx_cq_cons;
3191    sw_cq_prod = fp->rx_cq_prod;
3192
3193    /*
3194     * Memory barrier necessary as speculative reads of the rx
3195     * buffer can be ahead of the index in the status block
3196     */
3197    rmb();
3198
3199    BLOGD(sc, DBG_RX,
3200          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3201          fp->index, hw_cq_cons, sw_cq_cons);
3202
3203    while (sw_cq_cons != hw_cq_cons) {
3204        struct bxe_sw_rx_bd *rx_buf = NULL;
3205        union eth_rx_cqe *cqe;
3206        struct eth_fast_path_rx_cqe *cqe_fp;
3207        uint8_t cqe_fp_flags;
3208        enum eth_rx_cqe_type cqe_fp_type;
3209        uint16_t len, lenonbd,  pad;
3210        struct mbuf *m = NULL;
3211
3212        comp_ring_cons = RCQ(sw_cq_cons);
3213        bd_prod = RX_BD(bd_prod);
3214        bd_cons = RX_BD(bd_cons);
3215
3216        cqe          = &fp->rcq_chain[comp_ring_cons];
3217        cqe_fp       = &cqe->fast_path_cqe;
3218        cqe_fp_flags = cqe_fp->type_error_flags;
3219        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3220
3221        BLOGD(sc, DBG_RX,
3222              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3223              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3224              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3225              fp->index,
3226              hw_cq_cons,
3227              sw_cq_cons,
3228              bd_prod,
3229              bd_cons,
3230              CQE_TYPE(cqe_fp_flags),
3231              cqe_fp_flags,
3232              cqe_fp->status_flags,
3233              le32toh(cqe_fp->rss_hash_result),
3234              le16toh(cqe_fp->vlan_tag),
3235              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3236              le16toh(cqe_fp->len_on_bd));
3237
3238        /* is this a slowpath msg? */
3239        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3240            bxe_sp_event(sc, fp, cqe);
3241            goto next_cqe;
3242        }
3243
3244        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3245
3246        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3247            struct bxe_sw_tpa_info *tpa_info;
3248            uint16_t frag_size, pages;
3249            uint8_t queue;
3250
3251            if (CQE_TYPE_START(cqe_fp_type)) {
3252                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3253                              bd_cons, bd_prod, cqe_fp);
3254                m = NULL; /* packet not ready yet */
3255                goto next_rx;
3256            }
3257
3258            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3259                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3260
3261            queue = cqe->end_agg_cqe.queue_index;
3262            tpa_info = &fp->rx_tpa_info[queue];
3263
3264            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3265                  fp->index, queue);
3266
3267            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3268                         tpa_info->len_on_bd);
3269            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3270
3271            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3272                         &cqe->end_agg_cqe, comp_ring_cons);
3273
3274            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3275
3276            goto next_cqe;
3277        }
3278
3279        /* non TPA */
3280
3281        /* is this an error packet? */
3282        if (__predict_false(cqe_fp_flags &
3283                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3284            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3285            fp->eth_q_stats.rx_soft_errors++;
3286            goto next_rx;
3287        }
3288
3289        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3290        lenonbd = le16toh(cqe_fp->len_on_bd);
3291        pad = cqe_fp->placement_offset;
3292
3293        m = rx_buf->m;
3294
3295        if (__predict_false(m == NULL)) {
3296            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3297                  bd_cons, fp->index);
3298            goto next_rx;
3299        }
3300
3301        /* XXX double copy if packet length under a threshold */
3302
3303        /*
3304         * If all the buffer descriptors are filled with mbufs then fill in
3305         * the current consumer index with a new BD. Else if a maximum Rx
3306         * buffer limit is imposed then fill in the next producer index.
3307         */
3308        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3309                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3310                                      bd_prod : bd_cons);
3311        if (rc != 0) {
3312
3313            /* we simply reuse the received mbuf and don't post it to the stack */
3314            m = NULL;
3315
3316            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3317                  fp->index, rc);
3318            fp->eth_q_stats.rx_soft_errors++;
3319
3320            if (sc->max_rx_bufs != RX_BD_USABLE) {
3321                /* copy this consumer index to the producer index */
3322                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3323                       sizeof(struct bxe_sw_rx_bd));
3324                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3325            }
3326
3327            goto next_rx;
3328        }
3329
3330        /* current mbuf was detached from the bd */
3331        fp->eth_q_stats.mbuf_alloc_rx--;
3332
3333        /* we allocated a replacement mbuf, fixup the current one */
3334        m_adj(m, pad);
3335        m->m_pkthdr.len = m->m_len = len;
3336
3337        if ((len > 60) && (len > lenonbd)) {
3338            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3339            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3340            if (rc)
3341                break;
3342            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3343        } else if (lenonbd < len) {
3344            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3345        }
3346
3347        /* assign packet to this interface interface */
3348	if_setrcvif(m, ifp);
3349
3350        /* assume no hardware checksum has complated */
3351        m->m_pkthdr.csum_flags = 0;
3352
3353        /* validate checksum if offload enabled */
3354        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3355            /* check for a valid IP frame */
3356            if (!(cqe->fast_path_cqe.status_flags &
3357                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3358                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3359                if (__predict_false(cqe_fp_flags &
3360                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3361                    fp->eth_q_stats.rx_hw_csum_errors++;
3362                } else {
3363                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3364                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3365                }
3366            }
3367
3368            /* check for a valid TCP/UDP frame */
3369            if (!(cqe->fast_path_cqe.status_flags &
3370                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3371                if (__predict_false(cqe_fp_flags &
3372                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3373                    fp->eth_q_stats.rx_hw_csum_errors++;
3374                } else {
3375                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3376                    m->m_pkthdr.csum_data = 0xFFFF;
3377                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3378                                               CSUM_PSEUDO_HDR);
3379                }
3380            }
3381        }
3382
3383        /* if there is a VLAN tag then flag that info */
3384        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3385            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3386            m->m_flags |= M_VLANTAG;
3387        }
3388
3389#if __FreeBSD_version >= 800000
3390        /* specify what RSS queue was used for this flow */
3391        m->m_pkthdr.flowid = fp->index;
3392        BXE_SET_FLOWID(m);
3393#endif
3394
3395next_rx:
3396
3397        bd_cons    = RX_BD_NEXT(bd_cons);
3398        bd_prod    = RX_BD_NEXT(bd_prod);
3399        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3400
3401        /* pass the frame to the stack */
3402        if (__predict_true(m != NULL)) {
3403            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3404            rx_pkts++;
3405            if_input(ifp, m);
3406        }
3407
3408next_cqe:
3409
3410        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3411        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3412
3413        /* limit spinning on the queue */
3414        if (rc != 0)
3415            break;
3416
3417        if (rx_pkts == sc->rx_budget) {
3418            fp->eth_q_stats.rx_budget_reached++;
3419            break;
3420        }
3421    } /* while work to do */
3422
3423    fp->rx_bd_cons = bd_cons;
3424    fp->rx_bd_prod = bd_prod_fw;
3425    fp->rx_cq_cons = sw_cq_cons;
3426    fp->rx_cq_prod = sw_cq_prod;
3427
3428    /* Update producers */
3429    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3430
3431    fp->eth_q_stats.rx_pkts += rx_pkts;
3432    fp->eth_q_stats.rx_calls++;
3433
3434    BXE_FP_RX_UNLOCK(fp);
3435
3436    return (sw_cq_cons != hw_cq_cons);
3437}
3438
3439static uint16_t
3440bxe_free_tx_pkt(struct bxe_softc    *sc,
3441                struct bxe_fastpath *fp,
3442                uint16_t            idx)
3443{
3444    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3445    struct eth_tx_start_bd *tx_start_bd;
3446    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3447    uint16_t new_cons;
3448    int nbd;
3449
3450    /* unmap the mbuf from non-paged memory */
3451    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3452
3453    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3454    nbd = le16toh(tx_start_bd->nbd) - 1;
3455
3456    new_cons = (tx_buf->first_bd + nbd);
3457
3458    /* free the mbuf */
3459    if (__predict_true(tx_buf->m != NULL)) {
3460        m_freem(tx_buf->m);
3461        fp->eth_q_stats.mbuf_alloc_tx--;
3462    } else {
3463        fp->eth_q_stats.tx_chain_lost_mbuf++;
3464    }
3465
3466    tx_buf->m = NULL;
3467    tx_buf->first_bd = 0;
3468
3469    return (new_cons);
3470}
3471
3472/* transmit timeout watchdog */
3473static int
3474bxe_watchdog(struct bxe_softc    *sc,
3475             struct bxe_fastpath *fp)
3476{
3477    BXE_FP_TX_LOCK(fp);
3478
3479    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3480        BXE_FP_TX_UNLOCK(fp);
3481        return (0);
3482    }
3483
3484    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3485    if(sc->trigger_grcdump) {
3486         /* taking grcdump */
3487         bxe_grc_dump(sc);
3488    }
3489
3490    BXE_FP_TX_UNLOCK(fp);
3491
3492    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3493    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3494
3495    return (-1);
3496}
3497
3498/* processes transmit completions */
3499static uint8_t
3500bxe_txeof(struct bxe_softc    *sc,
3501          struct bxe_fastpath *fp)
3502{
3503    if_t ifp = sc->ifp;
3504    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3505    uint16_t tx_bd_avail;
3506
3507    BXE_FP_TX_LOCK_ASSERT(fp);
3508
3509    bd_cons = fp->tx_bd_cons;
3510    hw_cons = le16toh(*fp->tx_cons_sb);
3511    sw_cons = fp->tx_pkt_cons;
3512
3513    while (sw_cons != hw_cons) {
3514        pkt_cons = TX_BD(sw_cons);
3515
3516        BLOGD(sc, DBG_TX,
3517              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3518              fp->index, hw_cons, sw_cons, pkt_cons);
3519
3520        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3521
3522        sw_cons++;
3523    }
3524
3525    fp->tx_pkt_cons = sw_cons;
3526    fp->tx_bd_cons  = bd_cons;
3527
3528    BLOGD(sc, DBG_TX,
3529          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3530          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3531
3532    mb();
3533
3534    tx_bd_avail = bxe_tx_avail(sc, fp);
3535
3536    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3537        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3538    } else {
3539        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3540    }
3541
3542    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3543        /* reset the watchdog timer if there are pending transmits */
3544        fp->watchdog_timer = BXE_TX_TIMEOUT;
3545        return (TRUE);
3546    } else {
3547        /* clear watchdog when there are no pending transmits */
3548        fp->watchdog_timer = 0;
3549        return (FALSE);
3550    }
3551}
3552
3553static void
3554bxe_drain_tx_queues(struct bxe_softc *sc)
3555{
3556    struct bxe_fastpath *fp;
3557    int i, count;
3558
3559    /* wait until all TX fastpath tasks have completed */
3560    for (i = 0; i < sc->num_queues; i++) {
3561        fp = &sc->fp[i];
3562
3563        count = 1000;
3564
3565        while (bxe_has_tx_work(fp)) {
3566
3567            BXE_FP_TX_LOCK(fp);
3568            bxe_txeof(sc, fp);
3569            BXE_FP_TX_UNLOCK(fp);
3570
3571            if (count == 0) {
3572                BLOGE(sc, "Timeout waiting for fp[%d] "
3573                          "transmits to complete!\n", i);
3574                bxe_panic(sc, ("tx drain failure\n"));
3575                return;
3576            }
3577
3578            count--;
3579            DELAY(1000);
3580            rmb();
3581        }
3582    }
3583
3584    return;
3585}
3586
3587static int
3588bxe_del_all_macs(struct bxe_softc          *sc,
3589                 struct ecore_vlan_mac_obj *mac_obj,
3590                 int                       mac_type,
3591                 uint8_t                   wait_for_comp)
3592{
3593    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3594    int rc;
3595
3596    /* wait for completion of requested */
3597    if (wait_for_comp) {
3598        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3599    }
3600
3601    /* Set the mac type of addresses we want to clear */
3602    bxe_set_bit(mac_type, &vlan_mac_flags);
3603
3604    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3605    if (rc < 0) {
3606        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3607            rc, mac_type, wait_for_comp);
3608    }
3609
3610    return (rc);
3611}
3612
3613static int
3614bxe_fill_accept_flags(struct bxe_softc *sc,
3615                      uint32_t         rx_mode,
3616                      unsigned long    *rx_accept_flags,
3617                      unsigned long    *tx_accept_flags)
3618{
3619    /* Clear the flags first */
3620    *rx_accept_flags = 0;
3621    *tx_accept_flags = 0;
3622
3623    switch (rx_mode) {
3624    case BXE_RX_MODE_NONE:
3625        /*
3626         * 'drop all' supersedes any accept flags that may have been
3627         * passed to the function.
3628         */
3629        break;
3630
3631    case BXE_RX_MODE_NORMAL:
3632        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3633        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3634        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3635
3636        /* internal switching mode */
3637        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3638        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3640
3641        break;
3642
3643    case BXE_RX_MODE_ALLMULTI:
3644        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3646        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3647
3648        /* internal switching mode */
3649        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3650        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3651        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3652
3653        break;
3654
3655    case BXE_RX_MODE_PROMISC:
3656        /*
3657         * According to deffinition of SI mode, iface in promisc mode
3658         * should receive matched and unmatched (in resolution of port)
3659         * unicast packets.
3660         */
3661        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3662        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3663        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3664        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3665
3666        /* internal switching mode */
3667        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3668        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3669
3670        if (IS_MF_SI(sc)) {
3671            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3672        } else {
3673            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3674        }
3675
3676        break;
3677
3678    default:
3679        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3680        return (-1);
3681    }
3682
3683    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3684    if (rx_mode != BXE_RX_MODE_NONE) {
3685        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3686        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3687    }
3688
3689    return (0);
3690}
3691
3692static int
3693bxe_set_q_rx_mode(struct bxe_softc *sc,
3694                  uint8_t          cl_id,
3695                  unsigned long    rx_mode_flags,
3696                  unsigned long    rx_accept_flags,
3697                  unsigned long    tx_accept_flags,
3698                  unsigned long    ramrod_flags)
3699{
3700    struct ecore_rx_mode_ramrod_params ramrod_param;
3701    int rc;
3702
3703    memset(&ramrod_param, 0, sizeof(ramrod_param));
3704
3705    /* Prepare ramrod parameters */
3706    ramrod_param.cid = 0;
3707    ramrod_param.cl_id = cl_id;
3708    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3709    ramrod_param.func_id = SC_FUNC(sc);
3710
3711    ramrod_param.pstate = &sc->sp_state;
3712    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3713
3714    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3715    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3716
3717    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3718
3719    ramrod_param.ramrod_flags = ramrod_flags;
3720    ramrod_param.rx_mode_flags = rx_mode_flags;
3721
3722    ramrod_param.rx_accept_flags = rx_accept_flags;
3723    ramrod_param.tx_accept_flags = tx_accept_flags;
3724
3725    rc = ecore_config_rx_mode(sc, &ramrod_param);
3726    if (rc < 0) {
3727        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3728            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3729            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3730            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3731            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3732        return (rc);
3733    }
3734
3735    return (0);
3736}
3737
3738static int
3739bxe_set_storm_rx_mode(struct bxe_softc *sc)
3740{
3741    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3742    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3743    int rc;
3744
3745    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3746                               &tx_accept_flags);
3747    if (rc) {
3748        return (rc);
3749    }
3750
3751    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3752    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3753
3754    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3755    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3756                              rx_accept_flags, tx_accept_flags,
3757                              ramrod_flags));
3758}
3759
3760/* returns the "mcp load_code" according to global load_count array */
3761static int
3762bxe_nic_load_no_mcp(struct bxe_softc *sc)
3763{
3764    int path = SC_PATH(sc);
3765    int port = SC_PORT(sc);
3766
3767    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3768          path, load_count[path][0], load_count[path][1],
3769          load_count[path][2]);
3770    load_count[path][0]++;
3771    load_count[path][1 + port]++;
3772    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3773          path, load_count[path][0], load_count[path][1],
3774          load_count[path][2]);
3775    if (load_count[path][0] == 1) {
3776        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3777    } else if (load_count[path][1 + port] == 1) {
3778        return (FW_MSG_CODE_DRV_LOAD_PORT);
3779    } else {
3780        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3781    }
3782}
3783
3784/* returns the "mcp load_code" according to global load_count array */
3785static int
3786bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3787{
3788    int port = SC_PORT(sc);
3789    int path = SC_PATH(sc);
3790
3791    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3792          path, load_count[path][0], load_count[path][1],
3793          load_count[path][2]);
3794    load_count[path][0]--;
3795    load_count[path][1 + port]--;
3796    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3797          path, load_count[path][0], load_count[path][1],
3798          load_count[path][2]);
3799    if (load_count[path][0] == 0) {
3800        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3801    } else if (load_count[path][1 + port] == 0) {
3802        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3803    } else {
3804        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3805    }
3806}
3807
3808/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3809static uint32_t
3810bxe_send_unload_req(struct bxe_softc *sc,
3811                    int              unload_mode)
3812{
3813    uint32_t reset_code = 0;
3814
3815    /* Select the UNLOAD request mode */
3816    if (unload_mode == UNLOAD_NORMAL) {
3817        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3818    } else {
3819        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3820    }
3821
3822    /* Send the request to the MCP */
3823    if (!BXE_NOMCP(sc)) {
3824        reset_code = bxe_fw_command(sc, reset_code, 0);
3825    } else {
3826        reset_code = bxe_nic_unload_no_mcp(sc);
3827    }
3828
3829    return (reset_code);
3830}
3831
3832/* send UNLOAD_DONE command to the MCP */
3833static void
3834bxe_send_unload_done(struct bxe_softc *sc,
3835                     uint8_t          keep_link)
3836{
3837    uint32_t reset_param =
3838        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3839
3840    /* Report UNLOAD_DONE to MCP */
3841    if (!BXE_NOMCP(sc)) {
3842        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3843    }
3844}
3845
3846static int
3847bxe_func_wait_started(struct bxe_softc *sc)
3848{
3849    int tout = 50;
3850
3851    if (!sc->port.pmf) {
3852        return (0);
3853    }
3854
3855    /*
3856     * (assumption: No Attention from MCP at this stage)
3857     * PMF probably in the middle of TX disable/enable transaction
3858     * 1. Sync IRS for default SB
3859     * 2. Sync SP queue - this guarantees us that attention handling started
3860     * 3. Wait, that TX disable/enable transaction completes
3861     *
3862     * 1+2 guarantee that if DCBX attention was scheduled it already changed
3863     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3864     * received completion for the transaction the state is TX_STOPPED.
3865     * State will return to STARTED after completion of TX_STOPPED-->STARTED
3866     * transaction.
3867     */
3868
3869    /* XXX make sure default SB ISR is done */
3870    /* need a way to synchronize an irq (intr_mtx?) */
3871
3872    /* XXX flush any work queues */
3873
3874    while (ecore_func_get_state(sc, &sc->func_obj) !=
3875           ECORE_F_STATE_STARTED && tout--) {
3876        DELAY(20000);
3877    }
3878
3879    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3880        /*
3881         * Failed to complete the transaction in a "good way"
3882         * Force both transactions with CLR bit.
3883         */
3884        struct ecore_func_state_params func_params = { NULL };
3885
3886        BLOGE(sc, "Unexpected function state! "
3887                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3888
3889        func_params.f_obj = &sc->func_obj;
3890        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3891
3892        /* STARTED-->TX_STOPPED */
3893        func_params.cmd = ECORE_F_CMD_TX_STOP;
3894        ecore_func_state_change(sc, &func_params);
3895
3896        /* TX_STOPPED-->STARTED */
3897        func_params.cmd = ECORE_F_CMD_TX_START;
3898        return (ecore_func_state_change(sc, &func_params));
3899    }
3900
3901    return (0);
3902}
3903
3904static int
3905bxe_stop_queue(struct bxe_softc *sc,
3906               int              index)
3907{
3908    struct bxe_fastpath *fp = &sc->fp[index];
3909    struct ecore_queue_state_params q_params = { NULL };
3910    int rc;
3911
3912    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3913
3914    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3915    /* We want to wait for completion in this context */
3916    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3917
3918    /* Stop the primary connection: */
3919
3920    /* ...halt the connection */
3921    q_params.cmd = ECORE_Q_CMD_HALT;
3922    rc = ecore_queue_state_change(sc, &q_params);
3923    if (rc) {
3924        return (rc);
3925    }
3926
3927    /* ...terminate the connection */
3928    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3929    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3930    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3931    rc = ecore_queue_state_change(sc, &q_params);
3932    if (rc) {
3933        return (rc);
3934    }
3935
3936    /* ...delete cfc entry */
3937    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3938    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3939    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3940    return (ecore_queue_state_change(sc, &q_params));
3941}
3942
3943/* wait for the outstanding SP commands */
3944static inline uint8_t
3945bxe_wait_sp_comp(struct bxe_softc *sc,
3946                 unsigned long    mask)
3947{
3948    unsigned long tmp;
3949    int tout = 5000; /* wait for 5 secs tops */
3950
3951    while (tout--) {
3952        mb();
3953        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3954            return (TRUE);
3955        }
3956
3957        DELAY(1000);
3958    }
3959
3960    mb();
3961
3962    tmp = atomic_load_acq_long(&sc->sp_state);
3963    if (tmp & mask) {
3964        BLOGE(sc, "Filtering completion timed out: "
3965                  "sp_state 0x%lx, mask 0x%lx\n",
3966              tmp, mask);
3967        return (FALSE);
3968    }
3969
3970    return (FALSE);
3971}
3972
3973static int
3974bxe_func_stop(struct bxe_softc *sc)
3975{
3976    struct ecore_func_state_params func_params = { NULL };
3977    int rc;
3978
3979    /* prepare parameters for function state transitions */
3980    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3981    func_params.f_obj = &sc->func_obj;
3982    func_params.cmd = ECORE_F_CMD_STOP;
3983
3984    /*
3985     * Try to stop the function the 'good way'. If it fails (in case
3986     * of a parity error during bxe_chip_cleanup()) and we are
3987     * not in a debug mode, perform a state transaction in order to
3988     * enable further HW_RESET transaction.
3989     */
3990    rc = ecore_func_state_change(sc, &func_params);
3991    if (rc) {
3992        BLOGE(sc, "FUNC_STOP ramrod failed. "
3993                  "Running a dry transaction (%d)\n", rc);
3994        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3995        return (ecore_func_state_change(sc, &func_params));
3996    }
3997
3998    return (0);
3999}
4000
4001static int
4002bxe_reset_hw(struct bxe_softc *sc,
4003             uint32_t         load_code)
4004{
4005    struct ecore_func_state_params func_params = { NULL };
4006
4007    /* Prepare parameters for function state transitions */
4008    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4009
4010    func_params.f_obj = &sc->func_obj;
4011    func_params.cmd = ECORE_F_CMD_HW_RESET;
4012
4013    func_params.params.hw_init.load_phase = load_code;
4014
4015    return (ecore_func_state_change(sc, &func_params));
4016}
4017
4018static void
4019bxe_int_disable_sync(struct bxe_softc *sc,
4020                     int              disable_hw)
4021{
4022    if (disable_hw) {
4023        /* prevent the HW from sending interrupts */
4024        bxe_int_disable(sc);
4025    }
4026
4027    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4028    /* make sure all ISRs are done */
4029
4030    /* XXX make sure sp_task is not running */
4031    /* cancel and flush work queues */
4032}
4033
4034static void
4035bxe_chip_cleanup(struct bxe_softc *sc,
4036                 uint32_t         unload_mode,
4037                 uint8_t          keep_link)
4038{
4039    int port = SC_PORT(sc);
4040    struct ecore_mcast_ramrod_params rparam = { NULL };
4041    uint32_t reset_code;
4042    int i, rc = 0;
4043
4044    bxe_drain_tx_queues(sc);
4045
4046    /* give HW time to discard old tx messages */
4047    DELAY(1000);
4048
4049    /* Clean all ETH MACs */
4050    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4051    if (rc < 0) {
4052        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4053    }
4054
4055    /* Clean up UC list  */
4056    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4057    if (rc < 0) {
4058        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4059    }
4060
4061    /* Disable LLH */
4062    if (!CHIP_IS_E1(sc)) {
4063        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4064    }
4065
4066    /* Set "drop all" to stop Rx */
4067
4068    /*
4069     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4070     * a race between the completion code and this code.
4071     */
4072    BXE_MCAST_LOCK(sc);
4073
4074    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4075        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4076    } else {
4077        bxe_set_storm_rx_mode(sc);
4078    }
4079
4080    /* Clean up multicast configuration */
4081    rparam.mcast_obj = &sc->mcast_obj;
4082    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4083    if (rc < 0) {
4084        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4085    }
4086
4087    BXE_MCAST_UNLOCK(sc);
4088
4089    // XXX bxe_iov_chip_cleanup(sc);
4090
4091    /*
4092     * Send the UNLOAD_REQUEST to the MCP. This will return if
4093     * this function should perform FUNCTION, PORT, or COMMON HW
4094     * reset.
4095     */
4096    reset_code = bxe_send_unload_req(sc, unload_mode);
4097
4098    /*
4099     * (assumption: No Attention from MCP at this stage)
4100     * PMF probably in the middle of TX disable/enable transaction
4101     */
4102    rc = bxe_func_wait_started(sc);
4103    if (rc) {
4104        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4105    }
4106
4107    /*
4108     * Close multi and leading connections
4109     * Completions for ramrods are collected in a synchronous way
4110     */
4111    for (i = 0; i < sc->num_queues; i++) {
4112        if (bxe_stop_queue(sc, i)) {
4113            goto unload_error;
4114        }
4115    }
4116
4117    /*
4118     * If SP settings didn't get completed so far - something
4119     * very wrong has happen.
4120     */
4121    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4122        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4123    }
4124
4125unload_error:
4126
4127    rc = bxe_func_stop(sc);
4128    if (rc) {
4129        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4130    }
4131
4132    /* disable HW interrupts */
4133    bxe_int_disable_sync(sc, TRUE);
4134
4135    /* detach interrupts */
4136    bxe_interrupt_detach(sc);
4137
4138    /* Reset the chip */
4139    rc = bxe_reset_hw(sc, reset_code);
4140    if (rc) {
4141        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4142    }
4143
4144    /* Report UNLOAD_DONE to MCP */
4145    bxe_send_unload_done(sc, keep_link);
4146}
4147
4148static void
4149bxe_disable_close_the_gate(struct bxe_softc *sc)
4150{
4151    uint32_t val;
4152    int port = SC_PORT(sc);
4153
4154    BLOGD(sc, DBG_LOAD,
4155          "Disabling 'close the gates'\n");
4156
4157    if (CHIP_IS_E1(sc)) {
4158        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4159                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4160        val = REG_RD(sc, addr);
4161        val &= ~(0x300);
4162        REG_WR(sc, addr, val);
4163    } else {
4164        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4165        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4166                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4167        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4168    }
4169}
4170
4171/*
4172 * Cleans the object that have internal lists without sending
4173 * ramrods. Should be run when interrutps are disabled.
4174 */
4175static void
4176bxe_squeeze_objects(struct bxe_softc *sc)
4177{
4178    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4179    struct ecore_mcast_ramrod_params rparam = { NULL };
4180    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4181    int rc;
4182
4183    /* Cleanup MACs' object first... */
4184
4185    /* Wait for completion of requested */
4186    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4187    /* Perform a dry cleanup */
4188    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4189
4190    /* Clean ETH primary MAC */
4191    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4192    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4193                             &ramrod_flags);
4194    if (rc != 0) {
4195        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4196    }
4197
4198    /* Cleanup UC list */
4199    vlan_mac_flags = 0;
4200    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4201    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4202                             &ramrod_flags);
4203    if (rc != 0) {
4204        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4205    }
4206
4207    /* Now clean mcast object... */
4208
4209    rparam.mcast_obj = &sc->mcast_obj;
4210    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4211
4212    /* Add a DEL command... */
4213    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4214    if (rc < 0) {
4215        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4216    }
4217
4218    /* now wait until all pending commands are cleared */
4219
4220    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221    while (rc != 0) {
4222        if (rc < 0) {
4223            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4224            return;
4225        }
4226
4227        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4228    }
4229}
4230
4231/* stop the controller */
4232static __noinline int
4233bxe_nic_unload(struct bxe_softc *sc,
4234               uint32_t         unload_mode,
4235               uint8_t          keep_link)
4236{
4237    uint8_t global = FALSE;
4238    uint32_t val;
4239    int i;
4240
4241    BXE_CORE_LOCK_ASSERT(sc);
4242
4243    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4244
4245    for (i = 0; i < sc->num_queues; i++) {
4246        struct bxe_fastpath *fp;
4247
4248        fp = &sc->fp[i];
4249        BXE_FP_TX_LOCK(fp);
4250        BXE_FP_TX_UNLOCK(fp);
4251    }
4252
4253    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4254
4255    /* mark driver as unloaded in shmem2 */
4256    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4257        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4258        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4259                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4260    }
4261
4262    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4263        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4264        /*
4265         * We can get here if the driver has been unloaded
4266         * during parity error recovery and is either waiting for a
4267         * leader to complete or for other functions to unload and
4268         * then ifconfig down has been issued. In this case we want to
4269         * unload and let other functions to complete a recovery
4270         * process.
4271         */
4272        sc->recovery_state = BXE_RECOVERY_DONE;
4273        sc->is_leader = 0;
4274        bxe_release_leader_lock(sc);
4275        mb();
4276
4277        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4278        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4279            " state = 0x%x\n", sc->recovery_state, sc->state);
4280        return (-1);
4281    }
4282
4283    /*
4284     * Nothing to do during unload if previous bxe_nic_load()
4285     * did not completed successfully - all resourses are released.
4286     */
4287    if ((sc->state == BXE_STATE_CLOSED) ||
4288        (sc->state == BXE_STATE_ERROR)) {
4289        return (0);
4290    }
4291
4292    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4293    mb();
4294
4295    /* stop tx */
4296    bxe_tx_disable(sc);
4297
4298    sc->rx_mode = BXE_RX_MODE_NONE;
4299    /* XXX set rx mode ??? */
4300
4301    if (IS_PF(sc) && !sc->grcdump_done) {
4302        /* set ALWAYS_ALIVE bit in shmem */
4303        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4304
4305        bxe_drv_pulse(sc);
4306
4307        bxe_stats_handle(sc, STATS_EVENT_STOP);
4308        bxe_save_statistics(sc);
4309    }
4310
4311    /* wait till consumers catch up with producers in all queues */
4312    bxe_drain_tx_queues(sc);
4313
4314    /* if VF indicate to PF this function is going down (PF will delete sp
4315     * elements and clear initializations
4316     */
4317    if (IS_VF(sc)) {
4318        ; /* bxe_vfpf_close_vf(sc); */
4319    } else if (unload_mode != UNLOAD_RECOVERY) {
4320        /* if this is a normal/close unload need to clean up chip */
4321        if (!sc->grcdump_done)
4322            bxe_chip_cleanup(sc, unload_mode, keep_link);
4323    } else {
4324        /* Send the UNLOAD_REQUEST to the MCP */
4325        bxe_send_unload_req(sc, unload_mode);
4326
4327        /*
4328         * Prevent transactions to host from the functions on the
4329         * engine that doesn't reset global blocks in case of global
4330         * attention once gloabl blocks are reset and gates are opened
4331         * (the engine which leader will perform the recovery
4332         * last).
4333         */
4334        if (!CHIP_IS_E1x(sc)) {
4335            bxe_pf_disable(sc);
4336        }
4337
4338        /* disable HW interrupts */
4339        bxe_int_disable_sync(sc, TRUE);
4340
4341        /* detach interrupts */
4342        bxe_interrupt_detach(sc);
4343
4344        /* Report UNLOAD_DONE to MCP */
4345        bxe_send_unload_done(sc, FALSE);
4346    }
4347
4348    /*
4349     * At this stage no more interrupts will arrive so we may safely clean
4350     * the queue'able objects here in case they failed to get cleaned so far.
4351     */
4352    if (IS_PF(sc)) {
4353        bxe_squeeze_objects(sc);
4354    }
4355
4356    /* There should be no more pending SP commands at this stage */
4357    sc->sp_state = 0;
4358
4359    sc->port.pmf = 0;
4360
4361    bxe_free_fp_buffers(sc);
4362
4363    if (IS_PF(sc)) {
4364        bxe_free_mem(sc);
4365    }
4366
4367    bxe_free_fw_stats_mem(sc);
4368
4369    sc->state = BXE_STATE_CLOSED;
4370
4371    /*
4372     * Check if there are pending parity attentions. If there are - set
4373     * RECOVERY_IN_PROGRESS.
4374     */
4375    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4376        bxe_set_reset_in_progress(sc);
4377
4378        /* Set RESET_IS_GLOBAL if needed */
4379        if (global) {
4380            bxe_set_reset_global(sc);
4381        }
4382    }
4383
4384    /*
4385     * The last driver must disable a "close the gate" if there is no
4386     * parity attention or "process kill" pending.
4387     */
4388    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4389        bxe_reset_is_done(sc, SC_PATH(sc))) {
4390        bxe_disable_close_the_gate(sc);
4391    }
4392
4393    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4394
4395    bxe_link_report(sc);
4396
4397    return (0);
4398}
4399
4400/*
4401 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4402 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4403 */
4404static int
4405bxe_ifmedia_update(struct ifnet  *ifp)
4406{
4407    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4408    struct ifmedia *ifm;
4409
4410    ifm = &sc->ifmedia;
4411
4412    /* We only support Ethernet media type. */
4413    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4414        return (EINVAL);
4415    }
4416
4417    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4418    case IFM_AUTO:
4419         break;
4420    case IFM_10G_CX4:
4421    case IFM_10G_SR:
4422    case IFM_10G_T:
4423    case IFM_10G_TWINAX:
4424    default:
4425        /* We don't support changing the media type. */
4426        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4427              IFM_SUBTYPE(ifm->ifm_media));
4428        return (EINVAL);
4429    }
4430
4431    return (0);
4432}
4433
4434/*
4435 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4436 */
4437static void
4438bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4439{
4440    struct bxe_softc *sc = if_getsoftc(ifp);
4441
4442    /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4443       line if the IFM_AVALID flag is *NOT* set. So we need to set this
4444       flag unconditionally (irrespective of the admininistrative
4445       'up/down' state of the interface) to ensure that that line is always
4446       displayed.
4447    */
4448    ifmr->ifm_status = IFM_AVALID;
4449
4450    /* Setup the default interface info. */
4451    ifmr->ifm_active = IFM_ETHER;
4452
4453    /* Report link down if the driver isn't running. */
4454    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4455        ifmr->ifm_active |= IFM_NONE;
4456        BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4457        BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4458                __func__, sc->link_vars.link_up);
4459        return;
4460    }
4461
4462
4463    if (sc->link_vars.link_up) {
4464        ifmr->ifm_status |= IFM_ACTIVE;
4465        ifmr->ifm_active |= IFM_FDX;
4466    } else {
4467        ifmr->ifm_active |= IFM_NONE;
4468        BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4469                __func__);
4470        return;
4471    }
4472
4473    ifmr->ifm_active |= sc->media;
4474    return;
4475}
4476
4477static void
4478bxe_handle_chip_tq(void *context,
4479                   int  pending)
4480{
4481    struct bxe_softc *sc = (struct bxe_softc *)context;
4482    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4483
4484    switch (work)
4485    {
4486
4487    case CHIP_TQ_REINIT:
4488        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4489            /* restart the interface */
4490            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4491            bxe_periodic_stop(sc);
4492            BXE_CORE_LOCK(sc);
4493            bxe_stop_locked(sc);
4494            bxe_init_locked(sc);
4495            BXE_CORE_UNLOCK(sc);
4496        }
4497        break;
4498
4499    default:
4500        break;
4501    }
4502}
4503
4504/*
4505 * Handles any IOCTL calls from the operating system.
4506 *
4507 * Returns:
4508 *   0 = Success, >0 Failure
4509 */
4510static int
4511bxe_ioctl(if_t ifp,
4512          u_long       command,
4513          caddr_t      data)
4514{
4515    struct bxe_softc *sc = if_getsoftc(ifp);
4516    struct ifreq *ifr = (struct ifreq *)data;
4517    int mask = 0;
4518    int reinit = 0;
4519    int error = 0;
4520
4521    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4522    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4523
4524    switch (command)
4525    {
4526    case SIOCSIFMTU:
4527        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4528              ifr->ifr_mtu);
4529
4530        if (sc->mtu == ifr->ifr_mtu) {
4531            /* nothing to change */
4532            break;
4533        }
4534
4535        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4536            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4537                  ifr->ifr_mtu, mtu_min, mtu_max);
4538            error = EINVAL;
4539            break;
4540        }
4541
4542        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4543                             (unsigned long)ifr->ifr_mtu);
4544	/*
4545        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4546                              (unsigned long)ifr->ifr_mtu);
4547	XXX - Not sure why it needs to be atomic
4548	*/
4549	if_setmtu(ifp, ifr->ifr_mtu);
4550        reinit = 1;
4551        break;
4552
4553    case SIOCSIFFLAGS:
4554        /* toggle the interface state up or down */
4555        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4556
4557	BXE_CORE_LOCK(sc);
4558        /* check if the interface is up */
4559        if (if_getflags(ifp) & IFF_UP) {
4560            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4561                /* set the receive mode flags */
4562                bxe_set_rx_mode(sc);
4563            } else if(sc->state != BXE_STATE_DISABLED) {
4564		bxe_init_locked(sc);
4565            }
4566        } else {
4567            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4568		bxe_periodic_stop(sc);
4569		bxe_stop_locked(sc);
4570            }
4571        }
4572	BXE_CORE_UNLOCK(sc);
4573
4574        break;
4575
4576    case SIOCADDMULTI:
4577    case SIOCDELMULTI:
4578        /* add/delete multicast addresses */
4579        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4580
4581        /* check if the interface is up */
4582        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4583            /* set the receive mode flags */
4584	    BXE_CORE_LOCK(sc);
4585            bxe_set_rx_mode(sc);
4586	    BXE_CORE_UNLOCK(sc);
4587        }
4588
4589        break;
4590
4591    case SIOCSIFCAP:
4592        /* find out which capabilities have changed */
4593        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4594
4595        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4596              mask);
4597
4598        /* toggle the LRO capabilites enable flag */
4599        if (mask & IFCAP_LRO) {
4600	    if_togglecapenable(ifp, IFCAP_LRO);
4601            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4602                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4603            reinit = 1;
4604        }
4605
4606        /* toggle the TXCSUM checksum capabilites enable flag */
4607        if (mask & IFCAP_TXCSUM) {
4608	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4609            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4610                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4611            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4612                if_sethwassistbits(ifp, (CSUM_IP      |
4613                                    CSUM_TCP      |
4614                                    CSUM_UDP      |
4615                                    CSUM_TSO      |
4616                                    CSUM_TCP_IPV6 |
4617                                    CSUM_UDP_IPV6), 0);
4618            } else {
4619		if_clearhwassist(ifp); /* XXX */
4620            }
4621        }
4622
4623        /* toggle the RXCSUM checksum capabilities enable flag */
4624        if (mask & IFCAP_RXCSUM) {
4625	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4626            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4627                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4628            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4629                if_sethwassistbits(ifp, (CSUM_IP      |
4630                                    CSUM_TCP      |
4631                                    CSUM_UDP      |
4632                                    CSUM_TSO      |
4633                                    CSUM_TCP_IPV6 |
4634                                    CSUM_UDP_IPV6), 0);
4635            } else {
4636		if_clearhwassist(ifp); /* XXX */
4637            }
4638        }
4639
4640        /* toggle TSO4 capabilities enabled flag */
4641        if (mask & IFCAP_TSO4) {
4642            if_togglecapenable(ifp, IFCAP_TSO4);
4643            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4644                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4645        }
4646
4647        /* toggle TSO6 capabilities enabled flag */
4648        if (mask & IFCAP_TSO6) {
4649	    if_togglecapenable(ifp, IFCAP_TSO6);
4650            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4651                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4652        }
4653
4654        /* toggle VLAN_HWTSO capabilities enabled flag */
4655        if (mask & IFCAP_VLAN_HWTSO) {
4656
4657	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4658            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4659                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4660        }
4661
4662        /* toggle VLAN_HWCSUM capabilities enabled flag */
4663        if (mask & IFCAP_VLAN_HWCSUM) {
4664            /* XXX investigate this... */
4665            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4666            error = EINVAL;
4667        }
4668
4669        /* toggle VLAN_MTU capabilities enable flag */
4670        if (mask & IFCAP_VLAN_MTU) {
4671            /* XXX investigate this... */
4672            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4673            error = EINVAL;
4674        }
4675
4676        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4677        if (mask & IFCAP_VLAN_HWTAGGING) {
4678            /* XXX investigate this... */
4679            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4680            error = EINVAL;
4681        }
4682
4683        /* toggle VLAN_HWFILTER capabilities enabled flag */
4684        if (mask & IFCAP_VLAN_HWFILTER) {
4685            /* XXX investigate this... */
4686            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4687            error = EINVAL;
4688        }
4689
4690        /* XXX not yet...
4691         * IFCAP_WOL_MAGIC
4692         */
4693
4694        break;
4695
4696    case SIOCSIFMEDIA:
4697    case SIOCGIFMEDIA:
4698        /* set/get interface media */
4699        BLOGD(sc, DBG_IOCTL,
4700              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4701              (command & 0xff));
4702        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4703        break;
4704
4705    default:
4706        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4707              (command & 0xff));
4708        error = ether_ioctl(ifp, command, data);
4709        break;
4710    }
4711
4712    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4713        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4714              "Re-initializing hardware from IOCTL change\n");
4715	bxe_periodic_stop(sc);
4716	BXE_CORE_LOCK(sc);
4717	bxe_stop_locked(sc);
4718	bxe_init_locked(sc);
4719	BXE_CORE_UNLOCK(sc);
4720    }
4721
4722    return (error);
4723}
4724
4725static __noinline void
4726bxe_dump_mbuf(struct bxe_softc *sc,
4727              struct mbuf      *m,
4728              uint8_t          contents)
4729{
4730    char * type;
4731    int i = 0;
4732
4733    if (!(sc->debug & DBG_MBUF)) {
4734        return;
4735    }
4736
4737    if (m == NULL) {
4738        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4739        return;
4740    }
4741
4742    while (m) {
4743
4744#if __FreeBSD_version >= 1000000
4745        BLOGD(sc, DBG_MBUF,
4746              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4747              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4748
4749        if (m->m_flags & M_PKTHDR) {
4750             BLOGD(sc, DBG_MBUF,
4751                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4752                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4753                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4754        }
4755#else
4756        BLOGD(sc, DBG_MBUF,
4757              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4758              i, m, m->m_len, m->m_flags,
4759              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4760
4761        if (m->m_flags & M_PKTHDR) {
4762             BLOGD(sc, DBG_MBUF,
4763                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4764                   i, m->m_pkthdr.len, m->m_flags,
4765                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4766                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4767                   "\22M_PROMISC\23M_NOFREE",
4768                   (int)m->m_pkthdr.csum_flags,
4769                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4770                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4771                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4772                   "\14CSUM_PSEUDO_HDR");
4773        }
4774#endif /* #if __FreeBSD_version >= 1000000 */
4775
4776        if (m->m_flags & M_EXT) {
4777            switch (m->m_ext.ext_type) {
4778            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4779            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4780            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4781            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4782            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4783            case EXT_PACKET:     type = "EXT_PACKET";     break;
4784            case EXT_MBUF:       type = "EXT_MBUF";       break;
4785            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4786            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4787            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4788            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4789            default:             type = "UNKNOWN";        break;
4790            }
4791
4792            BLOGD(sc, DBG_MBUF,
4793                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4794                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4795        }
4796
4797        if (contents) {
4798            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4799        }
4800
4801        m = m->m_next;
4802        i++;
4803    }
4804}
4805
4806/*
4807 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4808 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4809 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
4810 * The headers comes in a separate bd in FreeBSD so 13-3=10.
4811 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4812 */
4813static int
4814bxe_chktso_window(struct bxe_softc  *sc,
4815                  int               nsegs,
4816                  bus_dma_segment_t *segs,
4817                  struct mbuf       *m)
4818{
4819    uint32_t num_wnds, wnd_size, wnd_sum;
4820    int32_t frag_idx, wnd_idx;
4821    unsigned short lso_mss;
4822    int defrag;
4823
4824    defrag = 0;
4825    wnd_sum = 0;
4826    wnd_size = 10;
4827    num_wnds = nsegs - wnd_size;
4828    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4829
4830    /*
4831     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
4832     * first window sum of data while skipping the first assuming it is the
4833     * header in FreeBSD.
4834     */
4835    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4836        wnd_sum += htole16(segs[frag_idx].ds_len);
4837    }
4838
4839    /* check the first 10 bd window size */
4840    if (wnd_sum < lso_mss) {
4841        return (1);
4842    }
4843
4844    /* run through the windows */
4845    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4846        /* subtract the first mbuf->m_len of the last wndw(-header) */
4847        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4848        /* add the next mbuf len to the len of our new window */
4849        wnd_sum += htole16(segs[frag_idx].ds_len);
4850        if (wnd_sum < lso_mss) {
4851            return (1);
4852        }
4853    }
4854
4855    return (0);
4856}
4857
4858static uint8_t
4859bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4860                    struct mbuf         *m,
4861                    uint32_t            *parsing_data)
4862{
4863    struct ether_vlan_header *eh = NULL;
4864    struct ip *ip4 = NULL;
4865    struct ip6_hdr *ip6 = NULL;
4866    caddr_t ip = NULL;
4867    struct tcphdr *th = NULL;
4868    int e_hlen, ip_hlen, l4_off;
4869    uint16_t proto;
4870
4871    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4872        /* no L4 checksum offload needed */
4873        return (0);
4874    }
4875
4876    /* get the Ethernet header */
4877    eh = mtod(m, struct ether_vlan_header *);
4878
4879    /* handle VLAN encapsulation if present */
4880    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4881        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4882        proto  = ntohs(eh->evl_proto);
4883    } else {
4884        e_hlen = ETHER_HDR_LEN;
4885        proto  = ntohs(eh->evl_encap_proto);
4886    }
4887
4888    switch (proto) {
4889    case ETHERTYPE_IP:
4890        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4891        ip4 = (m->m_len < sizeof(struct ip)) ?
4892                  (struct ip *)m->m_next->m_data :
4893                  (struct ip *)(m->m_data + e_hlen);
4894        /* ip_hl is number of 32-bit words */
4895        ip_hlen = (ip4->ip_hl << 2);
4896        ip = (caddr_t)ip4;
4897        break;
4898    case ETHERTYPE_IPV6:
4899        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4900        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4901                  (struct ip6_hdr *)m->m_next->m_data :
4902                  (struct ip6_hdr *)(m->m_data + e_hlen);
4903        /* XXX cannot support offload with IPv6 extensions */
4904        ip_hlen = sizeof(struct ip6_hdr);
4905        ip = (caddr_t)ip6;
4906        break;
4907    default:
4908        /* We can't offload in this case... */
4909        /* XXX error stat ??? */
4910        return (0);
4911    }
4912
4913    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4914    l4_off = (e_hlen + ip_hlen);
4915
4916    *parsing_data |=
4917        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4918         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4919
4920    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4921                                  CSUM_TSO |
4922                                  CSUM_TCP_IPV6)) {
4923        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4924        th = (struct tcphdr *)(ip + ip_hlen);
4925        /* th_off is number of 32-bit words */
4926        *parsing_data |= ((th->th_off <<
4927                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4928                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4929        return (l4_off + (th->th_off << 2)); /* entire header length */
4930    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4931                                         CSUM_UDP_IPV6)) {
4932        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4933        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4934    } else {
4935        /* XXX error stat ??? */
4936        return (0);
4937    }
4938}
4939
4940static uint8_t
4941bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4942                 struct mbuf                *m,
4943                 struct eth_tx_parse_bd_e1x *pbd)
4944{
4945    struct ether_vlan_header *eh = NULL;
4946    struct ip *ip4 = NULL;
4947    struct ip6_hdr *ip6 = NULL;
4948    caddr_t ip = NULL;
4949    struct tcphdr *th = NULL;
4950    struct udphdr *uh = NULL;
4951    int e_hlen, ip_hlen;
4952    uint16_t proto;
4953    uint8_t hlen;
4954    uint16_t tmp_csum;
4955    uint32_t *tmp_uh;
4956
4957    /* get the Ethernet header */
4958    eh = mtod(m, struct ether_vlan_header *);
4959
4960    /* handle VLAN encapsulation if present */
4961    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4962        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4963        proto  = ntohs(eh->evl_proto);
4964    } else {
4965        e_hlen = ETHER_HDR_LEN;
4966        proto  = ntohs(eh->evl_encap_proto);
4967    }
4968
4969    switch (proto) {
4970    case ETHERTYPE_IP:
4971        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4972        ip4 = (m->m_len < sizeof(struct ip)) ?
4973                  (struct ip *)m->m_next->m_data :
4974                  (struct ip *)(m->m_data + e_hlen);
4975        /* ip_hl is number of 32-bit words */
4976        ip_hlen = (ip4->ip_hl << 1);
4977        ip = (caddr_t)ip4;
4978        break;
4979    case ETHERTYPE_IPV6:
4980        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4981        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4982                  (struct ip6_hdr *)m->m_next->m_data :
4983                  (struct ip6_hdr *)(m->m_data + e_hlen);
4984        /* XXX cannot support offload with IPv6 extensions */
4985        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4986        ip = (caddr_t)ip6;
4987        break;
4988    default:
4989        /* We can't offload in this case... */
4990        /* XXX error stat ??? */
4991        return (0);
4992    }
4993
4994    hlen = (e_hlen >> 1);
4995
4996    /* note that rest of global_data is indirectly zeroed here */
4997    if (m->m_flags & M_VLANTAG) {
4998        pbd->global_data =
4999            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5000    } else {
5001        pbd->global_data = htole16(hlen);
5002    }
5003
5004    pbd->ip_hlen_w = ip_hlen;
5005
5006    hlen += pbd->ip_hlen_w;
5007
5008    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5009
5010    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5011                                  CSUM_TSO |
5012                                  CSUM_TCP_IPV6)) {
5013        th = (struct tcphdr *)(ip + (ip_hlen << 1));
5014        /* th_off is number of 32-bit words */
5015        hlen += (uint16_t)(th->th_off << 1);
5016    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5017                                         CSUM_UDP_IPV6)) {
5018        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5019        hlen += (sizeof(struct udphdr) / 2);
5020    } else {
5021        /* valid case as only CSUM_IP was set */
5022        return (0);
5023    }
5024
5025    pbd->total_hlen_w = htole16(hlen);
5026
5027    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5028                                  CSUM_TSO |
5029                                  CSUM_TCP_IPV6)) {
5030        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5031        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5032    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5033                                         CSUM_UDP_IPV6)) {
5034        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5035
5036        /*
5037         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5038         * checksums and does not know anything about the UDP header and where
5039         * the checksum field is located. It only knows about TCP. Therefore
5040         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5041         * offload. Since the checksum field offset for TCP is 16 bytes and
5042         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5043         * bytes less than the start of the UDP header. This allows the
5044         * hardware to write the checksum in the correct spot. But the
5045         * hardware will compute a checksum which includes the last 10 bytes
5046         * of the IP header. To correct this we tweak the stack computed
5047         * pseudo checksum by folding in the calculation of the inverse
5048         * checksum for those final 10 bytes of the IP header. This allows
5049         * the correct checksum to be computed by the hardware.
5050         */
5051
5052        /* set pointer 10 bytes before UDP header */
5053        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5054
5055        /* calculate a pseudo header checksum over the first 10 bytes */
5056        tmp_csum = in_pseudo(*tmp_uh,
5057                             *(tmp_uh + 1),
5058                             *(uint16_t *)(tmp_uh + 2));
5059
5060        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5061    }
5062
5063    return (hlen * 2); /* entire header length, number of bytes */
5064}
5065
5066static void
5067bxe_set_pbd_lso_e2(struct mbuf *m,
5068                   uint32_t    *parsing_data)
5069{
5070    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5071                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5072                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5073
5074    /* XXX test for IPv6 with extension header... */
5075}
5076
5077static void
5078bxe_set_pbd_lso(struct mbuf                *m,
5079                struct eth_tx_parse_bd_e1x *pbd)
5080{
5081    struct ether_vlan_header *eh = NULL;
5082    struct ip *ip = NULL;
5083    struct tcphdr *th = NULL;
5084    int e_hlen;
5085
5086    /* get the Ethernet header */
5087    eh = mtod(m, struct ether_vlan_header *);
5088
5089    /* handle VLAN encapsulation if present */
5090    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5091                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5092
5093    /* get the IP and TCP header, with LSO entire header in first mbuf */
5094    /* XXX assuming IPv4 */
5095    ip = (struct ip *)(m->m_data + e_hlen);
5096    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5097
5098    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5099    pbd->tcp_send_seq = ntohl(th->th_seq);
5100    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5101
5102#if 1
5103        /* XXX IPv4 */
5104        pbd->ip_id = ntohs(ip->ip_id);
5105        pbd->tcp_pseudo_csum =
5106            ntohs(in_pseudo(ip->ip_src.s_addr,
5107                            ip->ip_dst.s_addr,
5108                            htons(IPPROTO_TCP)));
5109#else
5110        /* XXX IPv6 */
5111        pbd->tcp_pseudo_csum =
5112            ntohs(in_pseudo(&ip6->ip6_src,
5113                            &ip6->ip6_dst,
5114                            htons(IPPROTO_TCP)));
5115#endif
5116
5117    pbd->global_data |=
5118        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5119}
5120
5121/*
5122 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
5123 * visible to the controller.
5124 *
5125 * If an mbuf is submitted to this routine and cannot be given to the
5126 * controller (e.g. it has too many fragments) then the function may free
5127 * the mbuf and return to the caller.
5128 *
5129 * Returns:
5130 *   0 = Success, !0 = Failure
5131 *   Note the side effect that an mbuf may be freed if it causes a problem.
5132 */
5133static int
5134bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5135{
5136    bus_dma_segment_t segs[32];
5137    struct mbuf *m0;
5138    struct bxe_sw_tx_bd *tx_buf;
5139    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5140    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5141    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5142    struct eth_tx_bd *tx_data_bd;
5143    struct eth_tx_bd *tx_total_pkt_size_bd;
5144    struct eth_tx_start_bd *tx_start_bd;
5145    uint16_t bd_prod, pkt_prod, total_pkt_size;
5146    uint8_t mac_type;
5147    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5148    struct bxe_softc *sc;
5149    uint16_t tx_bd_avail;
5150    struct ether_vlan_header *eh;
5151    uint32_t pbd_e2_parsing_data = 0;
5152    uint8_t hlen = 0;
5153    int tmp_bd;
5154    int i;
5155
5156    sc = fp->sc;
5157
5158#if __FreeBSD_version >= 800000
5159    M_ASSERTPKTHDR(*m_head);
5160#endif /* #if __FreeBSD_version >= 800000 */
5161
5162    m0 = *m_head;
5163    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5164    tx_start_bd = NULL;
5165    tx_data_bd = NULL;
5166    tx_total_pkt_size_bd = NULL;
5167
5168    /* get the H/W pointer for packets and BDs */
5169    pkt_prod = fp->tx_pkt_prod;
5170    bd_prod = fp->tx_bd_prod;
5171
5172    mac_type = UNICAST_ADDRESS;
5173
5174    /* map the mbuf into the next open DMAable memory */
5175    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5176    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5177                                    tx_buf->m_map, m0,
5178                                    segs, &nsegs, BUS_DMA_NOWAIT);
5179
5180    /* mapping errors */
5181    if(__predict_false(error != 0)) {
5182        fp->eth_q_stats.tx_dma_mapping_failure++;
5183        if (error == ENOMEM) {
5184            /* resource issue, try again later */
5185            rc = ENOMEM;
5186        } else if (error == EFBIG) {
5187            /* possibly recoverable with defragmentation */
5188            fp->eth_q_stats.mbuf_defrag_attempts++;
5189            m0 = m_defrag(*m_head, M_NOWAIT);
5190            if (m0 == NULL) {
5191                fp->eth_q_stats.mbuf_defrag_failures++;
5192                rc = ENOBUFS;
5193            } else {
5194                /* defrag successful, try mapping again */
5195                *m_head = m0;
5196                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5197                                                tx_buf->m_map, m0,
5198                                                segs, &nsegs, BUS_DMA_NOWAIT);
5199                if (error) {
5200                    fp->eth_q_stats.tx_dma_mapping_failure++;
5201                    rc = error;
5202                }
5203            }
5204        } else {
5205            /* unknown, unrecoverable mapping error */
5206            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5207            bxe_dump_mbuf(sc, m0, FALSE);
5208            rc = error;
5209        }
5210
5211        goto bxe_tx_encap_continue;
5212    }
5213
5214    tx_bd_avail = bxe_tx_avail(sc, fp);
5215
5216    /* make sure there is enough room in the send queue */
5217    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5218        /* Recoverable, try again later. */
5219        fp->eth_q_stats.tx_hw_queue_full++;
5220        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5221        rc = ENOMEM;
5222        goto bxe_tx_encap_continue;
5223    }
5224
5225    /* capture the current H/W TX chain high watermark */
5226    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5227                        (TX_BD_USABLE - tx_bd_avail))) {
5228        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5229    }
5230
5231    /* make sure it fits in the packet window */
5232    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5233        /*
5234         * The mbuf may be to big for the controller to handle. If the frame
5235         * is a TSO frame we'll need to do an additional check.
5236         */
5237        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5238            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5239                goto bxe_tx_encap_continue; /* OK to send */
5240            } else {
5241                fp->eth_q_stats.tx_window_violation_tso++;
5242            }
5243        } else {
5244            fp->eth_q_stats.tx_window_violation_std++;
5245        }
5246
5247        /* lets try to defragment this mbuf and remap it */
5248        fp->eth_q_stats.mbuf_defrag_attempts++;
5249        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5250
5251        m0 = m_defrag(*m_head, M_NOWAIT);
5252        if (m0 == NULL) {
5253            fp->eth_q_stats.mbuf_defrag_failures++;
5254            /* Ugh, just drop the frame... :( */
5255            rc = ENOBUFS;
5256        } else {
5257            /* defrag successful, try mapping again */
5258            *m_head = m0;
5259            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5260                                            tx_buf->m_map, m0,
5261                                            segs, &nsegs, BUS_DMA_NOWAIT);
5262            if (error) {
5263                fp->eth_q_stats.tx_dma_mapping_failure++;
5264                /* No sense in trying to defrag/copy chain, drop it. :( */
5265                rc = error;
5266            } else {
5267               /* if the chain is still too long then drop it */
5268                if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5269                    /*
5270                     * in case TSO is enabled nsegs should be checked against
5271                     * BXE_TSO_MAX_SEGMENTS
5272                     */
5273                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5274                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5275                        fp->eth_q_stats.nsegs_path1_errors++;
5276                        rc = ENODEV;
5277                    }
5278                } else {
5279                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5280                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5281                        fp->eth_q_stats.nsegs_path2_errors++;
5282                        rc = ENODEV;
5283                    }
5284                }
5285            }
5286        }
5287    }
5288
5289bxe_tx_encap_continue:
5290
5291    /* Check for errors */
5292    if (rc) {
5293        if (rc == ENOMEM) {
5294            /* recoverable try again later  */
5295        } else {
5296            fp->eth_q_stats.tx_soft_errors++;
5297            fp->eth_q_stats.mbuf_alloc_tx--;
5298            m_freem(*m_head);
5299            *m_head = NULL;
5300        }
5301
5302        return (rc);
5303    }
5304
5305    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5306    if (m0->m_flags & M_BCAST) {
5307        mac_type = BROADCAST_ADDRESS;
5308    } else if (m0->m_flags & M_MCAST) {
5309        mac_type = MULTICAST_ADDRESS;
5310    }
5311
5312    /* store the mbuf into the mbuf ring */
5313    tx_buf->m        = m0;
5314    tx_buf->first_bd = fp->tx_bd_prod;
5315    tx_buf->flags    = 0;
5316
5317    /* prepare the first transmit (start) BD for the mbuf */
5318    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5319
5320    BLOGD(sc, DBG_TX,
5321          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5322          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5323
5324    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5325    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5326    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5327    total_pkt_size += tx_start_bd->nbytes;
5328    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5329
5330    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5331
5332    /* all frames have at least Start BD + Parsing BD */
5333    nbds = nsegs + 1;
5334    tx_start_bd->nbd = htole16(nbds);
5335
5336    if (m0->m_flags & M_VLANTAG) {
5337        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5338        tx_start_bd->bd_flags.as_bitfield |=
5339            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5340    } else {
5341        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5342        if (IS_VF(sc)) {
5343            /* map ethernet header to find type and header length */
5344            eh = mtod(m0, struct ether_vlan_header *);
5345            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5346        } else {
5347            /* used by FW for packet accounting */
5348            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5349        }
5350    }
5351
5352    /*
5353     * add a parsing BD from the chain. The parsing BD is always added
5354     * though it is only used for TSO and chksum
5355     */
5356    bd_prod = TX_BD_NEXT(bd_prod);
5357
5358    if (m0->m_pkthdr.csum_flags) {
5359        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5360            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5361            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5362        }
5363
5364        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5365            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5366                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5367        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5368            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5369                                                  ETH_TX_BD_FLAGS_IS_UDP |
5370                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5371        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5372                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5373            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5374        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5375            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5376                                                  ETH_TX_BD_FLAGS_IS_UDP);
5377        }
5378    }
5379
5380    if (!CHIP_IS_E1x(sc)) {
5381        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5382        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5383
5384        if (m0->m_pkthdr.csum_flags) {
5385            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5386        }
5387
5388        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5389                 mac_type);
5390    } else {
5391        uint16_t global_data = 0;
5392
5393        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5394        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5395
5396        if (m0->m_pkthdr.csum_flags) {
5397            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5398        }
5399
5400        SET_FLAG(global_data,
5401                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5402        pbd_e1x->global_data |= htole16(global_data);
5403    }
5404
5405    /* setup the parsing BD with TSO specific info */
5406    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5407        fp->eth_q_stats.tx_ofld_frames_lso++;
5408        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5409
5410        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5411            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5412
5413            /* split the first BD into header/data making the fw job easy */
5414            nbds++;
5415            tx_start_bd->nbd = htole16(nbds);
5416            tx_start_bd->nbytes = htole16(hlen);
5417
5418            bd_prod = TX_BD_NEXT(bd_prod);
5419
5420            /* new transmit BD after the tx_parse_bd */
5421            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5422            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5423            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5424            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5425            if (tx_total_pkt_size_bd == NULL) {
5426                tx_total_pkt_size_bd = tx_data_bd;
5427            }
5428
5429            BLOGD(sc, DBG_TX,
5430                  "TSO split header size is %d (%x:%x) nbds %d\n",
5431                  le16toh(tx_start_bd->nbytes),
5432                  le32toh(tx_start_bd->addr_hi),
5433                  le32toh(tx_start_bd->addr_lo),
5434                  nbds);
5435        }
5436
5437        if (!CHIP_IS_E1x(sc)) {
5438            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5439        } else {
5440            bxe_set_pbd_lso(m0, pbd_e1x);
5441        }
5442    }
5443
5444    if (pbd_e2_parsing_data) {
5445        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5446    }
5447
5448    /* prepare remaining BDs, start tx bd contains first seg/frag */
5449    for (i = 1; i < nsegs ; i++) {
5450        bd_prod = TX_BD_NEXT(bd_prod);
5451        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5452        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5453        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5454        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5455        if (tx_total_pkt_size_bd == NULL) {
5456            tx_total_pkt_size_bd = tx_data_bd;
5457        }
5458        total_pkt_size += tx_data_bd->nbytes;
5459    }
5460
5461    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5462
5463    if (tx_total_pkt_size_bd != NULL) {
5464        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5465    }
5466
5467    if (__predict_false(sc->debug & DBG_TX)) {
5468        tmp_bd = tx_buf->first_bd;
5469        for (i = 0; i < nbds; i++)
5470        {
5471            if (i == 0) {
5472                BLOGD(sc, DBG_TX,
5473                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5474                      "bd_flags=0x%x hdr_nbds=%d\n",
5475                      tx_start_bd,
5476                      tmp_bd,
5477                      le16toh(tx_start_bd->nbd),
5478                      le16toh(tx_start_bd->vlan_or_ethertype),
5479                      tx_start_bd->bd_flags.as_bitfield,
5480                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5481            } else if (i == 1) {
5482                if (pbd_e1x) {
5483                    BLOGD(sc, DBG_TX,
5484                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5485                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5486                          "tcp_seq=%u total_hlen_w=%u\n",
5487                          pbd_e1x,
5488                          tmp_bd,
5489                          pbd_e1x->global_data,
5490                          pbd_e1x->ip_hlen_w,
5491                          pbd_e1x->ip_id,
5492                          pbd_e1x->lso_mss,
5493                          pbd_e1x->tcp_flags,
5494                          pbd_e1x->tcp_pseudo_csum,
5495                          pbd_e1x->tcp_send_seq,
5496                          le16toh(pbd_e1x->total_hlen_w));
5497                } else { /* if (pbd_e2) */
5498                    BLOGD(sc, DBG_TX,
5499                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5500                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5501                          pbd_e2,
5502                          tmp_bd,
5503                          pbd_e2->data.mac_addr.dst_hi,
5504                          pbd_e2->data.mac_addr.dst_mid,
5505                          pbd_e2->data.mac_addr.dst_lo,
5506                          pbd_e2->data.mac_addr.src_hi,
5507                          pbd_e2->data.mac_addr.src_mid,
5508                          pbd_e2->data.mac_addr.src_lo,
5509                          pbd_e2->parsing_data);
5510                }
5511            }
5512
5513            if (i != 1) { /* skip parse db as it doesn't hold data */
5514                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5515                BLOGD(sc, DBG_TX,
5516                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5517                      tx_data_bd,
5518                      tmp_bd,
5519                      le16toh(tx_data_bd->nbytes),
5520                      le32toh(tx_data_bd->addr_hi),
5521                      le32toh(tx_data_bd->addr_lo));
5522            }
5523
5524            tmp_bd = TX_BD_NEXT(tmp_bd);
5525        }
5526    }
5527
5528    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5529
5530    /* update TX BD producer index value for next TX */
5531    bd_prod = TX_BD_NEXT(bd_prod);
5532
5533    /*
5534     * If the chain of tx_bd's describing this frame is adjacent to or spans
5535     * an eth_tx_next_bd element then we need to increment the nbds value.
5536     */
5537    if (TX_BD_IDX(bd_prod) < nbds) {
5538        nbds++;
5539    }
5540
5541    /* don't allow reordering of writes for nbd and packets */
5542    mb();
5543
5544    fp->tx_db.data.prod += nbds;
5545
5546    /* producer points to the next free tx_bd at this point */
5547    fp->tx_pkt_prod++;
5548    fp->tx_bd_prod = bd_prod;
5549
5550    DOORBELL(sc, fp->index, fp->tx_db.raw);
5551
5552    fp->eth_q_stats.tx_pkts++;
5553
5554    /* Prevent speculative reads from getting ahead of the status block. */
5555    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5556                      0, 0, BUS_SPACE_BARRIER_READ);
5557
5558    /* Prevent speculative reads from getting ahead of the doorbell. */
5559    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5560                      0, 0, BUS_SPACE_BARRIER_READ);
5561
5562    return (0);
5563}
5564
5565static void
5566bxe_tx_start_locked(struct bxe_softc *sc,
5567                    if_t ifp,
5568                    struct bxe_fastpath *fp)
5569{
5570    struct mbuf *m = NULL;
5571    int tx_count = 0;
5572    uint16_t tx_bd_avail;
5573
5574    BXE_FP_TX_LOCK_ASSERT(fp);
5575
5576    /* keep adding entries while there are frames to send */
5577    while (!if_sendq_empty(ifp)) {
5578
5579        /*
5580         * check for any frames to send
5581         * dequeue can still be NULL even if queue is not empty
5582         */
5583        m = if_dequeue(ifp);
5584        if (__predict_false(m == NULL)) {
5585            break;
5586        }
5587
5588        /* the mbuf now belongs to us */
5589        fp->eth_q_stats.mbuf_alloc_tx++;
5590
5591        /*
5592         * Put the frame into the transmit ring. If we don't have room,
5593         * place the mbuf back at the head of the TX queue, set the
5594         * OACTIVE flag, and wait for the NIC to drain the chain.
5595         */
5596        if (__predict_false(bxe_tx_encap(fp, &m))) {
5597            fp->eth_q_stats.tx_encap_failures++;
5598            if (m != NULL) {
5599                /* mark the TX queue as full and return the frame */
5600                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5601		if_sendq_prepend(ifp, m);
5602                fp->eth_q_stats.mbuf_alloc_tx--;
5603                fp->eth_q_stats.tx_queue_xoff++;
5604            }
5605
5606            /* stop looking for more work */
5607            break;
5608        }
5609
5610        /* the frame was enqueued successfully */
5611        tx_count++;
5612
5613        /* send a copy of the frame to any BPF listeners. */
5614        if_etherbpfmtap(ifp, m);
5615
5616        tx_bd_avail = bxe_tx_avail(sc, fp);
5617
5618        /* handle any completions if we're running low */
5619        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5620            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5621            bxe_txeof(sc, fp);
5622            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5623                break;
5624            }
5625        }
5626    }
5627
5628    /* all TX packets were dequeued and/or the tx ring is full */
5629    if (tx_count > 0) {
5630        /* reset the TX watchdog timeout timer */
5631        fp->watchdog_timer = BXE_TX_TIMEOUT;
5632    }
5633}
5634
5635/* Legacy (non-RSS) dispatch routine */
5636static void
5637bxe_tx_start(if_t ifp)
5638{
5639    struct bxe_softc *sc;
5640    struct bxe_fastpath *fp;
5641
5642    sc = if_getsoftc(ifp);
5643
5644    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5645        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5646        return;
5647    }
5648
5649    if (!sc->link_vars.link_up) {
5650        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5651        return;
5652    }
5653
5654    fp = &sc->fp[0];
5655
5656    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5657        fp->eth_q_stats.tx_queue_full_return++;
5658        return;
5659    }
5660
5661    BXE_FP_TX_LOCK(fp);
5662    bxe_tx_start_locked(sc, ifp, fp);
5663    BXE_FP_TX_UNLOCK(fp);
5664}
5665
5666#if __FreeBSD_version >= 901504
5667
5668static int
5669bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5670                       if_t                ifp,
5671                       struct bxe_fastpath *fp,
5672                       struct mbuf         *m)
5673{
5674    struct buf_ring *tx_br = fp->tx_br;
5675    struct mbuf *next;
5676    int depth, rc, tx_count;
5677    uint16_t tx_bd_avail;
5678
5679    rc = tx_count = 0;
5680
5681    BXE_FP_TX_LOCK_ASSERT(fp);
5682
5683    if (sc->state != BXE_STATE_OPEN)  {
5684        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5685        return ENETDOWN;
5686    }
5687
5688    if (!tx_br) {
5689        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5690        return (EINVAL);
5691    }
5692
5693    if (m != NULL) {
5694        rc = drbr_enqueue(ifp, tx_br, m);
5695        if (rc != 0) {
5696            fp->eth_q_stats.tx_soft_errors++;
5697            goto bxe_tx_mq_start_locked_exit;
5698        }
5699    }
5700
5701    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5702        fp->eth_q_stats.tx_request_link_down_failures++;
5703        goto bxe_tx_mq_start_locked_exit;
5704    }
5705
5706    /* fetch the depth of the driver queue */
5707    depth = drbr_inuse_drv(ifp, tx_br);
5708    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5709        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5710    }
5711
5712    /* keep adding entries while there are frames to send */
5713    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5714        /* handle any completions if we're running low */
5715        tx_bd_avail = bxe_tx_avail(sc, fp);
5716        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5717            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5718            bxe_txeof(sc, fp);
5719            tx_bd_avail = bxe_tx_avail(sc, fp);
5720            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5721                fp->eth_q_stats.bd_avail_too_less_failures++;
5722                m_freem(next);
5723                drbr_advance(ifp, tx_br);
5724                rc = ENOBUFS;
5725                break;
5726            }
5727        }
5728
5729        /* the mbuf now belongs to us */
5730        fp->eth_q_stats.mbuf_alloc_tx++;
5731
5732        /*
5733         * Put the frame into the transmit ring. If we don't have room,
5734         * place the mbuf back at the head of the TX queue, set the
5735         * OACTIVE flag, and wait for the NIC to drain the chain.
5736         */
5737        rc = bxe_tx_encap(fp, &next);
5738        if (__predict_false(rc != 0)) {
5739            fp->eth_q_stats.tx_encap_failures++;
5740            if (next != NULL) {
5741                /* mark the TX queue as full and save the frame */
5742                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5743                drbr_putback(ifp, tx_br, next);
5744                fp->eth_q_stats.mbuf_alloc_tx--;
5745                fp->eth_q_stats.tx_frames_deferred++;
5746            } else
5747                drbr_advance(ifp, tx_br);
5748
5749            /* stop looking for more work */
5750            break;
5751        }
5752
5753        /* the transmit frame was enqueued successfully */
5754        tx_count++;
5755
5756        /* send a copy of the frame to any BPF listeners */
5757	if_etherbpfmtap(ifp, next);
5758
5759        drbr_advance(ifp, tx_br);
5760    }
5761
5762    /* all TX packets were dequeued and/or the tx ring is full */
5763    if (tx_count > 0) {
5764        /* reset the TX watchdog timeout timer */
5765        fp->watchdog_timer = BXE_TX_TIMEOUT;
5766    }
5767
5768bxe_tx_mq_start_locked_exit:
5769    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5770    if (!drbr_empty(ifp, tx_br)) {
5771        fp->eth_q_stats.tx_mq_not_empty++;
5772        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5773    }
5774
5775    return (rc);
5776}
5777
5778static void
5779bxe_tx_mq_start_deferred(void *arg,
5780                         int pending)
5781{
5782    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5783    struct bxe_softc *sc = fp->sc;
5784    if_t ifp = sc->ifp;
5785
5786    BXE_FP_TX_LOCK(fp);
5787    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5788    BXE_FP_TX_UNLOCK(fp);
5789}
5790
5791/* Multiqueue (TSS) dispatch routine. */
5792static int
5793bxe_tx_mq_start(struct ifnet *ifp,
5794                struct mbuf  *m)
5795{
5796    struct bxe_softc *sc = if_getsoftc(ifp);
5797    struct bxe_fastpath *fp;
5798    int fp_index, rc;
5799
5800    fp_index = 0; /* default is the first queue */
5801
5802    /* check if flowid is set */
5803
5804    if (BXE_VALID_FLOWID(m))
5805        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5806
5807    fp = &sc->fp[fp_index];
5808
5809    if (sc->state != BXE_STATE_OPEN)  {
5810        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5811        return ENETDOWN;
5812    }
5813
5814    if (BXE_FP_TX_TRYLOCK(fp)) {
5815        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5816        BXE_FP_TX_UNLOCK(fp);
5817    } else {
5818        rc = drbr_enqueue(ifp, fp->tx_br, m);
5819        taskqueue_enqueue(fp->tq, &fp->tx_task);
5820    }
5821
5822    return (rc);
5823}
5824
5825static void
5826bxe_mq_flush(struct ifnet *ifp)
5827{
5828    struct bxe_softc *sc = if_getsoftc(ifp);
5829    struct bxe_fastpath *fp;
5830    struct mbuf *m;
5831    int i;
5832
5833    for (i = 0; i < sc->num_queues; i++) {
5834        fp = &sc->fp[i];
5835
5836        if (fp->state != BXE_FP_STATE_IRQ) {
5837            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5838                  fp->index, fp->state);
5839            continue;
5840        }
5841
5842        if (fp->tx_br != NULL) {
5843            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5844            BXE_FP_TX_LOCK(fp);
5845            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5846                m_freem(m);
5847            }
5848            BXE_FP_TX_UNLOCK(fp);
5849        }
5850    }
5851
5852    if_qflush(ifp);
5853}
5854
5855#endif /* FreeBSD_version >= 901504 */
5856
5857static uint16_t
5858bxe_cid_ilt_lines(struct bxe_softc *sc)
5859{
5860    if (IS_SRIOV(sc)) {
5861        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5862    }
5863    return (L2_ILT_LINES(sc));
5864}
5865
5866static void
5867bxe_ilt_set_info(struct bxe_softc *sc)
5868{
5869    struct ilt_client_info *ilt_client;
5870    struct ecore_ilt *ilt = sc->ilt;
5871    uint16_t line = 0;
5872
5873    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5874    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5875
5876    /* CDU */
5877    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5878    ilt_client->client_num = ILT_CLIENT_CDU;
5879    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5880    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5881    ilt_client->start = line;
5882    line += bxe_cid_ilt_lines(sc);
5883
5884    if (CNIC_SUPPORT(sc)) {
5885        line += CNIC_ILT_LINES;
5886    }
5887
5888    ilt_client->end = (line - 1);
5889
5890    BLOGD(sc, DBG_LOAD,
5891          "ilt client[CDU]: start %d, end %d, "
5892          "psz 0x%x, flags 0x%x, hw psz %d\n",
5893          ilt_client->start, ilt_client->end,
5894          ilt_client->page_size,
5895          ilt_client->flags,
5896          ilog2(ilt_client->page_size >> 12));
5897
5898    /* QM */
5899    if (QM_INIT(sc->qm_cid_count)) {
5900        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5901        ilt_client->client_num = ILT_CLIENT_QM;
5902        ilt_client->page_size = QM_ILT_PAGE_SZ;
5903        ilt_client->flags = 0;
5904        ilt_client->start = line;
5905
5906        /* 4 bytes for each cid */
5907        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5908                             QM_ILT_PAGE_SZ);
5909
5910        ilt_client->end = (line - 1);
5911
5912        BLOGD(sc, DBG_LOAD,
5913              "ilt client[QM]: start %d, end %d, "
5914              "psz 0x%x, flags 0x%x, hw psz %d\n",
5915              ilt_client->start, ilt_client->end,
5916              ilt_client->page_size, ilt_client->flags,
5917              ilog2(ilt_client->page_size >> 12));
5918    }
5919
5920    if (CNIC_SUPPORT(sc)) {
5921        /* SRC */
5922        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5923        ilt_client->client_num = ILT_CLIENT_SRC;
5924        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5925        ilt_client->flags = 0;
5926        ilt_client->start = line;
5927        line += SRC_ILT_LINES;
5928        ilt_client->end = (line - 1);
5929
5930        BLOGD(sc, DBG_LOAD,
5931              "ilt client[SRC]: start %d, end %d, "
5932              "psz 0x%x, flags 0x%x, hw psz %d\n",
5933              ilt_client->start, ilt_client->end,
5934              ilt_client->page_size, ilt_client->flags,
5935              ilog2(ilt_client->page_size >> 12));
5936
5937        /* TM */
5938        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5939        ilt_client->client_num = ILT_CLIENT_TM;
5940        ilt_client->page_size = TM_ILT_PAGE_SZ;
5941        ilt_client->flags = 0;
5942        ilt_client->start = line;
5943        line += TM_ILT_LINES;
5944        ilt_client->end = (line - 1);
5945
5946        BLOGD(sc, DBG_LOAD,
5947              "ilt client[TM]: start %d, end %d, "
5948              "psz 0x%x, flags 0x%x, hw psz %d\n",
5949              ilt_client->start, ilt_client->end,
5950              ilt_client->page_size, ilt_client->flags,
5951              ilog2(ilt_client->page_size >> 12));
5952    }
5953
5954    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5955}
5956
5957static void
5958bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5959{
5960    int i;
5961    uint32_t rx_buf_size;
5962
5963    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5964
5965    for (i = 0; i < sc->num_queues; i++) {
5966        if(rx_buf_size <= MCLBYTES){
5967            sc->fp[i].rx_buf_size = rx_buf_size;
5968            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5969        }else if (rx_buf_size <= MJUMPAGESIZE){
5970            sc->fp[i].rx_buf_size = rx_buf_size;
5971            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5972        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
5973            sc->fp[i].rx_buf_size = MCLBYTES;
5974            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5975        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
5976            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5977            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5978        }else {
5979            sc->fp[i].rx_buf_size = MCLBYTES;
5980            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5981        }
5982    }
5983}
5984
5985static int
5986bxe_alloc_ilt_mem(struct bxe_softc *sc)
5987{
5988    int rc = 0;
5989
5990    if ((sc->ilt =
5991         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5992                                    M_BXE_ILT,
5993                                    (M_NOWAIT | M_ZERO))) == NULL) {
5994        rc = 1;
5995    }
5996
5997    return (rc);
5998}
5999
6000static int
6001bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6002{
6003    int rc = 0;
6004
6005    if ((sc->ilt->lines =
6006         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6007                                    M_BXE_ILT,
6008                                    (M_NOWAIT | M_ZERO))) == NULL) {
6009        rc = 1;
6010    }
6011
6012    return (rc);
6013}
6014
6015static void
6016bxe_free_ilt_mem(struct bxe_softc *sc)
6017{
6018    if (sc->ilt != NULL) {
6019        free(sc->ilt, M_BXE_ILT);
6020        sc->ilt = NULL;
6021    }
6022}
6023
6024static void
6025bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6026{
6027    if (sc->ilt->lines != NULL) {
6028        free(sc->ilt->lines, M_BXE_ILT);
6029        sc->ilt->lines = NULL;
6030    }
6031}
6032
6033static void
6034bxe_free_mem(struct bxe_softc *sc)
6035{
6036    int i;
6037
6038    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6039        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6040        sc->context[i].vcxt = NULL;
6041        sc->context[i].size = 0;
6042    }
6043
6044    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6045
6046    bxe_free_ilt_lines_mem(sc);
6047
6048}
6049
6050static int
6051bxe_alloc_mem(struct bxe_softc *sc)
6052{
6053
6054    int context_size;
6055    int allocated;
6056    int i;
6057
6058    /*
6059     * Allocate memory for CDU context:
6060     * This memory is allocated separately and not in the generic ILT
6061     * functions because CDU differs in few aspects:
6062     * 1. There can be multiple entities allocating memory for context -
6063     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6064     * its own ILT lines.
6065     * 2. Since CDU page-size is not a single 4KB page (which is the case
6066     * for the other ILT clients), to be efficient we want to support
6067     * allocation of sub-page-size in the last entry.
6068     * 3. Context pointers are used by the driver to pass to FW / update
6069     * the context (for the other ILT clients the pointers are used just to
6070     * free the memory during unload).
6071     */
6072    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6073    for (i = 0, allocated = 0; allocated < context_size; i++) {
6074        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6075                                  (context_size - allocated));
6076
6077        if (bxe_dma_alloc(sc, sc->context[i].size,
6078                          &sc->context[i].vcxt_dma,
6079                          "cdu context") != 0) {
6080            bxe_free_mem(sc);
6081            return (-1);
6082        }
6083
6084        sc->context[i].vcxt =
6085            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6086
6087        allocated += sc->context[i].size;
6088    }
6089
6090    bxe_alloc_ilt_lines_mem(sc);
6091
6092    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6093          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6094    {
6095        for (i = 0; i < 4; i++) {
6096            BLOGD(sc, DBG_LOAD,
6097                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6098                  i,
6099                  sc->ilt->clients[i].page_size,
6100                  sc->ilt->clients[i].start,
6101                  sc->ilt->clients[i].end,
6102                  sc->ilt->clients[i].client_num,
6103                  sc->ilt->clients[i].flags);
6104        }
6105    }
6106    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6107        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6108        bxe_free_mem(sc);
6109        return (-1);
6110    }
6111
6112    return (0);
6113}
6114
6115static void
6116bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6117{
6118    struct bxe_softc *sc;
6119    int i;
6120
6121    sc = fp->sc;
6122
6123    if (fp->rx_mbuf_tag == NULL) {
6124        return;
6125    }
6126
6127    /* free all mbufs and unload all maps */
6128    for (i = 0; i < RX_BD_TOTAL; i++) {
6129        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6130            bus_dmamap_sync(fp->rx_mbuf_tag,
6131                            fp->rx_mbuf_chain[i].m_map,
6132                            BUS_DMASYNC_POSTREAD);
6133            bus_dmamap_unload(fp->rx_mbuf_tag,
6134                              fp->rx_mbuf_chain[i].m_map);
6135        }
6136
6137        if (fp->rx_mbuf_chain[i].m != NULL) {
6138            m_freem(fp->rx_mbuf_chain[i].m);
6139            fp->rx_mbuf_chain[i].m = NULL;
6140            fp->eth_q_stats.mbuf_alloc_rx--;
6141        }
6142    }
6143}
6144
6145static void
6146bxe_free_tpa_pool(struct bxe_fastpath *fp)
6147{
6148    struct bxe_softc *sc;
6149    int i, max_agg_queues;
6150
6151    sc = fp->sc;
6152
6153    if (fp->rx_mbuf_tag == NULL) {
6154        return;
6155    }
6156
6157    max_agg_queues = MAX_AGG_QS(sc);
6158
6159    /* release all mbufs and unload all DMA maps in the TPA pool */
6160    for (i = 0; i < max_agg_queues; i++) {
6161        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6162            bus_dmamap_sync(fp->rx_mbuf_tag,
6163                            fp->rx_tpa_info[i].bd.m_map,
6164                            BUS_DMASYNC_POSTREAD);
6165            bus_dmamap_unload(fp->rx_mbuf_tag,
6166                              fp->rx_tpa_info[i].bd.m_map);
6167        }
6168
6169        if (fp->rx_tpa_info[i].bd.m != NULL) {
6170            m_freem(fp->rx_tpa_info[i].bd.m);
6171            fp->rx_tpa_info[i].bd.m = NULL;
6172            fp->eth_q_stats.mbuf_alloc_tpa--;
6173        }
6174    }
6175}
6176
6177static void
6178bxe_free_sge_chain(struct bxe_fastpath *fp)
6179{
6180    struct bxe_softc *sc;
6181    int i;
6182
6183    sc = fp->sc;
6184
6185    if (fp->rx_sge_mbuf_tag == NULL) {
6186        return;
6187    }
6188
6189    /* rree all mbufs and unload all maps */
6190    for (i = 0; i < RX_SGE_TOTAL; i++) {
6191        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6192            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6193                            fp->rx_sge_mbuf_chain[i].m_map,
6194                            BUS_DMASYNC_POSTREAD);
6195            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6196                              fp->rx_sge_mbuf_chain[i].m_map);
6197        }
6198
6199        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6200            m_freem(fp->rx_sge_mbuf_chain[i].m);
6201            fp->rx_sge_mbuf_chain[i].m = NULL;
6202            fp->eth_q_stats.mbuf_alloc_sge--;
6203        }
6204    }
6205}
6206
6207static void
6208bxe_free_fp_buffers(struct bxe_softc *sc)
6209{
6210    struct bxe_fastpath *fp;
6211    int i;
6212
6213    for (i = 0; i < sc->num_queues; i++) {
6214        fp = &sc->fp[i];
6215
6216#if __FreeBSD_version >= 901504
6217        if (fp->tx_br != NULL) {
6218            /* just in case bxe_mq_flush() wasn't called */
6219            if (mtx_initialized(&fp->tx_mtx)) {
6220                struct mbuf *m;
6221
6222                BXE_FP_TX_LOCK(fp);
6223                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6224                    m_freem(m);
6225                BXE_FP_TX_UNLOCK(fp);
6226            }
6227        }
6228#endif
6229
6230        /* free all RX buffers */
6231        bxe_free_rx_bd_chain(fp);
6232        bxe_free_tpa_pool(fp);
6233        bxe_free_sge_chain(fp);
6234
6235        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6236            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6237                  fp->eth_q_stats.mbuf_alloc_rx);
6238        }
6239
6240        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6241            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6242                  fp->eth_q_stats.mbuf_alloc_sge);
6243        }
6244
6245        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6246            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6247                  fp->eth_q_stats.mbuf_alloc_tpa);
6248        }
6249
6250        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6251            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6252                  fp->eth_q_stats.mbuf_alloc_tx);
6253        }
6254
6255        /* XXX verify all mbufs were reclaimed */
6256    }
6257}
6258
6259static int
6260bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6261                     uint16_t            prev_index,
6262                     uint16_t            index)
6263{
6264    struct bxe_sw_rx_bd *rx_buf;
6265    struct eth_rx_bd *rx_bd;
6266    bus_dma_segment_t segs[1];
6267    bus_dmamap_t map;
6268    struct mbuf *m;
6269    int nsegs, rc;
6270
6271    rc = 0;
6272
6273    /* allocate the new RX BD mbuf */
6274    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6275    if (__predict_false(m == NULL)) {
6276        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6277        return (ENOBUFS);
6278    }
6279
6280    fp->eth_q_stats.mbuf_alloc_rx++;
6281
6282    /* initialize the mbuf buffer length */
6283    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6284
6285    /* map the mbuf into non-paged pool */
6286    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6287                                 fp->rx_mbuf_spare_map,
6288                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6289    if (__predict_false(rc != 0)) {
6290        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6291        m_freem(m);
6292        fp->eth_q_stats.mbuf_alloc_rx--;
6293        return (rc);
6294    }
6295
6296    /* all mbufs must map to a single segment */
6297    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6298
6299    /* release any existing RX BD mbuf mappings */
6300
6301    if (prev_index != index) {
6302        rx_buf = &fp->rx_mbuf_chain[prev_index];
6303
6304        if (rx_buf->m_map != NULL) {
6305            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6306                            BUS_DMASYNC_POSTREAD);
6307            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6308        }
6309
6310        /*
6311         * We only get here from bxe_rxeof() when the maximum number
6312         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6313         * holds the mbuf in the prev_index so it's OK to NULL it out
6314         * here without concern of a memory leak.
6315         */
6316        fp->rx_mbuf_chain[prev_index].m = NULL;
6317    }
6318
6319    rx_buf = &fp->rx_mbuf_chain[index];
6320
6321    if (rx_buf->m_map != NULL) {
6322        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6323                        BUS_DMASYNC_POSTREAD);
6324        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6325    }
6326
6327    /* save the mbuf and mapping info for a future packet */
6328    map = (prev_index != index) ?
6329              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6330    rx_buf->m_map = fp->rx_mbuf_spare_map;
6331    fp->rx_mbuf_spare_map = map;
6332    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6333                    BUS_DMASYNC_PREREAD);
6334    rx_buf->m = m;
6335
6336    rx_bd = &fp->rx_chain[index];
6337    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6338    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6339
6340    return (rc);
6341}
6342
6343static int
6344bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6345                      int                 queue)
6346{
6347    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6348    bus_dma_segment_t segs[1];
6349    bus_dmamap_t map;
6350    struct mbuf *m;
6351    int nsegs;
6352    int rc = 0;
6353
6354    /* allocate the new TPA mbuf */
6355    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6356    if (__predict_false(m == NULL)) {
6357        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6358        return (ENOBUFS);
6359    }
6360
6361    fp->eth_q_stats.mbuf_alloc_tpa++;
6362
6363    /* initialize the mbuf buffer length */
6364    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6365
6366    /* map the mbuf into non-paged pool */
6367    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6368                                 fp->rx_tpa_info_mbuf_spare_map,
6369                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6370    if (__predict_false(rc != 0)) {
6371        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6372        m_free(m);
6373        fp->eth_q_stats.mbuf_alloc_tpa--;
6374        return (rc);
6375    }
6376
6377    /* all mbufs must map to a single segment */
6378    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6379
6380    /* release any existing TPA mbuf mapping */
6381    if (tpa_info->bd.m_map != NULL) {
6382        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6383                        BUS_DMASYNC_POSTREAD);
6384        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6385    }
6386
6387    /* save the mbuf and mapping info for the TPA mbuf */
6388    map = tpa_info->bd.m_map;
6389    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6390    fp->rx_tpa_info_mbuf_spare_map = map;
6391    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6392                    BUS_DMASYNC_PREREAD);
6393    tpa_info->bd.m = m;
6394    tpa_info->seg = segs[0];
6395
6396    return (rc);
6397}
6398
6399/*
6400 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6401 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6402 * chain.
6403 */
6404static int
6405bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6406                      uint16_t            index)
6407{
6408    struct bxe_sw_rx_bd *sge_buf;
6409    struct eth_rx_sge *sge;
6410    bus_dma_segment_t segs[1];
6411    bus_dmamap_t map;
6412    struct mbuf *m;
6413    int nsegs;
6414    int rc = 0;
6415
6416    /* allocate a new SGE mbuf */
6417    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6418    if (__predict_false(m == NULL)) {
6419        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6420        return (ENOMEM);
6421    }
6422
6423    fp->eth_q_stats.mbuf_alloc_sge++;
6424
6425    /* initialize the mbuf buffer length */
6426    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6427
6428    /* map the SGE mbuf into non-paged pool */
6429    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6430                                 fp->rx_sge_mbuf_spare_map,
6431                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6432    if (__predict_false(rc != 0)) {
6433        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6434        m_freem(m);
6435        fp->eth_q_stats.mbuf_alloc_sge--;
6436        return (rc);
6437    }
6438
6439    /* all mbufs must map to a single segment */
6440    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6441
6442    sge_buf = &fp->rx_sge_mbuf_chain[index];
6443
6444    /* release any existing SGE mbuf mapping */
6445    if (sge_buf->m_map != NULL) {
6446        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6447                        BUS_DMASYNC_POSTREAD);
6448        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6449    }
6450
6451    /* save the mbuf and mapping info for a future packet */
6452    map = sge_buf->m_map;
6453    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6454    fp->rx_sge_mbuf_spare_map = map;
6455    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6456                    BUS_DMASYNC_PREREAD);
6457    sge_buf->m = m;
6458
6459    sge = &fp->rx_sge_chain[index];
6460    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6461    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6462
6463    return (rc);
6464}
6465
6466static __noinline int
6467bxe_alloc_fp_buffers(struct bxe_softc *sc)
6468{
6469    struct bxe_fastpath *fp;
6470    int i, j, rc = 0;
6471    int ring_prod, cqe_ring_prod;
6472    int max_agg_queues;
6473
6474    for (i = 0; i < sc->num_queues; i++) {
6475        fp = &sc->fp[i];
6476
6477        ring_prod = cqe_ring_prod = 0;
6478        fp->rx_bd_cons = 0;
6479        fp->rx_cq_cons = 0;
6480
6481        /* allocate buffers for the RX BDs in RX BD chain */
6482        for (j = 0; j < sc->max_rx_bufs; j++) {
6483            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6484            if (rc != 0) {
6485                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6486                      i, rc);
6487                goto bxe_alloc_fp_buffers_error;
6488            }
6489
6490            ring_prod     = RX_BD_NEXT(ring_prod);
6491            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6492        }
6493
6494        fp->rx_bd_prod = ring_prod;
6495        fp->rx_cq_prod = cqe_ring_prod;
6496        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6497
6498        max_agg_queues = MAX_AGG_QS(sc);
6499
6500        fp->tpa_enable = TRUE;
6501
6502        /* fill the TPA pool */
6503        for (j = 0; j < max_agg_queues; j++) {
6504            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6505            if (rc != 0) {
6506                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6507                          i, j);
6508                fp->tpa_enable = FALSE;
6509                goto bxe_alloc_fp_buffers_error;
6510            }
6511
6512            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6513        }
6514
6515        if (fp->tpa_enable) {
6516            /* fill the RX SGE chain */
6517            ring_prod = 0;
6518            for (j = 0; j < RX_SGE_USABLE; j++) {
6519                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6520                if (rc != 0) {
6521                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6522                              i, ring_prod);
6523                    fp->tpa_enable = FALSE;
6524                    ring_prod = 0;
6525                    goto bxe_alloc_fp_buffers_error;
6526                }
6527
6528                ring_prod = RX_SGE_NEXT(ring_prod);
6529            }
6530
6531            fp->rx_sge_prod = ring_prod;
6532        }
6533    }
6534
6535    return (0);
6536
6537bxe_alloc_fp_buffers_error:
6538
6539    /* unwind what was already allocated */
6540    bxe_free_rx_bd_chain(fp);
6541    bxe_free_tpa_pool(fp);
6542    bxe_free_sge_chain(fp);
6543
6544    return (ENOBUFS);
6545}
6546
6547static void
6548bxe_free_fw_stats_mem(struct bxe_softc *sc)
6549{
6550    bxe_dma_free(sc, &sc->fw_stats_dma);
6551
6552    sc->fw_stats_num = 0;
6553
6554    sc->fw_stats_req_size = 0;
6555    sc->fw_stats_req = NULL;
6556    sc->fw_stats_req_mapping = 0;
6557
6558    sc->fw_stats_data_size = 0;
6559    sc->fw_stats_data = NULL;
6560    sc->fw_stats_data_mapping = 0;
6561}
6562
6563static int
6564bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6565{
6566    uint8_t num_queue_stats;
6567    int num_groups;
6568
6569    /* number of queues for statistics is number of eth queues */
6570    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6571
6572    /*
6573     * Total number of FW statistics requests =
6574     *   1 for port stats + 1 for PF stats + num of queues
6575     */
6576    sc->fw_stats_num = (2 + num_queue_stats);
6577
6578    /*
6579     * Request is built from stats_query_header and an array of
6580     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6581     * rules. The real number or requests is configured in the
6582     * stats_query_header.
6583     */
6584    num_groups =
6585        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6586         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6587
6588    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6589          sc->fw_stats_num, num_groups);
6590
6591    sc->fw_stats_req_size =
6592        (sizeof(struct stats_query_header) +
6593         (num_groups * sizeof(struct stats_query_cmd_group)));
6594
6595    /*
6596     * Data for statistics requests + stats_counter.
6597     * stats_counter holds per-STORM counters that are incremented when
6598     * STORM has finished with the current request. Memory for FCoE
6599     * offloaded statistics are counted anyway, even if they will not be sent.
6600     * VF stats are not accounted for here as the data of VF stats is stored
6601     * in memory allocated by the VF, not here.
6602     */
6603    sc->fw_stats_data_size =
6604        (sizeof(struct stats_counter) +
6605         sizeof(struct per_port_stats) +
6606         sizeof(struct per_pf_stats) +
6607         /* sizeof(struct fcoe_statistics_params) + */
6608         (sizeof(struct per_queue_stats) * num_queue_stats));
6609
6610    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6611                      &sc->fw_stats_dma, "fw stats") != 0) {
6612        bxe_free_fw_stats_mem(sc);
6613        return (-1);
6614    }
6615
6616    /* set up the shortcuts */
6617
6618    sc->fw_stats_req =
6619        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6620    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6621
6622    sc->fw_stats_data =
6623        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6624                                     sc->fw_stats_req_size);
6625    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6626                                 sc->fw_stats_req_size);
6627
6628    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6629          (uintmax_t)sc->fw_stats_req_mapping);
6630
6631    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6632          (uintmax_t)sc->fw_stats_data_mapping);
6633
6634    return (0);
6635}
6636
6637/*
6638 * Bits map:
6639 * 0-7  - Engine0 load counter.
6640 * 8-15 - Engine1 load counter.
6641 * 16   - Engine0 RESET_IN_PROGRESS bit.
6642 * 17   - Engine1 RESET_IN_PROGRESS bit.
6643 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6644 *        function on the engine
6645 * 19   - Engine1 ONE_IS_LOADED.
6646 * 20   - Chip reset flow bit. When set none-leader must wait for both engines
6647 *        leader to complete (check for both RESET_IN_PROGRESS bits and not
6648 *        for just the one belonging to its engine).
6649 */
6650#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6651#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6652#define BXE_PATH0_LOAD_CNT_SHIFT  0
6653#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6654#define BXE_PATH1_LOAD_CNT_SHIFT  8
6655#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6656#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6657#define BXE_GLOBAL_RESET_BIT      0x00040000
6658
6659/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6660static void
6661bxe_set_reset_global(struct bxe_softc *sc)
6662{
6663    uint32_t val;
6664    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6665    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6666    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6667    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6668}
6669
6670/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6671static void
6672bxe_clear_reset_global(struct bxe_softc *sc)
6673{
6674    uint32_t val;
6675    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6676    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6677    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6678    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6679}
6680
6681/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6682static uint8_t
6683bxe_reset_is_global(struct bxe_softc *sc)
6684{
6685    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6686    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6687    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6688}
6689
6690/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6691static void
6692bxe_set_reset_done(struct bxe_softc *sc)
6693{
6694    uint32_t val;
6695    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6696                                 BXE_PATH0_RST_IN_PROG_BIT;
6697
6698    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6699
6700    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6701    /* Clear the bit */
6702    val &= ~bit;
6703    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6704
6705    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6706}
6707
6708/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6709static void
6710bxe_set_reset_in_progress(struct bxe_softc *sc)
6711{
6712    uint32_t val;
6713    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6714                                 BXE_PATH0_RST_IN_PROG_BIT;
6715
6716    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6717
6718    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6719    /* Set the bit */
6720    val |= bit;
6721    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6722
6723    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6724}
6725
6726/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6727static uint8_t
6728bxe_reset_is_done(struct bxe_softc *sc,
6729                  int              engine)
6730{
6731    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6732    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6733                            BXE_PATH0_RST_IN_PROG_BIT;
6734
6735    /* return false if bit is set */
6736    return (val & bit) ? FALSE : TRUE;
6737}
6738
6739/* get the load status for an engine, should be run under rtnl lock */
6740static uint8_t
6741bxe_get_load_status(struct bxe_softc *sc,
6742                    int              engine)
6743{
6744    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6745                             BXE_PATH0_LOAD_CNT_MASK;
6746    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6747                              BXE_PATH0_LOAD_CNT_SHIFT;
6748    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6749
6750    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6751
6752    val = ((val & mask) >> shift);
6753
6754    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6755
6756    return (val != 0);
6757}
6758
6759/* set pf load mark */
6760/* XXX needs to be under rtnl lock */
6761static void
6762bxe_set_pf_load(struct bxe_softc *sc)
6763{
6764    uint32_t val;
6765    uint32_t val1;
6766    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6767                                  BXE_PATH0_LOAD_CNT_MASK;
6768    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6769                                   BXE_PATH0_LOAD_CNT_SHIFT;
6770
6771    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6772
6773    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6774    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6775
6776    /* get the current counter value */
6777    val1 = ((val & mask) >> shift);
6778
6779    /* set bit of this PF */
6780    val1 |= (1 << SC_ABS_FUNC(sc));
6781
6782    /* clear the old value */
6783    val &= ~mask;
6784
6785    /* set the new one */
6786    val |= ((val1 << shift) & mask);
6787
6788    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6789
6790    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6791}
6792
6793/* clear pf load mark */
6794/* XXX needs to be under rtnl lock */
6795static uint8_t
6796bxe_clear_pf_load(struct bxe_softc *sc)
6797{
6798    uint32_t val1, val;
6799    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6800                                  BXE_PATH0_LOAD_CNT_MASK;
6801    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6802                                   BXE_PATH0_LOAD_CNT_SHIFT;
6803
6804    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6805    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6806    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6807
6808    /* get the current counter value */
6809    val1 = (val & mask) >> shift;
6810
6811    /* clear bit of that PF */
6812    val1 &= ~(1 << SC_ABS_FUNC(sc));
6813
6814    /* clear the old value */
6815    val &= ~mask;
6816
6817    /* set the new one */
6818    val |= ((val1 << shift) & mask);
6819
6820    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6821    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6822    return (val1 != 0);
6823}
6824
6825/* send load requrest to mcp and analyze response */
6826static int
6827bxe_nic_load_request(struct bxe_softc *sc,
6828                     uint32_t         *load_code)
6829{
6830    /* init fw_seq */
6831    sc->fw_seq =
6832        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6833         DRV_MSG_SEQ_NUMBER_MASK);
6834
6835    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6836
6837    /* get the current FW pulse sequence */
6838    sc->fw_drv_pulse_wr_seq =
6839        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6840         DRV_PULSE_SEQ_MASK);
6841
6842    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6843          sc->fw_drv_pulse_wr_seq);
6844
6845    /* load request */
6846    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6847                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6848
6849    /* if the MCP fails to respond we must abort */
6850    if (!(*load_code)) {
6851        BLOGE(sc, "MCP response failure!\n");
6852        return (-1);
6853    }
6854
6855    /* if MCP refused then must abort */
6856    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6857        BLOGE(sc, "MCP refused load request\n");
6858        return (-1);
6859    }
6860
6861    return (0);
6862}
6863
6864/*
6865 * Check whether another PF has already loaded FW to chip. In virtualized
6866 * environments a pf from anoth VM may have already initialized the device
6867 * including loading FW.
6868 */
6869static int
6870bxe_nic_load_analyze_req(struct bxe_softc *sc,
6871                         uint32_t         load_code)
6872{
6873    uint32_t my_fw, loaded_fw;
6874
6875    /* is another pf loaded on this engine? */
6876    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6877        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6878        /* build my FW version dword */
6879        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6880                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6881                 (BCM_5710_FW_REVISION_VERSION << 16) +
6882                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6883
6884        /* read loaded FW from chip */
6885        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6886        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6887              loaded_fw, my_fw);
6888
6889        /* abort nic load if version mismatch */
6890        if (my_fw != loaded_fw) {
6891            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6892                  loaded_fw, my_fw);
6893            return (-1);
6894        }
6895    }
6896
6897    return (0);
6898}
6899
6900/* mark PMF if applicable */
6901static void
6902bxe_nic_load_pmf(struct bxe_softc *sc,
6903                 uint32_t         load_code)
6904{
6905    uint32_t ncsi_oem_data_addr;
6906
6907    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6908        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6909        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6910        /*
6911         * Barrier here for ordering between the writing to sc->port.pmf here
6912         * and reading it from the periodic task.
6913         */
6914        sc->port.pmf = 1;
6915        mb();
6916    } else {
6917        sc->port.pmf = 0;
6918    }
6919
6920    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6921
6922    /* XXX needed? */
6923    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6924        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6925            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6926            if (ncsi_oem_data_addr) {
6927                REG_WR(sc,
6928                       (ncsi_oem_data_addr +
6929                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6930                       0);
6931            }
6932        }
6933    }
6934}
6935
6936static void
6937bxe_read_mf_cfg(struct bxe_softc *sc)
6938{
6939    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6940    int abs_func;
6941    int vn;
6942
6943    if (BXE_NOMCP(sc)) {
6944        return; /* what should be the default bvalue in this case */
6945    }
6946
6947    /*
6948     * The formula for computing the absolute function number is...
6949     * For 2 port configuration (4 functions per port):
6950     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6951     * For 4 port configuration (2 functions per port):
6952     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6953     */
6954    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6955        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6956        if (abs_func >= E1H_FUNC_MAX) {
6957            break;
6958        }
6959        sc->devinfo.mf_info.mf_config[vn] =
6960            MFCFG_RD(sc, func_mf_config[abs_func].config);
6961    }
6962
6963    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6964        FUNC_MF_CFG_FUNC_DISABLED) {
6965        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6966        sc->flags |= BXE_MF_FUNC_DIS;
6967    } else {
6968        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6969        sc->flags &= ~BXE_MF_FUNC_DIS;
6970    }
6971}
6972
6973/* acquire split MCP access lock register */
6974static int bxe_acquire_alr(struct bxe_softc *sc)
6975{
6976    uint32_t j, val;
6977
6978    for (j = 0; j < 1000; j++) {
6979        val = (1UL << 31);
6980        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6981        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6982        if (val & (1L << 31))
6983            break;
6984
6985        DELAY(5000);
6986    }
6987
6988    if (!(val & (1L << 31))) {
6989        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6990        return (-1);
6991    }
6992
6993    return (0);
6994}
6995
6996/* release split MCP access lock register */
6997static void bxe_release_alr(struct bxe_softc *sc)
6998{
6999    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
7000}
7001
7002static void
7003bxe_fan_failure(struct bxe_softc *sc)
7004{
7005    int port = SC_PORT(sc);
7006    uint32_t ext_phy_config;
7007
7008    /* mark the failure */
7009    ext_phy_config =
7010        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7011
7012    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7013    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7014    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7015             ext_phy_config);
7016
7017    /* log the failure */
7018    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7019              "the card to prevent permanent damage. "
7020              "Please contact OEM Support for assistance\n");
7021
7022    /* XXX */
7023#if 1
7024    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7025#else
7026    /*
7027     * Schedule device reset (unload)
7028     * This is due to some boards consuming sufficient power when driver is
7029     * up to overheat if fan fails.
7030     */
7031    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7032    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7033#endif
7034}
7035
7036/* this function is called upon a link interrupt */
7037static void
7038bxe_link_attn(struct bxe_softc *sc)
7039{
7040    uint32_t pause_enabled = 0;
7041    struct host_port_stats *pstats;
7042    int cmng_fns;
7043    struct bxe_fastpath *fp;
7044    int i;
7045
7046    /* Make sure that we are synced with the current statistics */
7047    bxe_stats_handle(sc, STATS_EVENT_STOP);
7048    BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7049    elink_link_update(&sc->link_params, &sc->link_vars);
7050
7051    if (sc->link_vars.link_up) {
7052
7053        /* dropless flow control */
7054        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7055            pause_enabled = 0;
7056
7057            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7058                pause_enabled = 1;
7059            }
7060
7061            REG_WR(sc,
7062                   (BAR_USTRORM_INTMEM +
7063                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7064                   pause_enabled);
7065        }
7066
7067        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7068            pstats = BXE_SP(sc, port_stats);
7069            /* reset old mac stats */
7070            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7071        }
7072
7073        if (sc->state == BXE_STATE_OPEN) {
7074            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7075        }
7076
7077	/* Restart tx when the link comes back. */
7078        FOR_EACH_ETH_QUEUE(sc, i) {
7079            fp = &sc->fp[i];
7080            taskqueue_enqueue(fp->tq, &fp->tx_task);
7081	}
7082    }
7083
7084    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7085        cmng_fns = bxe_get_cmng_fns_mode(sc);
7086
7087        if (cmng_fns != CMNG_FNS_NONE) {
7088            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7089            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7090        } else {
7091            /* rate shaping and fairness are disabled */
7092            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7093        }
7094    }
7095
7096    bxe_link_report_locked(sc);
7097
7098    if (IS_MF(sc)) {
7099        ; // XXX bxe_link_sync_notify(sc);
7100    }
7101}
7102
7103static void
7104bxe_attn_int_asserted(struct bxe_softc *sc,
7105                      uint32_t         asserted)
7106{
7107    int port = SC_PORT(sc);
7108    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7109                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7110    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7111                                        NIG_REG_MASK_INTERRUPT_PORT0;
7112    uint32_t aeu_mask;
7113    uint32_t nig_mask = 0;
7114    uint32_t reg_addr;
7115    uint32_t igu_acked;
7116    uint32_t cnt;
7117
7118    if (sc->attn_state & asserted) {
7119        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7120    }
7121
7122    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7123
7124    aeu_mask = REG_RD(sc, aeu_addr);
7125
7126    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7127          aeu_mask, asserted);
7128
7129    aeu_mask &= ~(asserted & 0x3ff);
7130
7131    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7132
7133    REG_WR(sc, aeu_addr, aeu_mask);
7134
7135    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7136
7137    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7138    sc->attn_state |= asserted;
7139    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7140
7141    if (asserted & ATTN_HARD_WIRED_MASK) {
7142        if (asserted & ATTN_NIG_FOR_FUNC) {
7143
7144	    bxe_acquire_phy_lock(sc);
7145            /* save nig interrupt mask */
7146            nig_mask = REG_RD(sc, nig_int_mask_addr);
7147
7148            /* If nig_mask is not set, no need to call the update function */
7149            if (nig_mask) {
7150                REG_WR(sc, nig_int_mask_addr, 0);
7151
7152                bxe_link_attn(sc);
7153            }
7154
7155            /* handle unicore attn? */
7156        }
7157
7158        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7159            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7160        }
7161
7162        if (asserted & GPIO_2_FUNC) {
7163            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7164        }
7165
7166        if (asserted & GPIO_3_FUNC) {
7167            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7168        }
7169
7170        if (asserted & GPIO_4_FUNC) {
7171            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7172        }
7173
7174        if (port == 0) {
7175            if (asserted & ATTN_GENERAL_ATTN_1) {
7176                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7177                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7178            }
7179            if (asserted & ATTN_GENERAL_ATTN_2) {
7180                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7181                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7182            }
7183            if (asserted & ATTN_GENERAL_ATTN_3) {
7184                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7185                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7186            }
7187        } else {
7188            if (asserted & ATTN_GENERAL_ATTN_4) {
7189                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7190                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7191            }
7192            if (asserted & ATTN_GENERAL_ATTN_5) {
7193                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7194                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7195            }
7196            if (asserted & ATTN_GENERAL_ATTN_6) {
7197                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7198                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7199            }
7200        }
7201    } /* hardwired */
7202
7203    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7204        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7205    } else {
7206        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7207    }
7208
7209    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7210          asserted,
7211          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7212    REG_WR(sc, reg_addr, asserted);
7213
7214    /* now set back the mask */
7215    if (asserted & ATTN_NIG_FOR_FUNC) {
7216        /*
7217         * Verify that IGU ack through BAR was written before restoring
7218         * NIG mask. This loop should exit after 2-3 iterations max.
7219         */
7220        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7221            cnt = 0;
7222
7223            do {
7224                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7225            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7226                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7227
7228            if (!igu_acked) {
7229                BLOGE(sc, "Failed to verify IGU ack on time\n");
7230            }
7231
7232            mb();
7233        }
7234
7235        REG_WR(sc, nig_int_mask_addr, nig_mask);
7236
7237	bxe_release_phy_lock(sc);
7238    }
7239}
7240
7241static void
7242bxe_print_next_block(struct bxe_softc *sc,
7243                     int              idx,
7244                     const char       *blk)
7245{
7246    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7247}
7248
7249static int
7250bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7251                              uint32_t         sig,
7252                              int              par_num,
7253                              uint8_t          print)
7254{
7255    uint32_t cur_bit = 0;
7256    int i = 0;
7257
7258    for (i = 0; sig; i++) {
7259        cur_bit = ((uint32_t)0x1 << i);
7260        if (sig & cur_bit) {
7261            switch (cur_bit) {
7262            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7263                if (print)
7264                    bxe_print_next_block(sc, par_num++, "BRB");
7265                break;
7266            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7267                if (print)
7268                    bxe_print_next_block(sc, par_num++, "PARSER");
7269                break;
7270            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7271                if (print)
7272                    bxe_print_next_block(sc, par_num++, "TSDM");
7273                break;
7274            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7275                if (print)
7276                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7277                break;
7278            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7279                if (print)
7280                    bxe_print_next_block(sc, par_num++, "TCM");
7281                break;
7282            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7283                if (print)
7284                    bxe_print_next_block(sc, par_num++, "TSEMI");
7285                break;
7286            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7287                if (print)
7288                    bxe_print_next_block(sc, par_num++, "XPB");
7289                break;
7290            }
7291
7292            /* Clear the bit */
7293            sig &= ~cur_bit;
7294        }
7295    }
7296
7297    return (par_num);
7298}
7299
7300static int
7301bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7302                              uint32_t         sig,
7303                              int              par_num,
7304                              uint8_t          *global,
7305                              uint8_t          print)
7306{
7307    int i = 0;
7308    uint32_t cur_bit = 0;
7309    for (i = 0; sig; i++) {
7310        cur_bit = ((uint32_t)0x1 << i);
7311        if (sig & cur_bit) {
7312            switch (cur_bit) {
7313            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7314                if (print)
7315                    bxe_print_next_block(sc, par_num++, "PBF");
7316                break;
7317            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7318                if (print)
7319                    bxe_print_next_block(sc, par_num++, "QM");
7320                break;
7321            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7322                if (print)
7323                    bxe_print_next_block(sc, par_num++, "TM");
7324                break;
7325            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7326                if (print)
7327                    bxe_print_next_block(sc, par_num++, "XSDM");
7328                break;
7329            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7330                if (print)
7331                    bxe_print_next_block(sc, par_num++, "XCM");
7332                break;
7333            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7334                if (print)
7335                    bxe_print_next_block(sc, par_num++, "XSEMI");
7336                break;
7337            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7338                if (print)
7339                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7340                break;
7341            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7342                if (print)
7343                    bxe_print_next_block(sc, par_num++, "NIG");
7344                break;
7345            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7346                if (print)
7347                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7348                *global = TRUE;
7349                break;
7350            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7351                if (print)
7352                    bxe_print_next_block(sc, par_num++, "DEBUG");
7353                break;
7354            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7355                if (print)
7356                    bxe_print_next_block(sc, par_num++, "USDM");
7357                break;
7358            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7359                if (print)
7360                    bxe_print_next_block(sc, par_num++, "UCM");
7361                break;
7362            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7363                if (print)
7364                    bxe_print_next_block(sc, par_num++, "USEMI");
7365                break;
7366            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7367                if (print)
7368                    bxe_print_next_block(sc, par_num++, "UPB");
7369                break;
7370            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7371                if (print)
7372                    bxe_print_next_block(sc, par_num++, "CSDM");
7373                break;
7374            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7375                if (print)
7376                    bxe_print_next_block(sc, par_num++, "CCM");
7377                break;
7378            }
7379
7380            /* Clear the bit */
7381            sig &= ~cur_bit;
7382        }
7383    }
7384
7385    return (par_num);
7386}
7387
7388static int
7389bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7390                              uint32_t         sig,
7391                              int              par_num,
7392                              uint8_t          print)
7393{
7394    uint32_t cur_bit = 0;
7395    int i = 0;
7396
7397    for (i = 0; sig; i++) {
7398        cur_bit = ((uint32_t)0x1 << i);
7399        if (sig & cur_bit) {
7400            switch (cur_bit) {
7401            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7402                if (print)
7403                    bxe_print_next_block(sc, par_num++, "CSEMI");
7404                break;
7405            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7406                if (print)
7407                    bxe_print_next_block(sc, par_num++, "PXP");
7408                break;
7409            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7410                if (print)
7411                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7412                break;
7413            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7414                if (print)
7415                    bxe_print_next_block(sc, par_num++, "CFC");
7416                break;
7417            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7418                if (print)
7419                    bxe_print_next_block(sc, par_num++, "CDU");
7420                break;
7421            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7422                if (print)
7423                    bxe_print_next_block(sc, par_num++, "DMAE");
7424                break;
7425            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7426                if (print)
7427                    bxe_print_next_block(sc, par_num++, "IGU");
7428                break;
7429            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7430                if (print)
7431                    bxe_print_next_block(sc, par_num++, "MISC");
7432                break;
7433            }
7434
7435            /* Clear the bit */
7436            sig &= ~cur_bit;
7437        }
7438    }
7439
7440    return (par_num);
7441}
7442
7443static int
7444bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7445                              uint32_t         sig,
7446                              int              par_num,
7447                              uint8_t          *global,
7448                              uint8_t          print)
7449{
7450    uint32_t cur_bit = 0;
7451    int i = 0;
7452
7453    for (i = 0; sig; i++) {
7454        cur_bit = ((uint32_t)0x1 << i);
7455        if (sig & cur_bit) {
7456            switch (cur_bit) {
7457            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7458                if (print)
7459                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7460                *global = TRUE;
7461                break;
7462            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7463                if (print)
7464                    bxe_print_next_block(sc, par_num++,
7465                              "MCP UMP RX");
7466                *global = TRUE;
7467                break;
7468            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7469                if (print)
7470                    bxe_print_next_block(sc, par_num++,
7471                              "MCP UMP TX");
7472                *global = TRUE;
7473                break;
7474            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7475                if (print)
7476                    bxe_print_next_block(sc, par_num++,
7477                              "MCP SCPAD");
7478                *global = TRUE;
7479                break;
7480            }
7481
7482            /* Clear the bit */
7483            sig &= ~cur_bit;
7484        }
7485    }
7486
7487    return (par_num);
7488}
7489
7490static int
7491bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7492                              uint32_t         sig,
7493                              int              par_num,
7494                              uint8_t          print)
7495{
7496    uint32_t cur_bit = 0;
7497    int i = 0;
7498
7499    for (i = 0; sig; i++) {
7500        cur_bit = ((uint32_t)0x1 << i);
7501        if (sig & cur_bit) {
7502            switch (cur_bit) {
7503            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7504                if (print)
7505                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7506                break;
7507            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7508                if (print)
7509                    bxe_print_next_block(sc, par_num++, "ATC");
7510                break;
7511            }
7512
7513            /* Clear the bit */
7514            sig &= ~cur_bit;
7515        }
7516    }
7517
7518    return (par_num);
7519}
7520
7521static uint8_t
7522bxe_parity_attn(struct bxe_softc *sc,
7523                uint8_t          *global,
7524                uint8_t          print,
7525                uint32_t         *sig)
7526{
7527    int par_num = 0;
7528
7529    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7530        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7531        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7532        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7533        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7534        BLOGE(sc, "Parity error: HW block parity attention:\n"
7535                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7536              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7537              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7538              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7539              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7540              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7541
7542        if (print)
7543            BLOGI(sc, "Parity errors detected in blocks: ");
7544
7545        par_num =
7546            bxe_check_blocks_with_parity0(sc, sig[0] &
7547                                          HW_PRTY_ASSERT_SET_0,
7548                                          par_num, print);
7549        par_num =
7550            bxe_check_blocks_with_parity1(sc, sig[1] &
7551                                          HW_PRTY_ASSERT_SET_1,
7552                                          par_num, global, print);
7553        par_num =
7554            bxe_check_blocks_with_parity2(sc, sig[2] &
7555                                          HW_PRTY_ASSERT_SET_2,
7556                                          par_num, print);
7557        par_num =
7558            bxe_check_blocks_with_parity3(sc, sig[3] &
7559                                          HW_PRTY_ASSERT_SET_3,
7560                                          par_num, global, print);
7561        par_num =
7562            bxe_check_blocks_with_parity4(sc, sig[4] &
7563                                          HW_PRTY_ASSERT_SET_4,
7564                                          par_num, print);
7565
7566        if (print)
7567            BLOGI(sc, "\n");
7568
7569        return (TRUE);
7570    }
7571
7572    return (FALSE);
7573}
7574
7575static uint8_t
7576bxe_chk_parity_attn(struct bxe_softc *sc,
7577                    uint8_t          *global,
7578                    uint8_t          print)
7579{
7580    struct attn_route attn = { {0} };
7581    int port = SC_PORT(sc);
7582
7583    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7584    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7585    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7586    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7587
7588    /*
7589     * Since MCP attentions can't be disabled inside the block, we need to
7590     * read AEU registers to see whether they're currently disabled
7591     */
7592    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7593                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7594                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7595                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7596
7597
7598    if (!CHIP_IS_E1x(sc))
7599        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7600
7601    return (bxe_parity_attn(sc, global, print, attn.sig));
7602}
7603
7604static void
7605bxe_attn_int_deasserted4(struct bxe_softc *sc,
7606                         uint32_t         attn)
7607{
7608    uint32_t val;
7609
7610    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7611        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7612        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7613        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7614            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7615        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7616            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7617        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7618            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7619        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7620            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7621        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7622            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7623        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7624            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7625        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7626            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7627        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7628            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7629        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7630            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7631    }
7632
7633    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7634        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7635        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7636        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7637            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7638        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7639            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7640        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7641            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7642        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7643            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7644        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7645            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7646        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7647            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7648    }
7649
7650    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7651                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7652        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7653              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7654                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7655    }
7656}
7657
7658static void
7659bxe_e1h_disable(struct bxe_softc *sc)
7660{
7661    int port = SC_PORT(sc);
7662
7663    bxe_tx_disable(sc);
7664
7665    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7666}
7667
7668static void
7669bxe_e1h_enable(struct bxe_softc *sc)
7670{
7671    int port = SC_PORT(sc);
7672
7673    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7674
7675    // XXX bxe_tx_enable(sc);
7676}
7677
7678/*
7679 * called due to MCP event (on pmf):
7680 *   reread new bandwidth configuration
7681 *   configure FW
7682 *   notify others function about the change
7683 */
7684static void
7685bxe_config_mf_bw(struct bxe_softc *sc)
7686{
7687    if (sc->link_vars.link_up) {
7688        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7689        // XXX bxe_link_sync_notify(sc);
7690    }
7691
7692    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7693}
7694
7695static void
7696bxe_set_mf_bw(struct bxe_softc *sc)
7697{
7698    bxe_config_mf_bw(sc);
7699    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7700}
7701
7702static void
7703bxe_handle_eee_event(struct bxe_softc *sc)
7704{
7705    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7706    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7707}
7708
7709#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7710
7711static void
7712bxe_drv_info_ether_stat(struct bxe_softc *sc)
7713{
7714    struct eth_stats_info *ether_stat =
7715        &sc->sp->drv_info_to_mcp.ether_stat;
7716
7717    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7718            ETH_STAT_INFO_VERSION_LEN);
7719
7720    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7721    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7722                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7723                                          ether_stat->mac_local + MAC_PAD,
7724                                          MAC_PAD, ETH_ALEN);
7725
7726    ether_stat->mtu_size = sc->mtu;
7727
7728    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7729    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7730        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7731    }
7732
7733    // XXX ether_stat->feature_flags |= ???;
7734
7735    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7736
7737    ether_stat->txq_size = sc->tx_ring_size;
7738    ether_stat->rxq_size = sc->rx_ring_size;
7739}
7740
7741static void
7742bxe_handle_drv_info_req(struct bxe_softc *sc)
7743{
7744    enum drv_info_opcode op_code;
7745    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7746
7747    /* if drv_info version supported by MFW doesn't match - send NACK */
7748    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7749        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7750        return;
7751    }
7752
7753    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7754               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7755
7756    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7757
7758    switch (op_code) {
7759    case ETH_STATS_OPCODE:
7760        bxe_drv_info_ether_stat(sc);
7761        break;
7762    case FCOE_STATS_OPCODE:
7763    case ISCSI_STATS_OPCODE:
7764    default:
7765        /* if op code isn't supported - send NACK */
7766        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7767        return;
7768    }
7769
7770    /*
7771     * If we got drv_info attn from MFW then these fields are defined in
7772     * shmem2 for sure
7773     */
7774    SHMEM2_WR(sc, drv_info_host_addr_lo,
7775              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7776    SHMEM2_WR(sc, drv_info_host_addr_hi,
7777              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7778
7779    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7780}
7781
7782static void
7783bxe_dcc_event(struct bxe_softc *sc,
7784              uint32_t         dcc_event)
7785{
7786    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7787
7788    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7789        /*
7790         * This is the only place besides the function initialization
7791         * where the sc->flags can change so it is done without any
7792         * locks
7793         */
7794        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7795            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7796            sc->flags |= BXE_MF_FUNC_DIS;
7797            bxe_e1h_disable(sc);
7798        } else {
7799            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7800            sc->flags &= ~BXE_MF_FUNC_DIS;
7801            bxe_e1h_enable(sc);
7802        }
7803        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7804    }
7805
7806    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7807        bxe_config_mf_bw(sc);
7808        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7809    }
7810
7811    /* Report results to MCP */
7812    if (dcc_event)
7813        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7814    else
7815        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7816}
7817
7818static void
7819bxe_pmf_update(struct bxe_softc *sc)
7820{
7821    int port = SC_PORT(sc);
7822    uint32_t val;
7823
7824    sc->port.pmf = 1;
7825    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7826
7827    /*
7828     * We need the mb() to ensure the ordering between the writing to
7829     * sc->port.pmf here and reading it from the bxe_periodic_task().
7830     */
7831    mb();
7832
7833    /* queue a periodic task */
7834    // XXX schedule task...
7835
7836    // XXX bxe_dcbx_pmf_update(sc);
7837
7838    /* enable nig attention */
7839    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7840    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7841        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7842        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7843    } else if (!CHIP_IS_E1x(sc)) {
7844        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7845        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7846    }
7847
7848    bxe_stats_handle(sc, STATS_EVENT_PMF);
7849}
7850
7851static int
7852bxe_mc_assert(struct bxe_softc *sc)
7853{
7854    char last_idx;
7855    int i, rc = 0;
7856    uint32_t row0, row1, row2, row3;
7857
7858    /* XSTORM */
7859    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7860    if (last_idx)
7861        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7862
7863    /* print the asserts */
7864    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7865
7866        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7867        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7868        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7869        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7870
7871        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7872            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7873                  i, row3, row2, row1, row0);
7874            rc++;
7875        } else {
7876            break;
7877        }
7878    }
7879
7880    /* TSTORM */
7881    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7882    if (last_idx) {
7883        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7884    }
7885
7886    /* print the asserts */
7887    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7888
7889        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7890        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7891        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7892        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7893
7894        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7895            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7896                  i, row3, row2, row1, row0);
7897            rc++;
7898        } else {
7899            break;
7900        }
7901    }
7902
7903    /* CSTORM */
7904    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7905    if (last_idx) {
7906        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7907    }
7908
7909    /* print the asserts */
7910    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7911
7912        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7913        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7914        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7915        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7916
7917        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7918            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7919                  i, row3, row2, row1, row0);
7920            rc++;
7921        } else {
7922            break;
7923        }
7924    }
7925
7926    /* USTORM */
7927    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7928    if (last_idx) {
7929        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7930    }
7931
7932    /* print the asserts */
7933    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7934
7935        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7936        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7937        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7938        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7939
7940        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7941            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7942                  i, row3, row2, row1, row0);
7943            rc++;
7944        } else {
7945            break;
7946        }
7947    }
7948
7949    return (rc);
7950}
7951
7952static void
7953bxe_attn_int_deasserted3(struct bxe_softc *sc,
7954                         uint32_t         attn)
7955{
7956    int func = SC_FUNC(sc);
7957    uint32_t val;
7958
7959    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7960
7961        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7962
7963            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7964            bxe_read_mf_cfg(sc);
7965            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7966                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7967            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7968
7969            if (val & DRV_STATUS_DCC_EVENT_MASK)
7970                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7971
7972            if (val & DRV_STATUS_SET_MF_BW)
7973                bxe_set_mf_bw(sc);
7974
7975            if (val & DRV_STATUS_DRV_INFO_REQ)
7976                bxe_handle_drv_info_req(sc);
7977
7978            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7979                bxe_pmf_update(sc);
7980
7981            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7982                bxe_handle_eee_event(sc);
7983
7984            if (sc->link_vars.periodic_flags &
7985                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7986                /* sync with link */
7987		bxe_acquire_phy_lock(sc);
7988                sc->link_vars.periodic_flags &=
7989                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7990		bxe_release_phy_lock(sc);
7991                if (IS_MF(sc))
7992                    ; // XXX bxe_link_sync_notify(sc);
7993                bxe_link_report(sc);
7994            }
7995
7996            /*
7997             * Always call it here: bxe_link_report() will
7998             * prevent the link indication duplication.
7999             */
8000            bxe_link_status_update(sc);
8001
8002        } else if (attn & BXE_MC_ASSERT_BITS) {
8003
8004            BLOGE(sc, "MC assert!\n");
8005            bxe_mc_assert(sc);
8006            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8007            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8008            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8009            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8010            bxe_panic(sc, ("MC assert!\n"));
8011
8012        } else if (attn & BXE_MCP_ASSERT) {
8013
8014            BLOGE(sc, "MCP assert!\n");
8015            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8016            // XXX bxe_fw_dump(sc);
8017
8018        } else {
8019            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8020        }
8021    }
8022
8023    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8024        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8025        if (attn & BXE_GRC_TIMEOUT) {
8026            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8027            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8028        }
8029        if (attn & BXE_GRC_RSV) {
8030            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8031            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8032        }
8033        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8034    }
8035}
8036
8037static void
8038bxe_attn_int_deasserted2(struct bxe_softc *sc,
8039                         uint32_t         attn)
8040{
8041    int port = SC_PORT(sc);
8042    int reg_offset;
8043    uint32_t val0, mask0, val1, mask1;
8044    uint32_t val;
8045
8046    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8047        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8048        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8049        /* CFC error attention */
8050        if (val & 0x2) {
8051            BLOGE(sc, "FATAL error from CFC\n");
8052        }
8053    }
8054
8055    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8056        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8057        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8058        /* RQ_USDMDP_FIFO_OVERFLOW */
8059        if (val & 0x18000) {
8060            BLOGE(sc, "FATAL error from PXP\n");
8061        }
8062
8063        if (!CHIP_IS_E1x(sc)) {
8064            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8065            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8066        }
8067    }
8068
8069#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8070#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8071
8072    if (attn & AEU_PXP2_HW_INT_BIT) {
8073        /*  CQ47854 workaround do not panic on
8074         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8075         */
8076        if (!CHIP_IS_E1x(sc)) {
8077            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8078            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8079            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8080            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8081            /*
8082             * If the only PXP2_EOP_ERROR_BIT is set in
8083             * STS0 and STS1 - clear it
8084             *
8085             * probably we lose additional attentions between
8086             * STS0 and STS_CLR0, in this case user will not
8087             * be notified about them
8088             */
8089            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8090                !(val1 & mask1))
8091                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8092
8093            /* print the register, since no one can restore it */
8094            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8095
8096            /*
8097             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8098             * then notify
8099             */
8100            if (val0 & PXP2_EOP_ERROR_BIT) {
8101                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8102
8103                /*
8104                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8105                 * set then clear attention from PXP2 block without panic
8106                 */
8107                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8108                    ((val1 & mask1) == 0))
8109                    attn &= ~AEU_PXP2_HW_INT_BIT;
8110            }
8111        }
8112    }
8113
8114    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8115        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8116                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8117
8118        val = REG_RD(sc, reg_offset);
8119        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8120        REG_WR(sc, reg_offset, val);
8121
8122        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8123              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8124        bxe_panic(sc, ("HW block attention set2\n"));
8125    }
8126}
8127
8128static void
8129bxe_attn_int_deasserted1(struct bxe_softc *sc,
8130                         uint32_t         attn)
8131{
8132    int port = SC_PORT(sc);
8133    int reg_offset;
8134    uint32_t val;
8135
8136    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8137        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8138        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8139        /* DORQ discard attention */
8140        if (val & 0x2) {
8141            BLOGE(sc, "FATAL error from DORQ\n");
8142        }
8143    }
8144
8145    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8146        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8147                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8148
8149        val = REG_RD(sc, reg_offset);
8150        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8151        REG_WR(sc, reg_offset, val);
8152
8153        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8154              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8155        bxe_panic(sc, ("HW block attention set1\n"));
8156    }
8157}
8158
8159static void
8160bxe_attn_int_deasserted0(struct bxe_softc *sc,
8161                         uint32_t         attn)
8162{
8163    int port = SC_PORT(sc);
8164    int reg_offset;
8165    uint32_t val;
8166
8167    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8168                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8169
8170    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8171        val = REG_RD(sc, reg_offset);
8172        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8173        REG_WR(sc, reg_offset, val);
8174
8175        BLOGW(sc, "SPIO5 hw attention\n");
8176
8177        /* Fan failure attention */
8178        elink_hw_reset_phy(&sc->link_params);
8179        bxe_fan_failure(sc);
8180    }
8181
8182    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8183	bxe_acquire_phy_lock(sc);
8184        elink_handle_module_detect_int(&sc->link_params);
8185	bxe_release_phy_lock(sc);
8186    }
8187
8188    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8189        val = REG_RD(sc, reg_offset);
8190        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8191        REG_WR(sc, reg_offset, val);
8192
8193        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8194                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8195    }
8196}
8197
8198static void
8199bxe_attn_int_deasserted(struct bxe_softc *sc,
8200                        uint32_t         deasserted)
8201{
8202    struct attn_route attn;
8203    struct attn_route *group_mask;
8204    int port = SC_PORT(sc);
8205    int index;
8206    uint32_t reg_addr;
8207    uint32_t val;
8208    uint32_t aeu_mask;
8209    uint8_t global = FALSE;
8210
8211    /*
8212     * Need to take HW lock because MCP or other port might also
8213     * try to handle this event.
8214     */
8215    bxe_acquire_alr(sc);
8216
8217    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8218        /* XXX
8219         * In case of parity errors don't handle attentions so that
8220         * other function would "see" parity errors.
8221         */
8222        sc->recovery_state = BXE_RECOVERY_INIT;
8223        // XXX schedule a recovery task...
8224        /* disable HW interrupts */
8225        bxe_int_disable(sc);
8226        bxe_release_alr(sc);
8227        return;
8228    }
8229
8230    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8231    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8232    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8233    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8234    if (!CHIP_IS_E1x(sc)) {
8235        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8236    } else {
8237        attn.sig[4] = 0;
8238    }
8239
8240    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8241          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8242
8243    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8244        if (deasserted & (1 << index)) {
8245            group_mask = &sc->attn_group[index];
8246
8247            BLOGD(sc, DBG_INTR,
8248                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8249                  group_mask->sig[0], group_mask->sig[1],
8250                  group_mask->sig[2], group_mask->sig[3],
8251                  group_mask->sig[4]);
8252
8253            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8254            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8255            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8256            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8257            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8258        }
8259    }
8260
8261    bxe_release_alr(sc);
8262
8263    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8264        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8265                    COMMAND_REG_ATTN_BITS_CLR);
8266    } else {
8267        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8268    }
8269
8270    val = ~deasserted;
8271    BLOGD(sc, DBG_INTR,
8272          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8273          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8274    REG_WR(sc, reg_addr, val);
8275
8276    if (~sc->attn_state & deasserted) {
8277        BLOGE(sc, "IGU error\n");
8278    }
8279
8280    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8281                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8282
8283    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8284
8285    aeu_mask = REG_RD(sc, reg_addr);
8286
8287    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8288          aeu_mask, deasserted);
8289    aeu_mask |= (deasserted & 0x3ff);
8290    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8291
8292    REG_WR(sc, reg_addr, aeu_mask);
8293    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8294
8295    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8296    sc->attn_state &= ~deasserted;
8297    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8298}
8299
8300static void
8301bxe_attn_int(struct bxe_softc *sc)
8302{
8303    /* read local copy of bits */
8304    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8305    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8306    uint32_t attn_state = sc->attn_state;
8307
8308    /* look for changed bits */
8309    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8310    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8311
8312    BLOGD(sc, DBG_INTR,
8313          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8314          attn_bits, attn_ack, asserted, deasserted);
8315
8316    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8317        BLOGE(sc, "BAD attention state\n");
8318    }
8319
8320    /* handle bits that were raised */
8321    if (asserted) {
8322        bxe_attn_int_asserted(sc, asserted);
8323    }
8324
8325    if (deasserted) {
8326        bxe_attn_int_deasserted(sc, deasserted);
8327    }
8328}
8329
8330static uint16_t
8331bxe_update_dsb_idx(struct bxe_softc *sc)
8332{
8333    struct host_sp_status_block *def_sb = sc->def_sb;
8334    uint16_t rc = 0;
8335
8336    mb(); /* status block is written to by the chip */
8337
8338    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8339        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8340        rc |= BXE_DEF_SB_ATT_IDX;
8341    }
8342
8343    if (sc->def_idx != def_sb->sp_sb.running_index) {
8344        sc->def_idx = def_sb->sp_sb.running_index;
8345        rc |= BXE_DEF_SB_IDX;
8346    }
8347
8348    mb();
8349
8350    return (rc);
8351}
8352
8353static inline struct ecore_queue_sp_obj *
8354bxe_cid_to_q_obj(struct bxe_softc *sc,
8355                 uint32_t         cid)
8356{
8357    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8358    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8359}
8360
8361static void
8362bxe_handle_mcast_eqe(struct bxe_softc *sc)
8363{
8364    struct ecore_mcast_ramrod_params rparam;
8365    int rc;
8366
8367    memset(&rparam, 0, sizeof(rparam));
8368
8369    rparam.mcast_obj = &sc->mcast_obj;
8370
8371    BXE_MCAST_LOCK(sc);
8372
8373    /* clear pending state for the last command */
8374    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8375
8376    /* if there are pending mcast commands - send them */
8377    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8378        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8379        if (rc < 0) {
8380            BLOGD(sc, DBG_SP,
8381                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8382        }
8383    }
8384
8385    BXE_MCAST_UNLOCK(sc);
8386}
8387
8388static void
8389bxe_handle_classification_eqe(struct bxe_softc      *sc,
8390                              union event_ring_elem *elem)
8391{
8392    unsigned long ramrod_flags = 0;
8393    int rc = 0;
8394    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8395    struct ecore_vlan_mac_obj *vlan_mac_obj;
8396
8397    /* always push next commands out, don't wait here */
8398    bit_set(&ramrod_flags, RAMROD_CONT);
8399
8400    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8401    case ECORE_FILTER_MAC_PENDING:
8402        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8403        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8404        break;
8405
8406    case ECORE_FILTER_MCAST_PENDING:
8407        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8408        /*
8409         * This is only relevant for 57710 where multicast MACs are
8410         * configured as unicast MACs using the same ramrod.
8411         */
8412        bxe_handle_mcast_eqe(sc);
8413        return;
8414
8415    default:
8416        BLOGE(sc, "Unsupported classification command: %d\n",
8417              elem->message.data.eth_event.echo);
8418        return;
8419    }
8420
8421    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8422
8423    if (rc < 0) {
8424        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8425    } else if (rc > 0) {
8426        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8427    }
8428}
8429
8430static void
8431bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8432                       union event_ring_elem *elem)
8433{
8434    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8435
8436    /* send rx_mode command again if was requested */
8437    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8438                               &sc->sp_state)) {
8439        bxe_set_storm_rx_mode(sc);
8440    }
8441}
8442
8443static void
8444bxe_update_eq_prod(struct bxe_softc *sc,
8445                   uint16_t         prod)
8446{
8447    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8448    wmb(); /* keep prod updates ordered */
8449}
8450
8451static void
8452bxe_eq_int(struct bxe_softc *sc)
8453{
8454    uint16_t hw_cons, sw_cons, sw_prod;
8455    union event_ring_elem *elem;
8456    uint8_t echo;
8457    uint32_t cid;
8458    uint8_t opcode;
8459    int spqe_cnt = 0;
8460    struct ecore_queue_sp_obj *q_obj;
8461    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8462    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8463
8464    hw_cons = le16toh(*sc->eq_cons_sb);
8465
8466    /*
8467     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8468     * when we get to the next-page we need to adjust so the loop
8469     * condition below will be met. The next element is the size of a
8470     * regular element and hence incrementing by 1
8471     */
8472    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8473        hw_cons++;
8474    }
8475
8476    /*
8477     * This function may never run in parallel with itself for a
8478     * specific sc and no need for a read memory barrier here.
8479     */
8480    sw_cons = sc->eq_cons;
8481    sw_prod = sc->eq_prod;
8482
8483    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8484          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8485
8486    for (;
8487         sw_cons != hw_cons;
8488         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8489
8490        elem = &sc->eq[EQ_DESC(sw_cons)];
8491
8492        /* elem CID originates from FW, actually LE */
8493        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8494        opcode = elem->message.opcode;
8495
8496        /* handle eq element */
8497        switch (opcode) {
8498
8499        case EVENT_RING_OPCODE_STAT_QUERY:
8500            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8501                  sc->stats_comp++);
8502            /* nothing to do with stats comp */
8503            goto next_spqe;
8504
8505        case EVENT_RING_OPCODE_CFC_DEL:
8506            /* handle according to cid range */
8507            /* we may want to verify here that the sc state is HALTING */
8508            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8509            q_obj = bxe_cid_to_q_obj(sc, cid);
8510            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8511                break;
8512            }
8513            goto next_spqe;
8514
8515        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8516            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8517            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8518                break;
8519            }
8520            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8521            goto next_spqe;
8522
8523        case EVENT_RING_OPCODE_START_TRAFFIC:
8524            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8525            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8526                break;
8527            }
8528            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8529            goto next_spqe;
8530
8531        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8532            echo = elem->message.data.function_update_event.echo;
8533            if (echo == SWITCH_UPDATE) {
8534                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8535                if (f_obj->complete_cmd(sc, f_obj,
8536                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8537                    break;
8538                }
8539            }
8540            else {
8541                BLOGD(sc, DBG_SP,
8542                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8543            }
8544            goto next_spqe;
8545
8546        case EVENT_RING_OPCODE_FORWARD_SETUP:
8547            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8548            if (q_obj->complete_cmd(sc, q_obj,
8549                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8550                break;
8551            }
8552            goto next_spqe;
8553
8554        case EVENT_RING_OPCODE_FUNCTION_START:
8555            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8556            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8557                break;
8558            }
8559            goto next_spqe;
8560
8561        case EVENT_RING_OPCODE_FUNCTION_STOP:
8562            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8563            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8564                break;
8565            }
8566            goto next_spqe;
8567        }
8568
8569        switch (opcode | sc->state) {
8570        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8571        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8572            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8573            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8574            rss_raw->clear_pending(rss_raw);
8575            break;
8576
8577        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8578        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8579        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8580        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8581        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8582        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8583            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8584            bxe_handle_classification_eqe(sc, elem);
8585            break;
8586
8587        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8588        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8589        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8590            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8591            bxe_handle_mcast_eqe(sc);
8592            break;
8593
8594        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8595        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8596        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8597            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8598            bxe_handle_rx_mode_eqe(sc, elem);
8599            break;
8600
8601        default:
8602            /* unknown event log error and continue */
8603            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8604                  elem->message.opcode, sc->state);
8605        }
8606
8607next_spqe:
8608        spqe_cnt++;
8609    } /* for */
8610
8611    mb();
8612    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8613
8614    sc->eq_cons = sw_cons;
8615    sc->eq_prod = sw_prod;
8616
8617    /* make sure that above mem writes were issued towards the memory */
8618    wmb();
8619
8620    /* update producer */
8621    bxe_update_eq_prod(sc, sc->eq_prod);
8622}
8623
8624static void
8625bxe_handle_sp_tq(void *context,
8626                 int  pending)
8627{
8628    struct bxe_softc *sc = (struct bxe_softc *)context;
8629    uint16_t status;
8630
8631    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8632
8633    /* what work needs to be performed? */
8634    status = bxe_update_dsb_idx(sc);
8635
8636    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8637
8638    /* HW attentions */
8639    if (status & BXE_DEF_SB_ATT_IDX) {
8640        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8641        bxe_attn_int(sc);
8642        status &= ~BXE_DEF_SB_ATT_IDX;
8643    }
8644
8645    /* SP events: STAT_QUERY and others */
8646    if (status & BXE_DEF_SB_IDX) {
8647        /* handle EQ completions */
8648        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8649        bxe_eq_int(sc);
8650        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8651                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8652        status &= ~BXE_DEF_SB_IDX;
8653    }
8654
8655    /* if status is non zero then something went wrong */
8656    if (__predict_false(status)) {
8657        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8658    }
8659
8660    /* ack status block only if something was actually handled */
8661    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8662               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8663
8664    /*
8665     * Must be called after the EQ processing (since eq leads to sriov
8666     * ramrod completion flows).
8667     * This flow may have been scheduled by the arrival of a ramrod
8668     * completion, or by the sriov code rescheduling itself.
8669     */
8670    // XXX bxe_iov_sp_task(sc);
8671
8672}
8673
8674static void
8675bxe_handle_fp_tq(void *context,
8676                 int  pending)
8677{
8678    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8679    struct bxe_softc *sc = fp->sc;
8680    uint8_t more_tx = FALSE;
8681    uint8_t more_rx = FALSE;
8682
8683    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8684
8685    /* XXX
8686     * IFF_DRV_RUNNING state can't be checked here since we process
8687     * slowpath events on a client queue during setup. Instead
8688     * we need to add a "process/continue" flag here that the driver
8689     * can use to tell the task here not to do anything.
8690     */
8691#if 0
8692    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8693        return;
8694    }
8695#endif
8696
8697    /* update the fastpath index */
8698    bxe_update_fp_sb_idx(fp);
8699
8700    /* XXX add loop here if ever support multiple tx CoS */
8701    /* fp->txdata[cos] */
8702    if (bxe_has_tx_work(fp)) {
8703        BXE_FP_TX_LOCK(fp);
8704        more_tx = bxe_txeof(sc, fp);
8705        BXE_FP_TX_UNLOCK(fp);
8706    }
8707
8708    if (bxe_has_rx_work(fp)) {
8709        more_rx = bxe_rxeof(sc, fp);
8710    }
8711
8712    if (more_rx /*|| more_tx*/) {
8713        /* still more work to do */
8714        taskqueue_enqueue(fp->tq, &fp->tq_task);
8715        return;
8716    }
8717
8718    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8719               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8720}
8721
8722static void
8723bxe_task_fp(struct bxe_fastpath *fp)
8724{
8725    struct bxe_softc *sc = fp->sc;
8726    uint8_t more_tx = FALSE;
8727    uint8_t more_rx = FALSE;
8728
8729    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8730
8731    /* update the fastpath index */
8732    bxe_update_fp_sb_idx(fp);
8733
8734    /* XXX add loop here if ever support multiple tx CoS */
8735    /* fp->txdata[cos] */
8736    if (bxe_has_tx_work(fp)) {
8737        BXE_FP_TX_LOCK(fp);
8738        more_tx = bxe_txeof(sc, fp);
8739        BXE_FP_TX_UNLOCK(fp);
8740    }
8741
8742    if (bxe_has_rx_work(fp)) {
8743        more_rx = bxe_rxeof(sc, fp);
8744    }
8745
8746    if (more_rx /*|| more_tx*/) {
8747        /* still more work to do, bail out if this ISR and process later */
8748        taskqueue_enqueue(fp->tq, &fp->tq_task);
8749        return;
8750    }
8751
8752    /*
8753     * Here we write the fastpath index taken before doing any tx or rx work.
8754     * It is very well possible other hw events occurred up to this point and
8755     * they were actually processed accordingly above. Since we're going to
8756     * write an older fastpath index, an interrupt is coming which we might
8757     * not do any work in.
8758     */
8759    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8760               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8761}
8762
8763/*
8764 * Legacy interrupt entry point.
8765 *
8766 * Verifies that the controller generated the interrupt and
8767 * then calls a separate routine to handle the various
8768 * interrupt causes: link, RX, and TX.
8769 */
8770static void
8771bxe_intr_legacy(void *xsc)
8772{
8773    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8774    struct bxe_fastpath *fp;
8775    uint16_t status, mask;
8776    int i;
8777
8778    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8779
8780    /*
8781     * 0 for ustorm, 1 for cstorm
8782     * the bits returned from ack_int() are 0-15
8783     * bit 0 = attention status block
8784     * bit 1 = fast path status block
8785     * a mask of 0x2 or more = tx/rx event
8786     * a mask of 1 = slow path event
8787     */
8788
8789    status = bxe_ack_int(sc);
8790
8791    /* the interrupt is not for us */
8792    if (__predict_false(status == 0)) {
8793        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8794        return;
8795    }
8796
8797    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8798
8799    FOR_EACH_ETH_QUEUE(sc, i) {
8800        fp = &sc->fp[i];
8801        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8802        if (status & mask) {
8803            /* acknowledge and disable further fastpath interrupts */
8804            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8805            bxe_task_fp(fp);
8806            status &= ~mask;
8807        }
8808    }
8809
8810    if (__predict_false(status & 0x1)) {
8811        /* acknowledge and disable further slowpath interrupts */
8812        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8813
8814        /* schedule slowpath handler */
8815        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8816
8817        status &= ~0x1;
8818    }
8819
8820    if (__predict_false(status)) {
8821        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8822    }
8823}
8824
8825/* slowpath interrupt entry point */
8826static void
8827bxe_intr_sp(void *xsc)
8828{
8829    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8830
8831    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8832
8833    /* acknowledge and disable further slowpath interrupts */
8834    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8835
8836    /* schedule slowpath handler */
8837    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8838}
8839
8840/* fastpath interrupt entry point */
8841static void
8842bxe_intr_fp(void *xfp)
8843{
8844    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8845    struct bxe_softc *sc = fp->sc;
8846
8847    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8848
8849    BLOGD(sc, DBG_INTR,
8850          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8851          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8852
8853    /* acknowledge and disable further fastpath interrupts */
8854    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8855
8856    bxe_task_fp(fp);
8857}
8858
8859/* Release all interrupts allocated by the driver. */
8860static void
8861bxe_interrupt_free(struct bxe_softc *sc)
8862{
8863    int i;
8864
8865    switch (sc->interrupt_mode) {
8866    case INTR_MODE_INTX:
8867        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8868        if (sc->intr[0].resource != NULL) {
8869            bus_release_resource(sc->dev,
8870                                 SYS_RES_IRQ,
8871                                 sc->intr[0].rid,
8872                                 sc->intr[0].resource);
8873        }
8874        break;
8875    case INTR_MODE_MSI:
8876        for (i = 0; i < sc->intr_count; i++) {
8877            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8878            if (sc->intr[i].resource && sc->intr[i].rid) {
8879                bus_release_resource(sc->dev,
8880                                     SYS_RES_IRQ,
8881                                     sc->intr[i].rid,
8882                                     sc->intr[i].resource);
8883            }
8884        }
8885        pci_release_msi(sc->dev);
8886        break;
8887    case INTR_MODE_MSIX:
8888        for (i = 0; i < sc->intr_count; i++) {
8889            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8890            if (sc->intr[i].resource && sc->intr[i].rid) {
8891                bus_release_resource(sc->dev,
8892                                     SYS_RES_IRQ,
8893                                     sc->intr[i].rid,
8894                                     sc->intr[i].resource);
8895            }
8896        }
8897        pci_release_msi(sc->dev);
8898        break;
8899    default:
8900        /* nothing to do as initial allocation failed */
8901        break;
8902    }
8903}
8904
8905/*
8906 * This function determines and allocates the appropriate
8907 * interrupt based on system capabilites and user request.
8908 *
8909 * The user may force a particular interrupt mode, specify
8910 * the number of receive queues, specify the method for
8911 * distribuitng received frames to receive queues, or use
8912 * the default settings which will automatically select the
8913 * best supported combination.  In addition, the OS may or
8914 * may not support certain combinations of these settings.
8915 * This routine attempts to reconcile the settings requested
8916 * by the user with the capabilites available from the system
8917 * to select the optimal combination of features.
8918 *
8919 * Returns:
8920 *   0 = Success, !0 = Failure.
8921 */
8922static int
8923bxe_interrupt_alloc(struct bxe_softc *sc)
8924{
8925    int msix_count = 0;
8926    int msi_count = 0;
8927    int num_requested = 0;
8928    int num_allocated = 0;
8929    int rid, i, j;
8930    int rc;
8931
8932    /* get the number of available MSI/MSI-X interrupts from the OS */
8933    if (sc->interrupt_mode > 0) {
8934        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8935            msix_count = pci_msix_count(sc->dev);
8936        }
8937
8938        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8939            msi_count = pci_msi_count(sc->dev);
8940        }
8941
8942        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8943              msi_count, msix_count);
8944    }
8945
8946    do { /* try allocating MSI-X interrupt resources (at least 2) */
8947        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8948            break;
8949        }
8950
8951        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8952            (msix_count < 2)) {
8953            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8954            break;
8955        }
8956
8957        /* ask for the necessary number of MSI-X vectors */
8958        num_requested = min((sc->num_queues + 1), msix_count);
8959
8960        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8961
8962        num_allocated = num_requested;
8963        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8964            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8965            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8966            break;
8967        }
8968
8969        if (num_allocated < 2) { /* possible? */
8970            BLOGE(sc, "MSI-X allocation less than 2!\n");
8971            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8972            pci_release_msi(sc->dev);
8973            break;
8974        }
8975
8976        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8977              num_requested, num_allocated);
8978
8979        /* best effort so use the number of vectors allocated to us */
8980        sc->intr_count = num_allocated;
8981        sc->num_queues = num_allocated - 1;
8982
8983        rid = 1; /* initial resource identifier */
8984
8985        /* allocate the MSI-X vectors */
8986        for (i = 0; i < num_allocated; i++) {
8987            sc->intr[i].rid = (rid + i);
8988
8989            if ((sc->intr[i].resource =
8990                 bus_alloc_resource_any(sc->dev,
8991                                        SYS_RES_IRQ,
8992                                        &sc->intr[i].rid,
8993                                        RF_ACTIVE)) == NULL) {
8994                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8995                      i, (rid + i));
8996
8997                for (j = (i - 1); j >= 0; j--) {
8998                    bus_release_resource(sc->dev,
8999                                         SYS_RES_IRQ,
9000                                         sc->intr[j].rid,
9001                                         sc->intr[j].resource);
9002                }
9003
9004                sc->intr_count = 0;
9005                sc->num_queues = 0;
9006                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9007                pci_release_msi(sc->dev);
9008                break;
9009            }
9010
9011            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9012        }
9013    } while (0);
9014
9015    do { /* try allocating MSI vector resources (at least 2) */
9016        if (sc->interrupt_mode != INTR_MODE_MSI) {
9017            break;
9018        }
9019
9020        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9021            (msi_count < 1)) {
9022            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9023            break;
9024        }
9025
9026        /* ask for a single MSI vector */
9027        num_requested = 1;
9028
9029        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9030
9031        num_allocated = num_requested;
9032        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9033            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9034            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9035            break;
9036        }
9037
9038        if (num_allocated != 1) { /* possible? */
9039            BLOGE(sc, "MSI allocation is not 1!\n");
9040            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9041            pci_release_msi(sc->dev);
9042            break;
9043        }
9044
9045        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9046              num_requested, num_allocated);
9047
9048        /* best effort so use the number of vectors allocated to us */
9049        sc->intr_count = num_allocated;
9050        sc->num_queues = num_allocated;
9051
9052        rid = 1; /* initial resource identifier */
9053
9054        sc->intr[0].rid = rid;
9055
9056        if ((sc->intr[0].resource =
9057             bus_alloc_resource_any(sc->dev,
9058                                    SYS_RES_IRQ,
9059                                    &sc->intr[0].rid,
9060                                    RF_ACTIVE)) == NULL) {
9061            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9062            sc->intr_count = 0;
9063            sc->num_queues = 0;
9064            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9065            pci_release_msi(sc->dev);
9066            break;
9067        }
9068
9069        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9070    } while (0);
9071
9072    do { /* try allocating INTx vector resources */
9073        if (sc->interrupt_mode != INTR_MODE_INTX) {
9074            break;
9075        }
9076
9077        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9078
9079        /* only one vector for INTx */
9080        sc->intr_count = 1;
9081        sc->num_queues = 1;
9082
9083        rid = 0; /* initial resource identifier */
9084
9085        sc->intr[0].rid = rid;
9086
9087        if ((sc->intr[0].resource =
9088             bus_alloc_resource_any(sc->dev,
9089                                    SYS_RES_IRQ,
9090                                    &sc->intr[0].rid,
9091                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9092            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9093            sc->intr_count = 0;
9094            sc->num_queues = 0;
9095            sc->interrupt_mode = -1; /* Failed! */
9096            break;
9097        }
9098
9099        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9100    } while (0);
9101
9102    if (sc->interrupt_mode == -1) {
9103        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9104        rc = 1;
9105    } else {
9106        BLOGD(sc, DBG_LOAD,
9107              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9108              sc->interrupt_mode, sc->num_queues);
9109        rc = 0;
9110    }
9111
9112    return (rc);
9113}
9114
9115static void
9116bxe_interrupt_detach(struct bxe_softc *sc)
9117{
9118    struct bxe_fastpath *fp;
9119    int i;
9120
9121    /* release interrupt resources */
9122    for (i = 0; i < sc->intr_count; i++) {
9123        if (sc->intr[i].resource && sc->intr[i].tag) {
9124            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9125            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9126        }
9127    }
9128
9129    for (i = 0; i < sc->num_queues; i++) {
9130        fp = &sc->fp[i];
9131        if (fp->tq) {
9132            taskqueue_drain(fp->tq, &fp->tq_task);
9133            taskqueue_drain(fp->tq, &fp->tx_task);
9134            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9135                NULL))
9136                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9137        }
9138
9139        for (i = 0; i < sc->num_queues; i++) {
9140            fp = &sc->fp[i];
9141            if (fp->tq != NULL) {
9142                taskqueue_free(fp->tq);
9143                fp->tq = NULL;
9144            }
9145        }
9146    }
9147
9148    if (sc->sp_tq) {
9149        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9150        taskqueue_free(sc->sp_tq);
9151        sc->sp_tq = NULL;
9152    }
9153}
9154
9155/*
9156 * Enables interrupts and attach to the ISR.
9157 *
9158 * When using multiple MSI/MSI-X vectors the first vector
9159 * is used for slowpath operations while all remaining
9160 * vectors are used for fastpath operations.  If only a
9161 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9162 * ISR must look for both slowpath and fastpath completions.
9163 */
9164static int
9165bxe_interrupt_attach(struct bxe_softc *sc)
9166{
9167    struct bxe_fastpath *fp;
9168    int rc = 0;
9169    int i;
9170
9171    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9172             "bxe%d_sp_tq", sc->unit);
9173    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9174    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9175                                 taskqueue_thread_enqueue,
9176                                 &sc->sp_tq);
9177    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9178                            "%s", sc->sp_tq_name);
9179
9180
9181    for (i = 0; i < sc->num_queues; i++) {
9182        fp = &sc->fp[i];
9183        snprintf(fp->tq_name, sizeof(fp->tq_name),
9184                 "bxe%d_fp%d_tq", sc->unit, i);
9185        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9186        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9187        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9188                                  taskqueue_thread_enqueue,
9189                                  &fp->tq);
9190        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9191                          bxe_tx_mq_start_deferred, fp);
9192        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9193                                "%s", fp->tq_name);
9194    }
9195
9196    /* setup interrupt handlers */
9197    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9198        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9199
9200        /*
9201         * Setup the interrupt handler. Note that we pass the driver instance
9202         * to the interrupt handler for the slowpath.
9203         */
9204        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9205                                 (INTR_TYPE_NET | INTR_MPSAFE),
9206                                 NULL, bxe_intr_sp, sc,
9207                                 &sc->intr[0].tag)) != 0) {
9208            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9209            goto bxe_interrupt_attach_exit;
9210        }
9211
9212        bus_describe_intr(sc->dev, sc->intr[0].resource,
9213                          sc->intr[0].tag, "sp");
9214
9215        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9216
9217        /* initialize the fastpath vectors (note the first was used for sp) */
9218        for (i = 0; i < sc->num_queues; i++) {
9219            fp = &sc->fp[i];
9220            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9221
9222            /*
9223             * Setup the interrupt handler. Note that we pass the
9224             * fastpath context to the interrupt handler in this
9225             * case.
9226             */
9227            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9228                                     (INTR_TYPE_NET | INTR_MPSAFE),
9229                                     NULL, bxe_intr_fp, fp,
9230                                     &sc->intr[i + 1].tag)) != 0) {
9231                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9232                      (i + 1), rc);
9233                goto bxe_interrupt_attach_exit;
9234            }
9235
9236            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9237                              sc->intr[i + 1].tag, "fp%02d", i);
9238
9239            /* bind the fastpath instance to a cpu */
9240            if (sc->num_queues > 1) {
9241                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9242            }
9243
9244            fp->state = BXE_FP_STATE_IRQ;
9245        }
9246    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9247        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9248
9249        /*
9250         * Setup the interrupt handler. Note that we pass the
9251         * driver instance to the interrupt handler which
9252         * will handle both the slowpath and fastpath.
9253         */
9254        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9255                                 (INTR_TYPE_NET | INTR_MPSAFE),
9256                                 NULL, bxe_intr_legacy, sc,
9257                                 &sc->intr[0].tag)) != 0) {
9258            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9259            goto bxe_interrupt_attach_exit;
9260        }
9261
9262    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9263        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9264
9265        /*
9266         * Setup the interrupt handler. Note that we pass the
9267         * driver instance to the interrupt handler which
9268         * will handle both the slowpath and fastpath.
9269         */
9270        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9271                                 (INTR_TYPE_NET | INTR_MPSAFE),
9272                                 NULL, bxe_intr_legacy, sc,
9273                                 &sc->intr[0].tag)) != 0) {
9274            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9275            goto bxe_interrupt_attach_exit;
9276        }
9277    }
9278
9279bxe_interrupt_attach_exit:
9280
9281    return (rc);
9282}
9283
9284static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9285static int  bxe_init_hw_common(struct bxe_softc *sc);
9286static int  bxe_init_hw_port(struct bxe_softc *sc);
9287static int  bxe_init_hw_func(struct bxe_softc *sc);
9288static void bxe_reset_common(struct bxe_softc *sc);
9289static void bxe_reset_port(struct bxe_softc *sc);
9290static void bxe_reset_func(struct bxe_softc *sc);
9291static int  bxe_gunzip_init(struct bxe_softc *sc);
9292static void bxe_gunzip_end(struct bxe_softc *sc);
9293static int  bxe_init_firmware(struct bxe_softc *sc);
9294static void bxe_release_firmware(struct bxe_softc *sc);
9295
9296static struct
9297ecore_func_sp_drv_ops bxe_func_sp_drv = {
9298    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9299    .init_hw_cmn      = bxe_init_hw_common,
9300    .init_hw_port     = bxe_init_hw_port,
9301    .init_hw_func     = bxe_init_hw_func,
9302
9303    .reset_hw_cmn     = bxe_reset_common,
9304    .reset_hw_port    = bxe_reset_port,
9305    .reset_hw_func    = bxe_reset_func,
9306
9307    .gunzip_init      = bxe_gunzip_init,
9308    .gunzip_end       = bxe_gunzip_end,
9309
9310    .init_fw          = bxe_init_firmware,
9311    .release_fw       = bxe_release_firmware,
9312};
9313
9314static void
9315bxe_init_func_obj(struct bxe_softc *sc)
9316{
9317    sc->dmae_ready = 0;
9318
9319    ecore_init_func_obj(sc,
9320                        &sc->func_obj,
9321                        BXE_SP(sc, func_rdata),
9322                        BXE_SP_MAPPING(sc, func_rdata),
9323                        BXE_SP(sc, func_afex_rdata),
9324                        BXE_SP_MAPPING(sc, func_afex_rdata),
9325                        &bxe_func_sp_drv);
9326}
9327
9328static int
9329bxe_init_hw(struct bxe_softc *sc,
9330            uint32_t         load_code)
9331{
9332    struct ecore_func_state_params func_params = { NULL };
9333    int rc;
9334
9335    /* prepare the parameters for function state transitions */
9336    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9337
9338    func_params.f_obj = &sc->func_obj;
9339    func_params.cmd = ECORE_F_CMD_HW_INIT;
9340
9341    func_params.params.hw_init.load_phase = load_code;
9342
9343    /*
9344     * Via a plethora of function pointers, we will eventually reach
9345     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9346     */
9347    rc = ecore_func_state_change(sc, &func_params);
9348
9349    return (rc);
9350}
9351
9352static void
9353bxe_fill(struct bxe_softc *sc,
9354         uint32_t         addr,
9355         int              fill,
9356         uint32_t         len)
9357{
9358    uint32_t i;
9359
9360    if (!(len % 4) && !(addr % 4)) {
9361        for (i = 0; i < len; i += 4) {
9362            REG_WR(sc, (addr + i), fill);
9363        }
9364    } else {
9365        for (i = 0; i < len; i++) {
9366            REG_WR8(sc, (addr + i), fill);
9367        }
9368    }
9369}
9370
9371/* writes FP SP data to FW - data_size in dwords */
9372static void
9373bxe_wr_fp_sb_data(struct bxe_softc *sc,
9374                  int              fw_sb_id,
9375                  uint32_t         *sb_data_p,
9376                  uint32_t         data_size)
9377{
9378    int index;
9379
9380    for (index = 0; index < data_size; index++) {
9381        REG_WR(sc,
9382               (BAR_CSTRORM_INTMEM +
9383                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9384                (sizeof(uint32_t) * index)),
9385               *(sb_data_p + index));
9386    }
9387}
9388
9389static void
9390bxe_zero_fp_sb(struct bxe_softc *sc,
9391               int              fw_sb_id)
9392{
9393    struct hc_status_block_data_e2 sb_data_e2;
9394    struct hc_status_block_data_e1x sb_data_e1x;
9395    uint32_t *sb_data_p;
9396    uint32_t data_size = 0;
9397
9398    if (!CHIP_IS_E1x(sc)) {
9399        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9400        sb_data_e2.common.state = SB_DISABLED;
9401        sb_data_e2.common.p_func.vf_valid = FALSE;
9402        sb_data_p = (uint32_t *)&sb_data_e2;
9403        data_size = (sizeof(struct hc_status_block_data_e2) /
9404                     sizeof(uint32_t));
9405    } else {
9406        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9407        sb_data_e1x.common.state = SB_DISABLED;
9408        sb_data_e1x.common.p_func.vf_valid = FALSE;
9409        sb_data_p = (uint32_t *)&sb_data_e1x;
9410        data_size = (sizeof(struct hc_status_block_data_e1x) /
9411                     sizeof(uint32_t));
9412    }
9413
9414    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9415
9416    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9417             0, CSTORM_STATUS_BLOCK_SIZE);
9418    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9419             0, CSTORM_SYNC_BLOCK_SIZE);
9420}
9421
9422static void
9423bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9424                  struct hc_sp_status_block_data *sp_sb_data)
9425{
9426    int i;
9427
9428    for (i = 0;
9429         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9430         i++) {
9431        REG_WR(sc,
9432               (BAR_CSTRORM_INTMEM +
9433                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9434                (i * sizeof(uint32_t))),
9435               *((uint32_t *)sp_sb_data + i));
9436    }
9437}
9438
9439static void
9440bxe_zero_sp_sb(struct bxe_softc *sc)
9441{
9442    struct hc_sp_status_block_data sp_sb_data;
9443
9444    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9445
9446    sp_sb_data.state           = SB_DISABLED;
9447    sp_sb_data.p_func.vf_valid = FALSE;
9448
9449    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9450
9451    bxe_fill(sc,
9452             (BAR_CSTRORM_INTMEM +
9453              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9454              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9455    bxe_fill(sc,
9456             (BAR_CSTRORM_INTMEM +
9457              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9458              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9459}
9460
9461static void
9462bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9463                             int                       igu_sb_id,
9464                             int                       igu_seg_id)
9465{
9466    hc_sm->igu_sb_id      = igu_sb_id;
9467    hc_sm->igu_seg_id     = igu_seg_id;
9468    hc_sm->timer_value    = 0xFF;
9469    hc_sm->time_to_expire = 0xFFFFFFFF;
9470}
9471
9472static void
9473bxe_map_sb_state_machines(struct hc_index_data *index_data)
9474{
9475    /* zero out state machine indices */
9476
9477    /* rx indices */
9478    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9479
9480    /* tx indices */
9481    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9482    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9483    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9484    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9485
9486    /* map indices */
9487
9488    /* rx indices */
9489    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9490        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9491
9492    /* tx indices */
9493    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9494        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9495    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9496        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9497    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9498        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9499    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9500        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9501}
9502
9503static void
9504bxe_init_sb(struct bxe_softc *sc,
9505            bus_addr_t       busaddr,
9506            int              vfid,
9507            uint8_t          vf_valid,
9508            int              fw_sb_id,
9509            int              igu_sb_id)
9510{
9511    struct hc_status_block_data_e2  sb_data_e2;
9512    struct hc_status_block_data_e1x sb_data_e1x;
9513    struct hc_status_block_sm       *hc_sm_p;
9514    uint32_t *sb_data_p;
9515    int igu_seg_id;
9516    int data_size;
9517
9518    if (CHIP_INT_MODE_IS_BC(sc)) {
9519        igu_seg_id = HC_SEG_ACCESS_NORM;
9520    } else {
9521        igu_seg_id = IGU_SEG_ACCESS_NORM;
9522    }
9523
9524    bxe_zero_fp_sb(sc, fw_sb_id);
9525
9526    if (!CHIP_IS_E1x(sc)) {
9527        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9528        sb_data_e2.common.state = SB_ENABLED;
9529        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9530        sb_data_e2.common.p_func.vf_id = vfid;
9531        sb_data_e2.common.p_func.vf_valid = vf_valid;
9532        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9533        sb_data_e2.common.same_igu_sb_1b = TRUE;
9534        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9535        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9536        hc_sm_p = sb_data_e2.common.state_machine;
9537        sb_data_p = (uint32_t *)&sb_data_e2;
9538        data_size = (sizeof(struct hc_status_block_data_e2) /
9539                     sizeof(uint32_t));
9540        bxe_map_sb_state_machines(sb_data_e2.index_data);
9541    } else {
9542        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9543        sb_data_e1x.common.state = SB_ENABLED;
9544        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9545        sb_data_e1x.common.p_func.vf_id = 0xff;
9546        sb_data_e1x.common.p_func.vf_valid = FALSE;
9547        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9548        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9549        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9550        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9551        hc_sm_p = sb_data_e1x.common.state_machine;
9552        sb_data_p = (uint32_t *)&sb_data_e1x;
9553        data_size = (sizeof(struct hc_status_block_data_e1x) /
9554                     sizeof(uint32_t));
9555        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9556    }
9557
9558    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9559    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9560
9561    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9562
9563    /* write indices to HW - PCI guarantees endianity of regpairs */
9564    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9565}
9566
9567static inline uint8_t
9568bxe_fp_qzone_id(struct bxe_fastpath *fp)
9569{
9570    if (CHIP_IS_E1x(fp->sc)) {
9571        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9572    } else {
9573        return (fp->cl_id);
9574    }
9575}
9576
9577static inline uint32_t
9578bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9579                           struct bxe_fastpath *fp)
9580{
9581    uint32_t offset = BAR_USTRORM_INTMEM;
9582
9583    if (!CHIP_IS_E1x(sc)) {
9584        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9585    } else {
9586        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9587    }
9588
9589    return (offset);
9590}
9591
9592static void
9593bxe_init_eth_fp(struct bxe_softc *sc,
9594                int              idx)
9595{
9596    struct bxe_fastpath *fp = &sc->fp[idx];
9597    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9598    unsigned long q_type = 0;
9599    int cos;
9600
9601    fp->sc    = sc;
9602    fp->index = idx;
9603
9604    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9605    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9606
9607    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9608                    (SC_L_ID(sc) + idx) :
9609                    /* want client ID same as IGU SB ID for non-E1 */
9610                    fp->igu_sb_id;
9611    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9612
9613    /* setup sb indices */
9614    if (!CHIP_IS_E1x(sc)) {
9615        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9616        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9617    } else {
9618        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9619        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9620    }
9621
9622    /* init shortcut */
9623    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9624
9625    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9626
9627    /*
9628     * XXX If multiple CoS is ever supported then each fastpath structure
9629     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9630     */
9631    for (cos = 0; cos < sc->max_cos; cos++) {
9632        cids[cos] = idx;
9633    }
9634    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9635
9636    /* nothing more for a VF to do */
9637    if (IS_VF(sc)) {
9638        return;
9639    }
9640
9641    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9642                fp->fw_sb_id, fp->igu_sb_id);
9643
9644    bxe_update_fp_sb_idx(fp);
9645
9646    /* Configure Queue State object */
9647    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9648    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9649
9650    ecore_init_queue_obj(sc,
9651                         &sc->sp_objs[idx].q_obj,
9652                         fp->cl_id,
9653                         cids,
9654                         sc->max_cos,
9655                         SC_FUNC(sc),
9656                         BXE_SP(sc, q_rdata),
9657                         BXE_SP_MAPPING(sc, q_rdata),
9658                         q_type);
9659
9660    /* configure classification DBs */
9661    ecore_init_mac_obj(sc,
9662                       &sc->sp_objs[idx].mac_obj,
9663                       fp->cl_id,
9664                       idx,
9665                       SC_FUNC(sc),
9666                       BXE_SP(sc, mac_rdata),
9667                       BXE_SP_MAPPING(sc, mac_rdata),
9668                       ECORE_FILTER_MAC_PENDING,
9669                       &sc->sp_state,
9670                       ECORE_OBJ_TYPE_RX_TX,
9671                       &sc->macs_pool);
9672
9673    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9674          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9675}
9676
9677static inline void
9678bxe_update_rx_prod(struct bxe_softc    *sc,
9679                   struct bxe_fastpath *fp,
9680                   uint16_t            rx_bd_prod,
9681                   uint16_t            rx_cq_prod,
9682                   uint16_t            rx_sge_prod)
9683{
9684    struct ustorm_eth_rx_producers rx_prods = { 0 };
9685    uint32_t i;
9686
9687    /* update producers */
9688    rx_prods.bd_prod  = rx_bd_prod;
9689    rx_prods.cqe_prod = rx_cq_prod;
9690    rx_prods.sge_prod = rx_sge_prod;
9691
9692    /*
9693     * Make sure that the BD and SGE data is updated before updating the
9694     * producers since FW might read the BD/SGE right after the producer
9695     * is updated.
9696     * This is only applicable for weak-ordered memory model archs such
9697     * as IA-64. The following barrier is also mandatory since FW will
9698     * assumes BDs must have buffers.
9699     */
9700    wmb();
9701
9702    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9703        REG_WR(sc,
9704               (fp->ustorm_rx_prods_offset + (i * 4)),
9705               ((uint32_t *)&rx_prods)[i]);
9706    }
9707
9708    wmb(); /* keep prod updates ordered */
9709
9710    BLOGD(sc, DBG_RX,
9711          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9712          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9713}
9714
9715static void
9716bxe_init_rx_rings(struct bxe_softc *sc)
9717{
9718    struct bxe_fastpath *fp;
9719    int i;
9720
9721    for (i = 0; i < sc->num_queues; i++) {
9722        fp = &sc->fp[i];
9723
9724        fp->rx_bd_cons = 0;
9725
9726        /*
9727         * Activate the BD ring...
9728         * Warning, this will generate an interrupt (to the TSTORM)
9729         * so this can only be done after the chip is initialized
9730         */
9731        bxe_update_rx_prod(sc, fp,
9732                           fp->rx_bd_prod,
9733                           fp->rx_cq_prod,
9734                           fp->rx_sge_prod);
9735
9736        if (i != 0) {
9737            continue;
9738        }
9739
9740        if (CHIP_IS_E1(sc)) {
9741            REG_WR(sc,
9742                   (BAR_USTRORM_INTMEM +
9743                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9744                   U64_LO(fp->rcq_dma.paddr));
9745            REG_WR(sc,
9746                   (BAR_USTRORM_INTMEM +
9747                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9748                   U64_HI(fp->rcq_dma.paddr));
9749        }
9750    }
9751}
9752
9753static void
9754bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9755{
9756    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9757    fp->tx_db.data.zero_fill1 = 0;
9758    fp->tx_db.data.prod = 0;
9759
9760    fp->tx_pkt_prod = 0;
9761    fp->tx_pkt_cons = 0;
9762    fp->tx_bd_prod = 0;
9763    fp->tx_bd_cons = 0;
9764    fp->eth_q_stats.tx_pkts = 0;
9765}
9766
9767static inline void
9768bxe_init_tx_rings(struct bxe_softc *sc)
9769{
9770    int i;
9771
9772    for (i = 0; i < sc->num_queues; i++) {
9773        bxe_init_tx_ring_one(&sc->fp[i]);
9774    }
9775}
9776
9777static void
9778bxe_init_def_sb(struct bxe_softc *sc)
9779{
9780    struct host_sp_status_block *def_sb = sc->def_sb;
9781    bus_addr_t mapping = sc->def_sb_dma.paddr;
9782    int igu_sp_sb_index;
9783    int igu_seg_id;
9784    int port = SC_PORT(sc);
9785    int func = SC_FUNC(sc);
9786    int reg_offset, reg_offset_en5;
9787    uint64_t section;
9788    int index, sindex;
9789    struct hc_sp_status_block_data sp_sb_data;
9790
9791    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9792
9793    if (CHIP_INT_MODE_IS_BC(sc)) {
9794        igu_sp_sb_index = DEF_SB_IGU_ID;
9795        igu_seg_id = HC_SEG_ACCESS_DEF;
9796    } else {
9797        igu_sp_sb_index = sc->igu_dsb_id;
9798        igu_seg_id = IGU_SEG_ACCESS_DEF;
9799    }
9800
9801    /* attentions */
9802    section = ((uint64_t)mapping +
9803               offsetof(struct host_sp_status_block, atten_status_block));
9804    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9805    sc->attn_state = 0;
9806
9807    reg_offset = (port) ?
9808                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9809                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9810    reg_offset_en5 = (port) ?
9811                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9812                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9813
9814    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9815        /* take care of sig[0]..sig[4] */
9816        for (sindex = 0; sindex < 4; sindex++) {
9817            sc->attn_group[index].sig[sindex] =
9818                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9819        }
9820
9821        if (!CHIP_IS_E1x(sc)) {
9822            /*
9823             * enable5 is separate from the rest of the registers,
9824             * and the address skip is 4 and not 16 between the
9825             * different groups
9826             */
9827            sc->attn_group[index].sig[4] =
9828                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9829        } else {
9830            sc->attn_group[index].sig[4] = 0;
9831        }
9832    }
9833
9834    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9835        reg_offset = (port) ?
9836                         HC_REG_ATTN_MSG1_ADDR_L :
9837                         HC_REG_ATTN_MSG0_ADDR_L;
9838        REG_WR(sc, reg_offset, U64_LO(section));
9839        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9840    } else if (!CHIP_IS_E1x(sc)) {
9841        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9842        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9843    }
9844
9845    section = ((uint64_t)mapping +
9846               offsetof(struct host_sp_status_block, sp_sb));
9847
9848    bxe_zero_sp_sb(sc);
9849
9850    /* PCI guarantees endianity of regpair */
9851    sp_sb_data.state           = SB_ENABLED;
9852    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9853    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9854    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9855    sp_sb_data.igu_seg_id      = igu_seg_id;
9856    sp_sb_data.p_func.pf_id    = func;
9857    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9858    sp_sb_data.p_func.vf_id    = 0xff;
9859
9860    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9861
9862    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9863}
9864
9865static void
9866bxe_init_sp_ring(struct bxe_softc *sc)
9867{
9868    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9869    sc->spq_prod_idx = 0;
9870    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9871    sc->spq_prod_bd = sc->spq;
9872    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9873}
9874
9875static void
9876bxe_init_eq_ring(struct bxe_softc *sc)
9877{
9878    union event_ring_elem *elem;
9879    int i;
9880
9881    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9882        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9883
9884        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9885                                                 BCM_PAGE_SIZE *
9886                                                 (i % NUM_EQ_PAGES)));
9887        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9888                                                 BCM_PAGE_SIZE *
9889                                                 (i % NUM_EQ_PAGES)));
9890    }
9891
9892    sc->eq_cons    = 0;
9893    sc->eq_prod    = NUM_EQ_DESC;
9894    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9895
9896    atomic_store_rel_long(&sc->eq_spq_left,
9897                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9898                               NUM_EQ_DESC) - 1));
9899}
9900
9901static void
9902bxe_init_internal_common(struct bxe_softc *sc)
9903{
9904    int i;
9905
9906    /*
9907     * Zero this manually as its initialization is currently missing
9908     * in the initTool.
9909     */
9910    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9911        REG_WR(sc,
9912               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9913               0);
9914    }
9915
9916    if (!CHIP_IS_E1x(sc)) {
9917        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9918                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9919    }
9920}
9921
9922static void
9923bxe_init_internal(struct bxe_softc *sc,
9924                  uint32_t         load_code)
9925{
9926    switch (load_code) {
9927    case FW_MSG_CODE_DRV_LOAD_COMMON:
9928    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9929        bxe_init_internal_common(sc);
9930        /* no break */
9931
9932    case FW_MSG_CODE_DRV_LOAD_PORT:
9933        /* nothing to do */
9934        /* no break */
9935
9936    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9937        /* internal memory per function is initialized inside bxe_pf_init */
9938        break;
9939
9940    default:
9941        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9942        break;
9943    }
9944}
9945
9946static void
9947storm_memset_func_cfg(struct bxe_softc                         *sc,
9948                      struct tstorm_eth_function_common_config *tcfg,
9949                      uint16_t                                  abs_fid)
9950{
9951    uint32_t addr;
9952    size_t size;
9953
9954    addr = (BAR_TSTRORM_INTMEM +
9955            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9956    size = sizeof(struct tstorm_eth_function_common_config);
9957    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9958}
9959
9960static void
9961bxe_func_init(struct bxe_softc            *sc,
9962              struct bxe_func_init_params *p)
9963{
9964    struct tstorm_eth_function_common_config tcfg = { 0 };
9965
9966    if (CHIP_IS_E1x(sc)) {
9967        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9968    }
9969
9970    /* Enable the function in the FW */
9971    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9972    storm_memset_func_en(sc, p->func_id, 1);
9973
9974    /* spq */
9975    if (p->func_flgs & FUNC_FLG_SPQ) {
9976        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9977        REG_WR(sc,
9978               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9979               p->spq_prod);
9980    }
9981}
9982
9983/*
9984 * Calculates the sum of vn_min_rates.
9985 * It's needed for further normalizing of the min_rates.
9986 * Returns:
9987 *   sum of vn_min_rates.
9988 *     or
9989 *   0 - if all the min_rates are 0.
9990 * In the later case fainess algorithm should be deactivated.
9991 * If all min rates are not zero then those that are zeroes will be set to 1.
9992 */
9993static void
9994bxe_calc_vn_min(struct bxe_softc       *sc,
9995                struct cmng_init_input *input)
9996{
9997    uint32_t vn_cfg;
9998    uint32_t vn_min_rate;
9999    int all_zero = 1;
10000    int vn;
10001
10002    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10003        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10004        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10005                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10006
10007        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10008            /* skip hidden VNs */
10009            vn_min_rate = 0;
10010        } else if (!vn_min_rate) {
10011            /* If min rate is zero - set it to 100 */
10012            vn_min_rate = DEF_MIN_RATE;
10013        } else {
10014            all_zero = 0;
10015        }
10016
10017        input->vnic_min_rate[vn] = vn_min_rate;
10018    }
10019
10020    /* if ETS or all min rates are zeros - disable fairness */
10021    if (BXE_IS_ETS_ENABLED(sc)) {
10022        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10023        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10024    } else if (all_zero) {
10025        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10026        BLOGD(sc, DBG_LOAD,
10027              "Fariness disabled (all MIN values are zeroes)\n");
10028    } else {
10029        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10030    }
10031}
10032
10033static inline uint16_t
10034bxe_extract_max_cfg(struct bxe_softc *sc,
10035                    uint32_t         mf_cfg)
10036{
10037    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10038                        FUNC_MF_CFG_MAX_BW_SHIFT);
10039
10040    if (!max_cfg) {
10041        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10042        max_cfg = 100;
10043    }
10044
10045    return (max_cfg);
10046}
10047
10048static void
10049bxe_calc_vn_max(struct bxe_softc       *sc,
10050                int                    vn,
10051                struct cmng_init_input *input)
10052{
10053    uint16_t vn_max_rate;
10054    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10055    uint32_t max_cfg;
10056
10057    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10058        vn_max_rate = 0;
10059    } else {
10060        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10061
10062        if (IS_MF_SI(sc)) {
10063            /* max_cfg in percents of linkspeed */
10064            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10065        } else { /* SD modes */
10066            /* max_cfg is absolute in 100Mb units */
10067            vn_max_rate = (max_cfg * 100);
10068        }
10069    }
10070
10071    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10072
10073    input->vnic_max_rate[vn] = vn_max_rate;
10074}
10075
10076static void
10077bxe_cmng_fns_init(struct bxe_softc *sc,
10078                  uint8_t          read_cfg,
10079                  uint8_t          cmng_type)
10080{
10081    struct cmng_init_input input;
10082    int vn;
10083
10084    memset(&input, 0, sizeof(struct cmng_init_input));
10085
10086    input.port_rate = sc->link_vars.line_speed;
10087
10088    if (cmng_type == CMNG_FNS_MINMAX) {
10089        /* read mf conf from shmem */
10090        if (read_cfg) {
10091            bxe_read_mf_cfg(sc);
10092        }
10093
10094        /* get VN min rate and enable fairness if not 0 */
10095        bxe_calc_vn_min(sc, &input);
10096
10097        /* get VN max rate */
10098        if (sc->port.pmf) {
10099            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10100                bxe_calc_vn_max(sc, vn, &input);
10101            }
10102        }
10103
10104        /* always enable rate shaping and fairness */
10105        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10106
10107        ecore_init_cmng(&input, &sc->cmng);
10108        return;
10109    }
10110
10111    /* rate shaping and fairness are disabled */
10112    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10113}
10114
10115static int
10116bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10117{
10118    if (CHIP_REV_IS_SLOW(sc)) {
10119        return (CMNG_FNS_NONE);
10120    }
10121
10122    if (IS_MF(sc)) {
10123        return (CMNG_FNS_MINMAX);
10124    }
10125
10126    return (CMNG_FNS_NONE);
10127}
10128
10129static void
10130storm_memset_cmng(struct bxe_softc *sc,
10131                  struct cmng_init *cmng,
10132                  uint8_t          port)
10133{
10134    int vn;
10135    int func;
10136    uint32_t addr;
10137    size_t size;
10138
10139    addr = (BAR_XSTRORM_INTMEM +
10140            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10141    size = sizeof(struct cmng_struct_per_port);
10142    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10143
10144    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10145        func = func_by_vn(sc, vn);
10146
10147        addr = (BAR_XSTRORM_INTMEM +
10148                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10149        size = sizeof(struct rate_shaping_vars_per_vn);
10150        ecore_storm_memset_struct(sc, addr, size,
10151                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10152
10153        addr = (BAR_XSTRORM_INTMEM +
10154                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10155        size = sizeof(struct fairness_vars_per_vn);
10156        ecore_storm_memset_struct(sc, addr, size,
10157                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10158    }
10159}
10160
10161static void
10162bxe_pf_init(struct bxe_softc *sc)
10163{
10164    struct bxe_func_init_params func_init = { 0 };
10165    struct event_ring_data eq_data = { { 0 } };
10166    uint16_t flags;
10167
10168    if (!CHIP_IS_E1x(sc)) {
10169        /* reset IGU PF statistics: MSIX + ATTN */
10170        /* PF */
10171        REG_WR(sc,
10172               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10173                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10174                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10175               0);
10176        /* ATTN */
10177        REG_WR(sc,
10178               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10179                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10180                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10181                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10182               0);
10183    }
10184
10185    /* function setup flags */
10186    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10187
10188    /*
10189     * This flag is relevant for E1x only.
10190     * E2 doesn't have a TPA configuration in a function level.
10191     */
10192    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10193
10194    func_init.func_flgs = flags;
10195    func_init.pf_id     = SC_FUNC(sc);
10196    func_init.func_id   = SC_FUNC(sc);
10197    func_init.spq_map   = sc->spq_dma.paddr;
10198    func_init.spq_prod  = sc->spq_prod_idx;
10199
10200    bxe_func_init(sc, &func_init);
10201
10202    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10203
10204    /*
10205     * Congestion management values depend on the link rate.
10206     * There is no active link so initial link rate is set to 10Gbps.
10207     * When the link comes up the congestion management values are
10208     * re-calculated according to the actual link rate.
10209     */
10210    sc->link_vars.line_speed = SPEED_10000;
10211    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10212
10213    /* Only the PMF sets the HW */
10214    if (sc->port.pmf) {
10215        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10216    }
10217
10218    /* init Event Queue - PCI bus guarantees correct endainity */
10219    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10220    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10221    eq_data.producer     = sc->eq_prod;
10222    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10223    eq_data.sb_id        = DEF_SB_ID;
10224    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10225}
10226
10227static void
10228bxe_hc_int_enable(struct bxe_softc *sc)
10229{
10230    int port = SC_PORT(sc);
10231    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10232    uint32_t val = REG_RD(sc, addr);
10233    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10234    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10235                           (sc->intr_count == 1)) ? TRUE : FALSE;
10236    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10237
10238    if (msix) {
10239        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10240                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10241        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10242                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10243        if (single_msix) {
10244            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10245        }
10246    } else if (msi) {
10247        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10248        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10249                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10250                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10251    } else {
10252        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10253                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10254                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10255                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10256
10257        if (!CHIP_IS_E1(sc)) {
10258            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10259                  val, port, addr);
10260
10261            REG_WR(sc, addr, val);
10262
10263            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10264        }
10265    }
10266
10267    if (CHIP_IS_E1(sc)) {
10268        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10269    }
10270
10271    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10272          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10273
10274    REG_WR(sc, addr, val);
10275
10276    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10277    mb();
10278
10279    if (!CHIP_IS_E1(sc)) {
10280        /* init leading/trailing edge */
10281        if (IS_MF(sc)) {
10282            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10283            if (sc->port.pmf) {
10284                /* enable nig and gpio3 attention */
10285                val |= 0x1100;
10286            }
10287        } else {
10288            val = 0xffff;
10289        }
10290
10291        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10292        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10293    }
10294
10295    /* make sure that interrupts are indeed enabled from here on */
10296    mb();
10297}
10298
10299static void
10300bxe_igu_int_enable(struct bxe_softc *sc)
10301{
10302    uint32_t val;
10303    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10304    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10305                           (sc->intr_count == 1)) ? TRUE : FALSE;
10306    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10307
10308    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10309
10310    if (msix) {
10311        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10312                 IGU_PF_CONF_SINGLE_ISR_EN);
10313        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10314                IGU_PF_CONF_ATTN_BIT_EN);
10315        if (single_msix) {
10316            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10317        }
10318    } else if (msi) {
10319        val &= ~IGU_PF_CONF_INT_LINE_EN;
10320        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10321                IGU_PF_CONF_ATTN_BIT_EN |
10322                IGU_PF_CONF_SINGLE_ISR_EN);
10323    } else {
10324        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10325        val |= (IGU_PF_CONF_INT_LINE_EN |
10326                IGU_PF_CONF_ATTN_BIT_EN |
10327                IGU_PF_CONF_SINGLE_ISR_EN);
10328    }
10329
10330    /* clean previous status - need to configure igu prior to ack*/
10331    if ((!msix) || single_msix) {
10332        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10333        bxe_ack_int(sc);
10334    }
10335
10336    val |= IGU_PF_CONF_FUNC_EN;
10337
10338    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10339          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10340
10341    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10342
10343    mb();
10344
10345    /* init leading/trailing edge */
10346    if (IS_MF(sc)) {
10347        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10348        if (sc->port.pmf) {
10349            /* enable nig and gpio3 attention */
10350            val |= 0x1100;
10351        }
10352    } else {
10353        val = 0xffff;
10354    }
10355
10356    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10357    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10358
10359    /* make sure that interrupts are indeed enabled from here on */
10360    mb();
10361}
10362
10363static void
10364bxe_int_enable(struct bxe_softc *sc)
10365{
10366    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10367        bxe_hc_int_enable(sc);
10368    } else {
10369        bxe_igu_int_enable(sc);
10370    }
10371}
10372
10373static void
10374bxe_hc_int_disable(struct bxe_softc *sc)
10375{
10376    int port = SC_PORT(sc);
10377    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10378    uint32_t val = REG_RD(sc, addr);
10379
10380    /*
10381     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10382     * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10383     * block
10384     */
10385    if (CHIP_IS_E1(sc)) {
10386        /*
10387         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
10388         * to prevent from HC sending interrupts after we exit the function
10389         */
10390        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10391
10392        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10393                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10394                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10395    } else {
10396        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10397                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10398                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10399                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10400    }
10401
10402    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10403
10404    /* flush all outstanding writes */
10405    mb();
10406
10407    REG_WR(sc, addr, val);
10408    if (REG_RD(sc, addr) != val) {
10409        BLOGE(sc, "proper val not read from HC IGU!\n");
10410    }
10411}
10412
10413static void
10414bxe_igu_int_disable(struct bxe_softc *sc)
10415{
10416    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10417
10418    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10419             IGU_PF_CONF_INT_LINE_EN |
10420             IGU_PF_CONF_ATTN_BIT_EN);
10421
10422    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10423
10424    /* flush all outstanding writes */
10425    mb();
10426
10427    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10428    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10429        BLOGE(sc, "proper val not read from IGU!\n");
10430    }
10431}
10432
10433static void
10434bxe_int_disable(struct bxe_softc *sc)
10435{
10436    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10437        bxe_hc_int_disable(sc);
10438    } else {
10439        bxe_igu_int_disable(sc);
10440    }
10441}
10442
10443static void
10444bxe_nic_init(struct bxe_softc *sc,
10445             int              load_code)
10446{
10447    int i;
10448
10449    for (i = 0; i < sc->num_queues; i++) {
10450        bxe_init_eth_fp(sc, i);
10451    }
10452
10453    rmb(); /* ensure status block indices were read */
10454
10455    bxe_init_rx_rings(sc);
10456    bxe_init_tx_rings(sc);
10457
10458    if (IS_VF(sc)) {
10459        return;
10460    }
10461
10462    /* initialize MOD_ABS interrupts */
10463    elink_init_mod_abs_int(sc, &sc->link_vars,
10464                           sc->devinfo.chip_id,
10465                           sc->devinfo.shmem_base,
10466                           sc->devinfo.shmem2_base,
10467                           SC_PORT(sc));
10468
10469    bxe_init_def_sb(sc);
10470    bxe_update_dsb_idx(sc);
10471    bxe_init_sp_ring(sc);
10472    bxe_init_eq_ring(sc);
10473    bxe_init_internal(sc, load_code);
10474    bxe_pf_init(sc);
10475    bxe_stats_init(sc);
10476
10477    /* flush all before enabling interrupts */
10478    mb();
10479
10480    bxe_int_enable(sc);
10481
10482    /* check for SPIO5 */
10483    bxe_attn_int_deasserted0(sc,
10484                             REG_RD(sc,
10485                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10486                                     SC_PORT(sc)*4)) &
10487                             AEU_INPUTS_ATTN_BITS_SPIO5);
10488}
10489
10490static inline void
10491bxe_init_objs(struct bxe_softc *sc)
10492{
10493    /* mcast rules must be added to tx if tx switching is enabled */
10494    ecore_obj_type o_type =
10495        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10496                                         ECORE_OBJ_TYPE_RX;
10497
10498    /* RX_MODE controlling object */
10499    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10500
10501    /* multicast configuration controlling object */
10502    ecore_init_mcast_obj(sc,
10503                         &sc->mcast_obj,
10504                         sc->fp[0].cl_id,
10505                         sc->fp[0].index,
10506                         SC_FUNC(sc),
10507                         SC_FUNC(sc),
10508                         BXE_SP(sc, mcast_rdata),
10509                         BXE_SP_MAPPING(sc, mcast_rdata),
10510                         ECORE_FILTER_MCAST_PENDING,
10511                         &sc->sp_state,
10512                         o_type);
10513
10514    /* Setup CAM credit pools */
10515    ecore_init_mac_credit_pool(sc,
10516                               &sc->macs_pool,
10517                               SC_FUNC(sc),
10518                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10519                                                 VNICS_PER_PATH(sc));
10520
10521    ecore_init_vlan_credit_pool(sc,
10522                                &sc->vlans_pool,
10523                                SC_ABS_FUNC(sc) >> 1,
10524                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10525                                                  VNICS_PER_PATH(sc));
10526
10527    /* RSS configuration object */
10528    ecore_init_rss_config_obj(sc,
10529                              &sc->rss_conf_obj,
10530                              sc->fp[0].cl_id,
10531                              sc->fp[0].index,
10532                              SC_FUNC(sc),
10533                              SC_FUNC(sc),
10534                              BXE_SP(sc, rss_rdata),
10535                              BXE_SP_MAPPING(sc, rss_rdata),
10536                              ECORE_FILTER_RSS_CONF_PENDING,
10537                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10538}
10539
10540/*
10541 * Initialize the function. This must be called before sending CLIENT_SETUP
10542 * for the first client.
10543 */
10544static inline int
10545bxe_func_start(struct bxe_softc *sc)
10546{
10547    struct ecore_func_state_params func_params = { NULL };
10548    struct ecore_func_start_params *start_params = &func_params.params.start;
10549
10550    /* Prepare parameters for function state transitions */
10551    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10552
10553    func_params.f_obj = &sc->func_obj;
10554    func_params.cmd = ECORE_F_CMD_START;
10555
10556    /* Function parameters */
10557    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10558    start_params->sd_vlan_tag = OVLAN(sc);
10559
10560    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10561        start_params->network_cos_mode = STATIC_COS;
10562    } else { /* CHIP_IS_E1X */
10563        start_params->network_cos_mode = FW_WRR;
10564    }
10565
10566    //start_params->gre_tunnel_mode = 0;
10567    //start_params->gre_tunnel_rss  = 0;
10568
10569    return (ecore_func_state_change(sc, &func_params));
10570}
10571
10572static int
10573bxe_set_power_state(struct bxe_softc *sc,
10574                    uint8_t          state)
10575{
10576    uint16_t pmcsr;
10577
10578    /* If there is no power capability, silently succeed */
10579    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10580        BLOGW(sc, "No power capability\n");
10581        return (0);
10582    }
10583
10584    pmcsr = pci_read_config(sc->dev,
10585                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10586                            2);
10587
10588    switch (state) {
10589    case PCI_PM_D0:
10590        pci_write_config(sc->dev,
10591                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10592                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10593
10594        if (pmcsr & PCIM_PSTAT_DMASK) {
10595            /* delay required during transition out of D3hot */
10596            DELAY(20000);
10597        }
10598
10599        break;
10600
10601    case PCI_PM_D3hot:
10602        /* XXX if there are other clients above don't shut down the power */
10603
10604        /* don't shut down the power for emulation and FPGA */
10605        if (CHIP_REV_IS_SLOW(sc)) {
10606            return (0);
10607        }
10608
10609        pmcsr &= ~PCIM_PSTAT_DMASK;
10610        pmcsr |= PCIM_PSTAT_D3;
10611
10612        if (sc->wol) {
10613            pmcsr |= PCIM_PSTAT_PMEENABLE;
10614        }
10615
10616        pci_write_config(sc->dev,
10617                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10618                         pmcsr, 4);
10619
10620        /*
10621         * No more memory access after this point until device is brought back
10622         * to D0 state.
10623         */
10624        break;
10625
10626    default:
10627        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10628            state, pmcsr);
10629        return (-1);
10630    }
10631
10632    return (0);
10633}
10634
10635
10636/* return true if succeeded to acquire the lock */
10637static uint8_t
10638bxe_trylock_hw_lock(struct bxe_softc *sc,
10639                    uint32_t         resource)
10640{
10641    uint32_t lock_status;
10642    uint32_t resource_bit = (1 << resource);
10643    int func = SC_FUNC(sc);
10644    uint32_t hw_lock_control_reg;
10645
10646    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10647
10648    /* Validating that the resource is within range */
10649    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10650        BLOGD(sc, DBG_LOAD,
10651              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10652              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10653        return (FALSE);
10654    }
10655
10656    if (func <= 5) {
10657        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10658    } else {
10659        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10660    }
10661
10662    /* try to acquire the lock */
10663    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10664    lock_status = REG_RD(sc, hw_lock_control_reg);
10665    if (lock_status & resource_bit) {
10666        return (TRUE);
10667    }
10668
10669    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10670        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10671        lock_status, resource_bit);
10672
10673    return (FALSE);
10674}
10675
10676/*
10677 * Get the recovery leader resource id according to the engine this function
10678 * belongs to. Currently only only 2 engines is supported.
10679 */
10680static int
10681bxe_get_leader_lock_resource(struct bxe_softc *sc)
10682{
10683    if (SC_PATH(sc)) {
10684        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10685    } else {
10686        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10687    }
10688}
10689
10690/* try to acquire a leader lock for current engine */
10691static uint8_t
10692bxe_trylock_leader_lock(struct bxe_softc *sc)
10693{
10694    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10695}
10696
10697static int
10698bxe_release_leader_lock(struct bxe_softc *sc)
10699{
10700    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10701}
10702
10703/* close gates #2, #3 and #4 */
10704static void
10705bxe_set_234_gates(struct bxe_softc *sc,
10706                  uint8_t          close)
10707{
10708    uint32_t val;
10709
10710    /* gates #2 and #4a are closed/opened for "not E1" only */
10711    if (!CHIP_IS_E1(sc)) {
10712        /* #4 */
10713        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10714        /* #2 */
10715        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10716    }
10717
10718    /* #3 */
10719    if (CHIP_IS_E1x(sc)) {
10720        /* prevent interrupts from HC on both ports */
10721        val = REG_RD(sc, HC_REG_CONFIG_1);
10722        REG_WR(sc, HC_REG_CONFIG_1,
10723               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10724               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10725
10726        val = REG_RD(sc, HC_REG_CONFIG_0);
10727        REG_WR(sc, HC_REG_CONFIG_0,
10728               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10729               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10730    } else {
10731        /* Prevent incoming interrupts in IGU */
10732        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10733
10734        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10735               (!close) ?
10736               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10737               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10738    }
10739
10740    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10741          close ? "closing" : "opening");
10742
10743    wmb();
10744}
10745
10746/* poll for pending writes bit, it should get cleared in no more than 1s */
10747static int
10748bxe_er_poll_igu_vq(struct bxe_softc *sc)
10749{
10750    uint32_t cnt = 1000;
10751    uint32_t pend_bits = 0;
10752
10753    do {
10754        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10755
10756        if (pend_bits == 0) {
10757            break;
10758        }
10759
10760        DELAY(1000);
10761    } while (--cnt > 0);
10762
10763    if (cnt == 0) {
10764        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10765        return (-1);
10766    }
10767
10768    return (0);
10769}
10770
10771#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10772
10773static void
10774bxe_clp_reset_prep(struct bxe_softc *sc,
10775                   uint32_t         *magic_val)
10776{
10777    /* Do some magic... */
10778    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10779    *magic_val = val & SHARED_MF_CLP_MAGIC;
10780    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10781}
10782
10783/* restore the value of the 'magic' bit */
10784static void
10785bxe_clp_reset_done(struct bxe_softc *sc,
10786                   uint32_t         magic_val)
10787{
10788    /* Restore the 'magic' bit value... */
10789    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10790    MFCFG_WR(sc, shared_mf_config.clp_mb,
10791              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10792}
10793
10794/* prepare for MCP reset, takes care of CLP configurations */
10795static void
10796bxe_reset_mcp_prep(struct bxe_softc *sc,
10797                   uint32_t         *magic_val)
10798{
10799    uint32_t shmem;
10800    uint32_t validity_offset;
10801
10802    /* set `magic' bit in order to save MF config */
10803    if (!CHIP_IS_E1(sc)) {
10804        bxe_clp_reset_prep(sc, magic_val);
10805    }
10806
10807    /* get shmem offset */
10808    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10809    validity_offset =
10810        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10811
10812    /* Clear validity map flags */
10813    if (shmem > 0) {
10814        REG_WR(sc, shmem + validity_offset, 0);
10815    }
10816}
10817
10818#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10819#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10820
10821static void
10822bxe_mcp_wait_one(struct bxe_softc *sc)
10823{
10824    /* special handling for emulation and FPGA (10 times longer) */
10825    if (CHIP_REV_IS_SLOW(sc)) {
10826        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10827    } else {
10828        DELAY((MCP_ONE_TIMEOUT) * 1000);
10829    }
10830}
10831
10832/* initialize shmem_base and waits for validity signature to appear */
10833static int
10834bxe_init_shmem(struct bxe_softc *sc)
10835{
10836    int cnt = 0;
10837    uint32_t val = 0;
10838
10839    do {
10840        sc->devinfo.shmem_base     =
10841        sc->link_params.shmem_base =
10842            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10843
10844        if (sc->devinfo.shmem_base) {
10845            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10846            if (val & SHR_MEM_VALIDITY_MB)
10847                return (0);
10848        }
10849
10850        bxe_mcp_wait_one(sc);
10851
10852    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10853
10854    BLOGE(sc, "BAD MCP validity signature\n");
10855
10856    return (-1);
10857}
10858
10859static int
10860bxe_reset_mcp_comp(struct bxe_softc *sc,
10861                   uint32_t         magic_val)
10862{
10863    int rc = bxe_init_shmem(sc);
10864
10865    /* Restore the `magic' bit value */
10866    if (!CHIP_IS_E1(sc)) {
10867        bxe_clp_reset_done(sc, magic_val);
10868    }
10869
10870    return (rc);
10871}
10872
10873static void
10874bxe_pxp_prep(struct bxe_softc *sc)
10875{
10876    if (!CHIP_IS_E1(sc)) {
10877        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10878        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10879        wmb();
10880    }
10881}
10882
10883/*
10884 * Reset the whole chip except for:
10885 *      - PCIE core
10886 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10887 *      - IGU
10888 *      - MISC (including AEU)
10889 *      - GRC
10890 *      - RBCN, RBCP
10891 */
10892static void
10893bxe_process_kill_chip_reset(struct bxe_softc *sc,
10894                            uint8_t          global)
10895{
10896    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10897    uint32_t global_bits2, stay_reset2;
10898
10899    /*
10900     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10901     * (per chip) blocks.
10902     */
10903    global_bits2 =
10904        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10905        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10906
10907    /*
10908     * Don't reset the following blocks.
10909     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10910     *            reset, as in 4 port device they might still be owned
10911     *            by the MCP (there is only one leader per path).
10912     */
10913    not_reset_mask1 =
10914        MISC_REGISTERS_RESET_REG_1_RST_HC |
10915        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10916        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10917
10918    not_reset_mask2 =
10919        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10920        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10921        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10922        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10923        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10924        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10925        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10926        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10927        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10928        MISC_REGISTERS_RESET_REG_2_PGLC |
10929        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10930        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10931        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10932        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10933        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10934        MISC_REGISTERS_RESET_REG_2_UMAC1;
10935
10936    /*
10937     * Keep the following blocks in reset:
10938     *  - all xxMACs are handled by the elink code.
10939     */
10940    stay_reset2 =
10941        MISC_REGISTERS_RESET_REG_2_XMAC |
10942        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10943
10944    /* Full reset masks according to the chip */
10945    reset_mask1 = 0xffffffff;
10946
10947    if (CHIP_IS_E1(sc))
10948        reset_mask2 = 0xffff;
10949    else if (CHIP_IS_E1H(sc))
10950        reset_mask2 = 0x1ffff;
10951    else if (CHIP_IS_E2(sc))
10952        reset_mask2 = 0xfffff;
10953    else /* CHIP_IS_E3 */
10954        reset_mask2 = 0x3ffffff;
10955
10956    /* Don't reset global blocks unless we need to */
10957    if (!global)
10958        reset_mask2 &= ~global_bits2;
10959
10960    /*
10961     * In case of attention in the QM, we need to reset PXP
10962     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10963     * because otherwise QM reset would release 'close the gates' shortly
10964     * before resetting the PXP, then the PSWRQ would send a write
10965     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10966     * read the payload data from PSWWR, but PSWWR would not
10967     * respond. The write queue in PGLUE would stuck, dmae commands
10968     * would not return. Therefore it's important to reset the second
10969     * reset register (containing the
10970     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10971     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10972     * bit).
10973     */
10974    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10975           reset_mask2 & (~not_reset_mask2));
10976
10977    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10978           reset_mask1 & (~not_reset_mask1));
10979
10980    mb();
10981    wmb();
10982
10983    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10984           reset_mask2 & (~stay_reset2));
10985
10986    mb();
10987    wmb();
10988
10989    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10990    wmb();
10991}
10992
10993static int
10994bxe_process_kill(struct bxe_softc *sc,
10995                 uint8_t          global)
10996{
10997    int cnt = 1000;
10998    uint32_t val = 0;
10999    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11000    uint32_t tags_63_32 = 0;
11001
11002    /* Empty the Tetris buffer, wait for 1s */
11003    do {
11004        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11005        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11006        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11007        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11008        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11009        if (CHIP_IS_E3(sc)) {
11010            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11011        }
11012
11013        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11014            ((port_is_idle_0 & 0x1) == 0x1) &&
11015            ((port_is_idle_1 & 0x1) == 0x1) &&
11016            (pgl_exp_rom2 == 0xffffffff) &&
11017            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11018            break;
11019        DELAY(1000);
11020    } while (cnt-- > 0);
11021
11022    if (cnt <= 0) {
11023        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11024                  "are still outstanding read requests after 1s! "
11025                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11026                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11027              sr_cnt, blk_cnt, port_is_idle_0,
11028              port_is_idle_1, pgl_exp_rom2);
11029        return (-1);
11030    }
11031
11032    mb();
11033
11034    /* Close gates #2, #3 and #4 */
11035    bxe_set_234_gates(sc, TRUE);
11036
11037    /* Poll for IGU VQs for 57712 and newer chips */
11038    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11039        return (-1);
11040    }
11041
11042    /* XXX indicate that "process kill" is in progress to MCP */
11043
11044    /* clear "unprepared" bit */
11045    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11046    mb();
11047
11048    /* Make sure all is written to the chip before the reset */
11049    wmb();
11050
11051    /*
11052     * Wait for 1ms to empty GLUE and PCI-E core queues,
11053     * PSWHST, GRC and PSWRD Tetris buffer.
11054     */
11055    DELAY(1000);
11056
11057    /* Prepare to chip reset: */
11058    /* MCP */
11059    if (global) {
11060        bxe_reset_mcp_prep(sc, &val);
11061    }
11062
11063    /* PXP */
11064    bxe_pxp_prep(sc);
11065    mb();
11066
11067    /* reset the chip */
11068    bxe_process_kill_chip_reset(sc, global);
11069    mb();
11070
11071    /* clear errors in PGB */
11072    if (!CHIP_IS_E1(sc))
11073        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11074
11075    /* Recover after reset: */
11076    /* MCP */
11077    if (global && bxe_reset_mcp_comp(sc, val)) {
11078        return (-1);
11079    }
11080
11081    /* XXX add resetting the NO_MCP mode DB here */
11082
11083    /* Open the gates #2, #3 and #4 */
11084    bxe_set_234_gates(sc, FALSE);
11085
11086    /* XXX
11087     * IGU/AEU preparation bring back the AEU/IGU to a reset state
11088     * re-enable attentions
11089     */
11090
11091    return (0);
11092}
11093
11094static int
11095bxe_leader_reset(struct bxe_softc *sc)
11096{
11097    int rc = 0;
11098    uint8_t global = bxe_reset_is_global(sc);
11099    uint32_t load_code;
11100
11101    /*
11102     * If not going to reset MCP, load "fake" driver to reset HW while
11103     * driver is owner of the HW.
11104     */
11105    if (!global && !BXE_NOMCP(sc)) {
11106        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11107                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11108        if (!load_code) {
11109            BLOGE(sc, "MCP response failure, aborting\n");
11110            rc = -1;
11111            goto exit_leader_reset;
11112        }
11113
11114        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11115            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11116            BLOGE(sc, "MCP unexpected response, aborting\n");
11117            rc = -1;
11118            goto exit_leader_reset2;
11119        }
11120
11121        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11122        if (!load_code) {
11123            BLOGE(sc, "MCP response failure, aborting\n");
11124            rc = -1;
11125            goto exit_leader_reset2;
11126        }
11127    }
11128
11129    /* try to recover after the failure */
11130    if (bxe_process_kill(sc, global)) {
11131        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11132        rc = -1;
11133        goto exit_leader_reset2;
11134    }
11135
11136    /*
11137     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11138     * state.
11139     */
11140    bxe_set_reset_done(sc);
11141    if (global) {
11142        bxe_clear_reset_global(sc);
11143    }
11144
11145exit_leader_reset2:
11146
11147    /* unload "fake driver" if it was loaded */
11148    if (!global && !BXE_NOMCP(sc)) {
11149        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11150        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11151    }
11152
11153exit_leader_reset:
11154
11155    sc->is_leader = 0;
11156    bxe_release_leader_lock(sc);
11157
11158    mb();
11159    return (rc);
11160}
11161
11162/*
11163 * prepare INIT transition, parameters configured:
11164 *   - HC configuration
11165 *   - Queue's CDU context
11166 */
11167static void
11168bxe_pf_q_prep_init(struct bxe_softc               *sc,
11169                   struct bxe_fastpath            *fp,
11170                   struct ecore_queue_init_params *init_params)
11171{
11172    uint8_t cos;
11173    int cxt_index, cxt_offset;
11174
11175    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11176    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11177
11178    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11179    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11180
11181    /* HC rate */
11182    init_params->rx.hc_rate =
11183        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11184    init_params->tx.hc_rate =
11185        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11186
11187    /* FW SB ID */
11188    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11189
11190    /* CQ index among the SB indices */
11191    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11192    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11193
11194    /* set maximum number of COSs supported by this queue */
11195    init_params->max_cos = sc->max_cos;
11196
11197    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11198          fp->index, init_params->max_cos);
11199
11200    /* set the context pointers queue object */
11201    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11202        /* XXX change index/cid here if ever support multiple tx CoS */
11203        /* fp->txdata[cos]->cid */
11204        cxt_index = fp->index / ILT_PAGE_CIDS;
11205        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11206        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11207    }
11208}
11209
11210/* set flags that are common for the Tx-only and not normal connections */
11211static unsigned long
11212bxe_get_common_flags(struct bxe_softc    *sc,
11213                     struct bxe_fastpath *fp,
11214                     uint8_t             zero_stats)
11215{
11216    unsigned long flags = 0;
11217
11218    /* PF driver will always initialize the Queue to an ACTIVE state */
11219    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11220
11221    /*
11222     * tx only connections collect statistics (on the same index as the
11223     * parent connection). The statistics are zeroed when the parent
11224     * connection is initialized.
11225     */
11226
11227    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11228    if (zero_stats) {
11229        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11230    }
11231
11232    /*
11233     * tx only connections can support tx-switching, though their
11234     * CoS-ness doesn't survive the loopback
11235     */
11236    if (sc->flags & BXE_TX_SWITCHING) {
11237        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11238    }
11239
11240    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11241
11242    return (flags);
11243}
11244
11245static unsigned long
11246bxe_get_q_flags(struct bxe_softc    *sc,
11247                struct bxe_fastpath *fp,
11248                uint8_t             leading)
11249{
11250    unsigned long flags = 0;
11251
11252    if (IS_MF_SD(sc)) {
11253        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11254    }
11255
11256    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11257        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11258#if __FreeBSD_version >= 800000
11259        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11260#endif
11261    }
11262
11263    if (leading) {
11264        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11265        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11266    }
11267
11268    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11269
11270    /* merge with common flags */
11271    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11272}
11273
11274static void
11275bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11276                      struct bxe_fastpath               *fp,
11277                      struct ecore_general_setup_params *gen_init,
11278                      uint8_t                           cos)
11279{
11280    gen_init->stat_id = bxe_stats_id(fp);
11281    gen_init->spcl_id = fp->cl_id;
11282    gen_init->mtu = sc->mtu;
11283    gen_init->cos = cos;
11284}
11285
11286static void
11287bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11288                 struct bxe_fastpath           *fp,
11289                 struct rxq_pause_params       *pause,
11290                 struct ecore_rxq_setup_params *rxq_init)
11291{
11292    uint8_t max_sge = 0;
11293    uint16_t sge_sz = 0;
11294    uint16_t tpa_agg_size = 0;
11295
11296    pause->sge_th_lo = SGE_TH_LO(sc);
11297    pause->sge_th_hi = SGE_TH_HI(sc);
11298
11299    /* validate SGE ring has enough to cross high threshold */
11300    if (sc->dropless_fc &&
11301            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11302            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11303        BLOGW(sc, "sge ring threshold limit\n");
11304    }
11305
11306    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11307    tpa_agg_size = (2 * sc->mtu);
11308    if (tpa_agg_size < sc->max_aggregation_size) {
11309        tpa_agg_size = sc->max_aggregation_size;
11310    }
11311
11312    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11313    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11314                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11315    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11316
11317    /* pause - not for e1 */
11318    if (!CHIP_IS_E1(sc)) {
11319        pause->bd_th_lo = BD_TH_LO(sc);
11320        pause->bd_th_hi = BD_TH_HI(sc);
11321
11322        pause->rcq_th_lo = RCQ_TH_LO(sc);
11323        pause->rcq_th_hi = RCQ_TH_HI(sc);
11324
11325        /* validate rings have enough entries to cross high thresholds */
11326        if (sc->dropless_fc &&
11327            pause->bd_th_hi + FW_PREFETCH_CNT >
11328            sc->rx_ring_size) {
11329            BLOGW(sc, "rx bd ring threshold limit\n");
11330        }
11331
11332        if (sc->dropless_fc &&
11333            pause->rcq_th_hi + FW_PREFETCH_CNT >
11334            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11335            BLOGW(sc, "rcq ring threshold limit\n");
11336        }
11337
11338        pause->pri_map = 1;
11339    }
11340
11341    /* rxq setup */
11342    rxq_init->dscr_map   = fp->rx_dma.paddr;
11343    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11344    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11345    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11346
11347    /*
11348     * This should be a maximum number of data bytes that may be
11349     * placed on the BD (not including paddings).
11350     */
11351    rxq_init->buf_sz = (fp->rx_buf_size -
11352                        IP_HEADER_ALIGNMENT_PADDING);
11353
11354    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11355    rxq_init->tpa_agg_sz      = tpa_agg_size;
11356    rxq_init->sge_buf_sz      = sge_sz;
11357    rxq_init->max_sges_pkt    = max_sge;
11358    rxq_init->rss_engine_id   = SC_FUNC(sc);
11359    rxq_init->mcast_engine_id = SC_FUNC(sc);
11360
11361    /*
11362     * Maximum number or simultaneous TPA aggregation for this Queue.
11363     * For PF Clients it should be the maximum available number.
11364     * VF driver(s) may want to define it to a smaller value.
11365     */
11366    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11367
11368    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11369    rxq_init->fw_sb_id = fp->fw_sb_id;
11370
11371    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11372
11373    /*
11374     * configure silent vlan removal
11375     * if multi function mode is afex, then mask default vlan
11376     */
11377    if (IS_MF_AFEX(sc)) {
11378        rxq_init->silent_removal_value =
11379            sc->devinfo.mf_info.afex_def_vlan_tag;
11380        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11381    }
11382}
11383
11384static void
11385bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11386                 struct bxe_fastpath           *fp,
11387                 struct ecore_txq_setup_params *txq_init,
11388                 uint8_t                       cos)
11389{
11390    /*
11391     * XXX If multiple CoS is ever supported then each fastpath structure
11392     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11393     * fp->txdata[cos]->tx_dma.paddr;
11394     */
11395    txq_init->dscr_map     = fp->tx_dma.paddr;
11396    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11397    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11398    txq_init->fw_sb_id     = fp->fw_sb_id;
11399
11400    /*
11401     * set the TSS leading client id for TX classfication to the
11402     * leading RSS client id
11403     */
11404    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11405}
11406
11407/*
11408 * This function performs 2 steps in a queue state machine:
11409 *   1) RESET->INIT
11410 *   2) INIT->SETUP
11411 */
11412static int
11413bxe_setup_queue(struct bxe_softc    *sc,
11414                struct bxe_fastpath *fp,
11415                uint8_t             leading)
11416{
11417    struct ecore_queue_state_params q_params = { NULL };
11418    struct ecore_queue_setup_params *setup_params =
11419                        &q_params.params.setup;
11420    int rc;
11421
11422    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11423
11424    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11425
11426    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11427
11428    /* we want to wait for completion in this context */
11429    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11430
11431    /* prepare the INIT parameters */
11432    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11433
11434    /* Set the command */
11435    q_params.cmd = ECORE_Q_CMD_INIT;
11436
11437    /* Change the state to INIT */
11438    rc = ecore_queue_state_change(sc, &q_params);
11439    if (rc) {
11440        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11441        return (rc);
11442    }
11443
11444    BLOGD(sc, DBG_LOAD, "init complete\n");
11445
11446    /* now move the Queue to the SETUP state */
11447    memset(setup_params, 0, sizeof(*setup_params));
11448
11449    /* set Queue flags */
11450    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11451
11452    /* set general SETUP parameters */
11453    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11454                          FIRST_TX_COS_INDEX);
11455
11456    bxe_pf_rx_q_prep(sc, fp,
11457                     &setup_params->pause_params,
11458                     &setup_params->rxq_params);
11459
11460    bxe_pf_tx_q_prep(sc, fp,
11461                     &setup_params->txq_params,
11462                     FIRST_TX_COS_INDEX);
11463
11464    /* Set the command */
11465    q_params.cmd = ECORE_Q_CMD_SETUP;
11466
11467    /* change the state to SETUP */
11468    rc = ecore_queue_state_change(sc, &q_params);
11469    if (rc) {
11470        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11471        return (rc);
11472    }
11473
11474    return (rc);
11475}
11476
11477static int
11478bxe_setup_leading(struct bxe_softc *sc)
11479{
11480    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11481}
11482
11483static int
11484bxe_config_rss_pf(struct bxe_softc            *sc,
11485                  struct ecore_rss_config_obj *rss_obj,
11486                  uint8_t                     config_hash)
11487{
11488    struct ecore_config_rss_params params = { NULL };
11489    int i;
11490
11491    /*
11492     * Although RSS is meaningless when there is a single HW queue we
11493     * still need it enabled in order to have HW Rx hash generated.
11494     */
11495
11496    params.rss_obj = rss_obj;
11497
11498    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11499
11500    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11501
11502    /* RSS configuration */
11503    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11504    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11505    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11506    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11507    if (rss_obj->udp_rss_v4) {
11508        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11509    }
11510    if (rss_obj->udp_rss_v6) {
11511        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11512    }
11513
11514    /* Hash bits */
11515    params.rss_result_mask = MULTI_MASK;
11516
11517    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11518
11519    if (config_hash) {
11520        /* RSS keys */
11521        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11522            params.rss_key[i] = arc4random();
11523        }
11524
11525        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11526    }
11527
11528    return (ecore_config_rss(sc, &params));
11529}
11530
11531static int
11532bxe_config_rss_eth(struct bxe_softc *sc,
11533                   uint8_t          config_hash)
11534{
11535    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11536}
11537
11538static int
11539bxe_init_rss_pf(struct bxe_softc *sc)
11540{
11541    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11542    int i;
11543
11544    /*
11545     * Prepare the initial contents of the indirection table if
11546     * RSS is enabled
11547     */
11548    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11549        sc->rss_conf_obj.ind_table[i] =
11550            (sc->fp->cl_id + (i % num_eth_queues));
11551    }
11552
11553    if (sc->udp_rss) {
11554        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11555    }
11556
11557    /*
11558     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11559     * per-port, so if explicit configuration is needed, do it only
11560     * for a PMF.
11561     *
11562     * For 57712 and newer it's a per-function configuration.
11563     */
11564    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11565}
11566
11567static int
11568bxe_set_mac_one(struct bxe_softc          *sc,
11569                uint8_t                   *mac,
11570                struct ecore_vlan_mac_obj *obj,
11571                uint8_t                   set,
11572                int                       mac_type,
11573                unsigned long             *ramrod_flags)
11574{
11575    struct ecore_vlan_mac_ramrod_params ramrod_param;
11576    int rc;
11577
11578    memset(&ramrod_param, 0, sizeof(ramrod_param));
11579
11580    /* fill in general parameters */
11581    ramrod_param.vlan_mac_obj = obj;
11582    ramrod_param.ramrod_flags = *ramrod_flags;
11583
11584    /* fill a user request section if needed */
11585    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11586        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11587
11588        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11589
11590        /* Set the command: ADD or DEL */
11591        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11592                                            ECORE_VLAN_MAC_DEL;
11593    }
11594
11595    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11596
11597    if (rc == ECORE_EXISTS) {
11598        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11599        /* do not treat adding same MAC as error */
11600        rc = 0;
11601    } else if (rc < 0) {
11602        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11603    }
11604
11605    return (rc);
11606}
11607
11608static int
11609bxe_set_eth_mac(struct bxe_softc *sc,
11610                uint8_t          set)
11611{
11612    unsigned long ramrod_flags = 0;
11613
11614    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11615
11616    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11617
11618    /* Eth MAC is set on RSS leading client (fp[0]) */
11619    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11620                            &sc->sp_objs->mac_obj,
11621                            set, ECORE_ETH_MAC, &ramrod_flags));
11622}
11623
11624static int
11625bxe_get_cur_phy_idx(struct bxe_softc *sc)
11626{
11627    uint32_t sel_phy_idx = 0;
11628
11629    if (sc->link_params.num_phys <= 1) {
11630        return (ELINK_INT_PHY);
11631    }
11632
11633    if (sc->link_vars.link_up) {
11634        sel_phy_idx = ELINK_EXT_PHY1;
11635        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11636        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11637            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11638             ELINK_SUPPORTED_FIBRE))
11639            sel_phy_idx = ELINK_EXT_PHY2;
11640    } else {
11641        switch (elink_phy_selection(&sc->link_params)) {
11642        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11643        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11644        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11645               sel_phy_idx = ELINK_EXT_PHY1;
11646               break;
11647        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11648        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11649               sel_phy_idx = ELINK_EXT_PHY2;
11650               break;
11651        }
11652    }
11653
11654    return (sel_phy_idx);
11655}
11656
11657static int
11658bxe_get_link_cfg_idx(struct bxe_softc *sc)
11659{
11660    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11661
11662    /*
11663     * The selected activated PHY is always after swapping (in case PHY
11664     * swapping is enabled). So when swapping is enabled, we need to reverse
11665     * the configuration
11666     */
11667
11668    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11669        if (sel_phy_idx == ELINK_EXT_PHY1)
11670            sel_phy_idx = ELINK_EXT_PHY2;
11671        else if (sel_phy_idx == ELINK_EXT_PHY2)
11672            sel_phy_idx = ELINK_EXT_PHY1;
11673    }
11674
11675    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11676}
11677
11678static void
11679bxe_set_requested_fc(struct bxe_softc *sc)
11680{
11681    /*
11682     * Initialize link parameters structure variables
11683     * It is recommended to turn off RX FC for jumbo frames
11684     * for better performance
11685     */
11686    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11687        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11688    } else {
11689        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11690    }
11691}
11692
11693static void
11694bxe_calc_fc_adv(struct bxe_softc *sc)
11695{
11696    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11697
11698
11699    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11700                                           ADVERTISED_Pause);
11701
11702    switch (sc->link_vars.ieee_fc &
11703            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11704
11705    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11706        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11707                                          ADVERTISED_Pause);
11708        break;
11709
11710    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11711        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11712        break;
11713
11714    default:
11715        break;
11716
11717    }
11718}
11719
11720static uint16_t
11721bxe_get_mf_speed(struct bxe_softc *sc)
11722{
11723    uint16_t line_speed = sc->link_vars.line_speed;
11724    if (IS_MF(sc)) {
11725        uint16_t maxCfg =
11726            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11727
11728        /* calculate the current MAX line speed limit for the MF devices */
11729        if (IS_MF_SI(sc)) {
11730            line_speed = (line_speed * maxCfg) / 100;
11731        } else { /* SD mode */
11732            uint16_t vn_max_rate = maxCfg * 100;
11733
11734            if (vn_max_rate < line_speed) {
11735                line_speed = vn_max_rate;
11736            }
11737        }
11738    }
11739
11740    return (line_speed);
11741}
11742
11743static void
11744bxe_fill_report_data(struct bxe_softc            *sc,
11745                     struct bxe_link_report_data *data)
11746{
11747    uint16_t line_speed = bxe_get_mf_speed(sc);
11748
11749    memset(data, 0, sizeof(*data));
11750
11751    /* fill the report data with the effective line speed */
11752    data->line_speed = line_speed;
11753
11754    /* Link is down */
11755    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11756        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11757    }
11758
11759    /* Full DUPLEX */
11760    if (sc->link_vars.duplex == DUPLEX_FULL) {
11761        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11762    }
11763
11764    /* Rx Flow Control is ON */
11765    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11766        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11767    }
11768
11769    /* Tx Flow Control is ON */
11770    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11771        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11772    }
11773}
11774
11775/* report link status to OS, should be called under phy_lock */
11776static void
11777bxe_link_report_locked(struct bxe_softc *sc)
11778{
11779    struct bxe_link_report_data cur_data;
11780
11781    /* reread mf_cfg */
11782    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11783        bxe_read_mf_cfg(sc);
11784    }
11785
11786    /* Read the current link report info */
11787    bxe_fill_report_data(sc, &cur_data);
11788
11789    /* Don't report link down or exactly the same link status twice */
11790    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11791        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11792                      &sc->last_reported_link.link_report_flags) &&
11793         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11794                      &cur_data.link_report_flags))) {
11795        return;
11796    }
11797
11798	ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11799					cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11800    sc->link_cnt++;
11801
11802	ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11803    /* report new link params and remember the state for the next time */
11804    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11805
11806    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11807                     &cur_data.link_report_flags)) {
11808        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11809    } else {
11810        const char *duplex;
11811        const char *flow;
11812
11813        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11814                                   &cur_data.link_report_flags)) {
11815            duplex = "full";
11816			ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11817        } else {
11818            duplex = "half";
11819			ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11820        }
11821
11822        /*
11823         * Handle the FC at the end so that only these flags would be
11824         * possibly set. This way we may easily check if there is no FC
11825         * enabled.
11826         */
11827        if (cur_data.link_report_flags) {
11828            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11829                             &cur_data.link_report_flags) &&
11830                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11831                             &cur_data.link_report_flags)) {
11832                flow = "ON - receive & transmit";
11833            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11834                                    &cur_data.link_report_flags) &&
11835                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11836                                     &cur_data.link_report_flags)) {
11837                flow = "ON - receive";
11838            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11839                                     &cur_data.link_report_flags) &&
11840                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11841                                    &cur_data.link_report_flags)) {
11842                flow = "ON - transmit";
11843            } else {
11844                flow = "none"; /* possible? */
11845            }
11846        } else {
11847            flow = "none";
11848        }
11849
11850        if_link_state_change(sc->ifp, LINK_STATE_UP);
11851        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11852              cur_data.line_speed, duplex, flow);
11853    }
11854}
11855
11856static void
11857bxe_link_report(struct bxe_softc *sc)
11858{
11859    bxe_acquire_phy_lock(sc);
11860    bxe_link_report_locked(sc);
11861    bxe_release_phy_lock(sc);
11862}
11863
11864static void
11865bxe_link_status_update(struct bxe_softc *sc)
11866{
11867    if (sc->state != BXE_STATE_OPEN) {
11868        return;
11869    }
11870
11871    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11872        elink_link_status_update(&sc->link_params, &sc->link_vars);
11873    } else {
11874        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11875                                  ELINK_SUPPORTED_10baseT_Full |
11876                                  ELINK_SUPPORTED_100baseT_Half |
11877                                  ELINK_SUPPORTED_100baseT_Full |
11878                                  ELINK_SUPPORTED_1000baseT_Full |
11879                                  ELINK_SUPPORTED_2500baseX_Full |
11880                                  ELINK_SUPPORTED_10000baseT_Full |
11881                                  ELINK_SUPPORTED_TP |
11882                                  ELINK_SUPPORTED_FIBRE |
11883                                  ELINK_SUPPORTED_Autoneg |
11884                                  ELINK_SUPPORTED_Pause |
11885                                  ELINK_SUPPORTED_Asym_Pause);
11886        sc->port.advertising[0] = sc->port.supported[0];
11887
11888        sc->link_params.sc                = sc;
11889        sc->link_params.port              = SC_PORT(sc);
11890        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11891        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11892        sc->link_params.req_line_speed[0] = SPEED_10000;
11893        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11894        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11895
11896        if (CHIP_REV_IS_FPGA(sc)) {
11897            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11898            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11899            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11900                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11901        } else {
11902            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11903            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11904            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11905                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11906        }
11907
11908        sc->link_vars.link_up = 1;
11909
11910        sc->link_vars.duplex    = DUPLEX_FULL;
11911        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11912
11913        if (IS_PF(sc)) {
11914            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11915            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11916            bxe_link_report(sc);
11917        }
11918    }
11919
11920    if (IS_PF(sc)) {
11921        if (sc->link_vars.link_up) {
11922            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11923        } else {
11924            bxe_stats_handle(sc, STATS_EVENT_STOP);
11925        }
11926        bxe_link_report(sc);
11927    } else {
11928        bxe_link_report(sc);
11929        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11930    }
11931}
11932
11933static int
11934bxe_initial_phy_init(struct bxe_softc *sc,
11935                     int              load_mode)
11936{
11937    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11938    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11939    struct elink_params *lp = &sc->link_params;
11940
11941    bxe_set_requested_fc(sc);
11942
11943    if (CHIP_REV_IS_SLOW(sc)) {
11944        uint32_t bond = CHIP_BOND_ID(sc);
11945        uint32_t feat = 0;
11946
11947        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11948            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11949        } else if (bond & 0x4) {
11950            if (CHIP_IS_E3(sc)) {
11951                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11952            } else {
11953                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11954            }
11955        } else if (bond & 0x8) {
11956            if (CHIP_IS_E3(sc)) {
11957                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11958            } else {
11959                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11960            }
11961        }
11962
11963        /* disable EMAC for E3 and above */
11964        if (bond & 0x2) {
11965            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11966        }
11967
11968        sc->link_params.feature_config_flags |= feat;
11969    }
11970
11971    bxe_acquire_phy_lock(sc);
11972
11973    if (load_mode == LOAD_DIAG) {
11974        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11975        /* Prefer doing PHY loopback at 10G speed, if possible */
11976        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11977            if (lp->speed_cap_mask[cfg_idx] &
11978                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11979                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11980            } else {
11981                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11982            }
11983        }
11984    }
11985
11986    if (load_mode == LOAD_LOOPBACK_EXT) {
11987        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11988    }
11989
11990    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11991
11992    bxe_release_phy_lock(sc);
11993
11994    bxe_calc_fc_adv(sc);
11995
11996    if (sc->link_vars.link_up) {
11997        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11998        bxe_link_report(sc);
11999    }
12000
12001    if (!CHIP_REV_IS_SLOW(sc)) {
12002        bxe_periodic_start(sc);
12003    }
12004
12005    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12006    return (rc);
12007}
12008
12009/* must be called under IF_ADDR_LOCK */
12010static int
12011bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12012                         struct ecore_mcast_ramrod_params *p)
12013{
12014    if_t ifp = sc->ifp;
12015    int mc_count = 0;
12016    struct ifmultiaddr *ifma;
12017    struct ecore_mcast_list_elem *mc_mac;
12018
12019    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12020        if (ifma->ifma_addr->sa_family != AF_LINK) {
12021            continue;
12022        }
12023
12024        mc_count++;
12025    }
12026
12027    ECORE_LIST_INIT(&p->mcast_list);
12028    p->mcast_list_len = 0;
12029
12030    if (!mc_count) {
12031        return (0);
12032    }
12033
12034    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12035                    (M_NOWAIT | M_ZERO));
12036    if (!mc_mac) {
12037        BLOGE(sc, "Failed to allocate temp mcast list\n");
12038        return (-1);
12039    }
12040    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12041
12042    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12043        if (ifma->ifma_addr->sa_family != AF_LINK) {
12044            continue;
12045        }
12046
12047        mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
12048        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12049
12050        BLOGD(sc, DBG_LOAD,
12051              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12052              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12053              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5], mc_count);
12054       mc_mac++;
12055    }
12056
12057    p->mcast_list_len = mc_count;
12058
12059    return (0);
12060}
12061
12062static void
12063bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12064{
12065    struct ecore_mcast_list_elem *mc_mac =
12066        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12067                               struct ecore_mcast_list_elem,
12068                               link);
12069
12070    if (mc_mac) {
12071        /* only a single free as all mc_macs are in the same heap array */
12072        free(mc_mac, M_DEVBUF);
12073    }
12074}
12075static int
12076bxe_set_mc_list(struct bxe_softc *sc)
12077{
12078    struct ecore_mcast_ramrod_params rparam = { NULL };
12079    int rc = 0;
12080
12081    rparam.mcast_obj = &sc->mcast_obj;
12082
12083    BXE_MCAST_LOCK(sc);
12084
12085    /* first, clear all configured multicast MACs */
12086    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12087    if (rc < 0) {
12088        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12089        /* Manual backport parts of FreeBSD upstream r284470. */
12090        BXE_MCAST_UNLOCK(sc);
12091        return (rc);
12092    }
12093
12094    /* configure a new MACs list */
12095    rc = bxe_init_mcast_macs_list(sc, &rparam);
12096    if (rc) {
12097        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12098        BXE_MCAST_UNLOCK(sc);
12099        return (rc);
12100    }
12101
12102    /* Now add the new MACs */
12103    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12104    if (rc < 0) {
12105        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12106    }
12107
12108    bxe_free_mcast_macs_list(&rparam);
12109
12110    BXE_MCAST_UNLOCK(sc);
12111
12112    return (rc);
12113}
12114
12115static int
12116bxe_set_uc_list(struct bxe_softc *sc)
12117{
12118    if_t ifp = sc->ifp;
12119    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12120    struct ifaddr *ifa;
12121    unsigned long ramrod_flags = 0;
12122    int rc;
12123
12124#if __FreeBSD_version < 800000
12125    IF_ADDR_LOCK(ifp);
12126#else
12127    if_addr_rlock(ifp);
12128#endif
12129
12130    /* first schedule a cleanup up of old configuration */
12131    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12132    if (rc < 0) {
12133        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12134#if __FreeBSD_version < 800000
12135        IF_ADDR_UNLOCK(ifp);
12136#else
12137        if_addr_runlock(ifp);
12138#endif
12139        return (rc);
12140    }
12141
12142    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12143    while (ifa) {
12144        if (ifa->ifa_addr->sa_family != AF_LINK) {
12145            ifa = TAILQ_NEXT(ifa, ifa_link);
12146            continue;
12147        }
12148
12149        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12150                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12151        if (rc == -EEXIST) {
12152            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12153            /* do not treat adding same MAC as an error */
12154            rc = 0;
12155        } else if (rc < 0) {
12156            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12157#if __FreeBSD_version < 800000
12158            IF_ADDR_UNLOCK(ifp);
12159#else
12160            if_addr_runlock(ifp);
12161#endif
12162            return (rc);
12163        }
12164
12165        ifa = TAILQ_NEXT(ifa, ifa_link);
12166    }
12167
12168#if __FreeBSD_version < 800000
12169    IF_ADDR_UNLOCK(ifp);
12170#else
12171    if_addr_runlock(ifp);
12172#endif
12173
12174    /* Execute the pending commands */
12175    bit_set(&ramrod_flags, RAMROD_CONT);
12176    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12177                            ECORE_UC_LIST_MAC, &ramrod_flags));
12178}
12179
12180static void
12181bxe_set_rx_mode(struct bxe_softc *sc)
12182{
12183    if_t ifp = sc->ifp;
12184    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12185
12186    if (sc->state != BXE_STATE_OPEN) {
12187        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12188        return;
12189    }
12190
12191    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12192
12193    if (if_getflags(ifp) & IFF_PROMISC) {
12194        rx_mode = BXE_RX_MODE_PROMISC;
12195    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12196               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12197                CHIP_IS_E1(sc))) {
12198        rx_mode = BXE_RX_MODE_ALLMULTI;
12199    } else {
12200        if (IS_PF(sc)) {
12201            /* some multicasts */
12202            if (bxe_set_mc_list(sc) < 0) {
12203                rx_mode = BXE_RX_MODE_ALLMULTI;
12204            }
12205            if (bxe_set_uc_list(sc) < 0) {
12206                rx_mode = BXE_RX_MODE_PROMISC;
12207            }
12208        }
12209    }
12210
12211    sc->rx_mode = rx_mode;
12212
12213    /* schedule the rx_mode command */
12214    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12215        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12216        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12217        return;
12218    }
12219
12220    if (IS_PF(sc)) {
12221        bxe_set_storm_rx_mode(sc);
12222    }
12223}
12224
12225
12226/* update flags in shmem */
12227static void
12228bxe_update_drv_flags(struct bxe_softc *sc,
12229                     uint32_t         flags,
12230                     uint32_t         set)
12231{
12232    uint32_t drv_flags;
12233
12234    if (SHMEM2_HAS(sc, drv_flags)) {
12235        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12236        drv_flags = SHMEM2_RD(sc, drv_flags);
12237
12238        if (set) {
12239            SET_FLAGS(drv_flags, flags);
12240        } else {
12241            RESET_FLAGS(drv_flags, flags);
12242        }
12243
12244        SHMEM2_WR(sc, drv_flags, drv_flags);
12245        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12246
12247        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12248    }
12249}
12250
12251/* periodic timer callout routine, only runs when the interface is up */
12252
12253static void
12254bxe_periodic_callout_func(void *xsc)
12255{
12256    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12257    int i;
12258
12259    if (!BXE_CORE_TRYLOCK(sc)) {
12260        /* just bail and try again next time */
12261
12262        if ((sc->state == BXE_STATE_OPEN) &&
12263            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12264            /* schedule the next periodic callout */
12265            callout_reset(&sc->periodic_callout, hz,
12266                          bxe_periodic_callout_func, sc);
12267        }
12268
12269        return;
12270    }
12271
12272    if ((sc->state != BXE_STATE_OPEN) ||
12273        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12274        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12275        BXE_CORE_UNLOCK(sc);
12276        return;
12277        }
12278
12279
12280    /* Check for TX timeouts on any fastpath. */
12281    FOR_EACH_QUEUE(sc, i) {
12282        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12283            /* Ruh-Roh, chip was reset! */
12284            break;
12285        }
12286    }
12287
12288    if (!CHIP_REV_IS_SLOW(sc)) {
12289        /*
12290         * This barrier is needed to ensure the ordering between the writing
12291         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12292         * the reading here.
12293         */
12294        mb();
12295        if (sc->port.pmf) {
12296	    bxe_acquire_phy_lock(sc);
12297            elink_period_func(&sc->link_params, &sc->link_vars);
12298	    bxe_release_phy_lock(sc);
12299        }
12300    }
12301
12302    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12303        int mb_idx = SC_FW_MB_IDX(sc);
12304        uint32_t drv_pulse;
12305        uint32_t mcp_pulse;
12306
12307        ++sc->fw_drv_pulse_wr_seq;
12308        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12309
12310        drv_pulse = sc->fw_drv_pulse_wr_seq;
12311        bxe_drv_pulse(sc);
12312
12313        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12314                     MCP_PULSE_SEQ_MASK);
12315
12316        /*
12317         * The delta between driver pulse and mcp response should
12318         * be 1 (before mcp response) or 0 (after mcp response).
12319         */
12320        if ((drv_pulse != mcp_pulse) &&
12321            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12322            /* someone lost a heartbeat... */
12323            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12324                  drv_pulse, mcp_pulse);
12325        }
12326    }
12327
12328    /* state is BXE_STATE_OPEN */
12329    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12330
12331    BXE_CORE_UNLOCK(sc);
12332
12333    if ((sc->state == BXE_STATE_OPEN) &&
12334        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12335        /* schedule the next periodic callout */
12336        callout_reset(&sc->periodic_callout, hz,
12337                      bxe_periodic_callout_func, sc);
12338    }
12339}
12340
12341static void
12342bxe_periodic_start(struct bxe_softc *sc)
12343{
12344    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12345    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12346}
12347
12348static void
12349bxe_periodic_stop(struct bxe_softc *sc)
12350{
12351    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12352    callout_drain(&sc->periodic_callout);
12353}
12354
12355/* start the controller */
12356static __noinline int
12357bxe_nic_load(struct bxe_softc *sc,
12358             int              load_mode)
12359{
12360    uint32_t val;
12361    int load_code = 0;
12362    int i, rc = 0;
12363
12364    BXE_CORE_LOCK_ASSERT(sc);
12365
12366    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12367
12368    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12369
12370    if (IS_PF(sc)) {
12371        /* must be called before memory allocation and HW init */
12372        bxe_ilt_set_info(sc);
12373    }
12374
12375    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12376
12377    bxe_set_fp_rx_buf_size(sc);
12378
12379    if (bxe_alloc_fp_buffers(sc) != 0) {
12380        BLOGE(sc, "Failed to allocate fastpath memory\n");
12381        sc->state = BXE_STATE_CLOSED;
12382        rc = ENOMEM;
12383        goto bxe_nic_load_error0;
12384    }
12385
12386    if (bxe_alloc_mem(sc) != 0) {
12387        sc->state = BXE_STATE_CLOSED;
12388        rc = ENOMEM;
12389        goto bxe_nic_load_error0;
12390    }
12391
12392    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12393        sc->state = BXE_STATE_CLOSED;
12394        rc = ENOMEM;
12395        goto bxe_nic_load_error0;
12396    }
12397
12398    if (IS_PF(sc)) {
12399        /* set pf load just before approaching the MCP */
12400        bxe_set_pf_load(sc);
12401
12402        /* if MCP exists send load request and analyze response */
12403        if (!BXE_NOMCP(sc)) {
12404            /* attempt to load pf */
12405            if (bxe_nic_load_request(sc, &load_code) != 0) {
12406                sc->state = BXE_STATE_CLOSED;
12407                rc = ENXIO;
12408                goto bxe_nic_load_error1;
12409            }
12410
12411            /* what did the MCP say? */
12412            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12413                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12414                sc->state = BXE_STATE_CLOSED;
12415                rc = ENXIO;
12416                goto bxe_nic_load_error2;
12417            }
12418        } else {
12419            BLOGI(sc, "Device has no MCP!\n");
12420            load_code = bxe_nic_load_no_mcp(sc);
12421        }
12422
12423        /* mark PMF if applicable */
12424        bxe_nic_load_pmf(sc, load_code);
12425
12426        /* Init Function state controlling object */
12427        bxe_init_func_obj(sc);
12428
12429        /* Initialize HW */
12430        if (bxe_init_hw(sc, load_code) != 0) {
12431            BLOGE(sc, "HW init failed\n");
12432            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12433            sc->state = BXE_STATE_CLOSED;
12434            rc = ENXIO;
12435            goto bxe_nic_load_error2;
12436        }
12437    }
12438
12439    /* set ALWAYS_ALIVE bit in shmem */
12440    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12441    bxe_drv_pulse(sc);
12442    sc->flags |= BXE_NO_PULSE;
12443
12444    /* attach interrupts */
12445    if (bxe_interrupt_attach(sc) != 0) {
12446        sc->state = BXE_STATE_CLOSED;
12447        rc = ENXIO;
12448        goto bxe_nic_load_error2;
12449    }
12450
12451    bxe_nic_init(sc, load_code);
12452
12453    /* Init per-function objects */
12454    if (IS_PF(sc)) {
12455        bxe_init_objs(sc);
12456        // XXX bxe_iov_nic_init(sc);
12457
12458        /* set AFEX default VLAN tag to an invalid value */
12459        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12460        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12461
12462        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12463        rc = bxe_func_start(sc);
12464        if (rc) {
12465            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12466            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12467            sc->state = BXE_STATE_ERROR;
12468            goto bxe_nic_load_error3;
12469        }
12470
12471        /* send LOAD_DONE command to MCP */
12472        if (!BXE_NOMCP(sc)) {
12473            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12474            if (!load_code) {
12475                BLOGE(sc, "MCP response failure, aborting\n");
12476                sc->state = BXE_STATE_ERROR;
12477                rc = ENXIO;
12478                goto bxe_nic_load_error3;
12479            }
12480        }
12481
12482        rc = bxe_setup_leading(sc);
12483        if (rc) {
12484            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12485            sc->state = BXE_STATE_ERROR;
12486            goto bxe_nic_load_error3;
12487        }
12488
12489        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12490            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12491            if (rc) {
12492                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12493                sc->state = BXE_STATE_ERROR;
12494                goto bxe_nic_load_error3;
12495            }
12496        }
12497
12498        rc = bxe_init_rss_pf(sc);
12499        if (rc) {
12500            BLOGE(sc, "PF RSS init failed\n");
12501            sc->state = BXE_STATE_ERROR;
12502            goto bxe_nic_load_error3;
12503        }
12504    }
12505    /* XXX VF */
12506
12507    /* now when Clients are configured we are ready to work */
12508    sc->state = BXE_STATE_OPEN;
12509
12510    /* Configure a ucast MAC */
12511    if (IS_PF(sc)) {
12512        rc = bxe_set_eth_mac(sc, TRUE);
12513    }
12514    if (rc) {
12515        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12516        sc->state = BXE_STATE_ERROR;
12517        goto bxe_nic_load_error3;
12518    }
12519
12520    if (sc->port.pmf) {
12521        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12522        if (rc) {
12523            sc->state = BXE_STATE_ERROR;
12524            goto bxe_nic_load_error3;
12525        }
12526    }
12527
12528    sc->link_params.feature_config_flags &=
12529        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12530
12531    /* start fast path */
12532
12533    /* Initialize Rx filter */
12534    bxe_set_rx_mode(sc);
12535
12536    /* start the Tx */
12537    switch (/* XXX load_mode */LOAD_OPEN) {
12538    case LOAD_NORMAL:
12539    case LOAD_OPEN:
12540        break;
12541
12542    case LOAD_DIAG:
12543    case LOAD_LOOPBACK_EXT:
12544        sc->state = BXE_STATE_DIAG;
12545        break;
12546
12547    default:
12548        break;
12549    }
12550
12551    if (sc->port.pmf) {
12552        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12553    } else {
12554        bxe_link_status_update(sc);
12555    }
12556
12557    /* start the periodic timer callout */
12558    bxe_periodic_start(sc);
12559
12560    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12561        /* mark driver is loaded in shmem2 */
12562        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12563        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12564                  (val |
12565                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12566                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12567    }
12568
12569    /* wait for all pending SP commands to complete */
12570    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12571        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12572        bxe_periodic_stop(sc);
12573        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12574        return (ENXIO);
12575    }
12576
12577    /* Tell the stack the driver is running! */
12578    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12579
12580    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12581
12582    return (0);
12583
12584bxe_nic_load_error3:
12585
12586    if (IS_PF(sc)) {
12587        bxe_int_disable_sync(sc, 1);
12588
12589        /* clean out queued objects */
12590        bxe_squeeze_objects(sc);
12591    }
12592
12593    bxe_interrupt_detach(sc);
12594
12595bxe_nic_load_error2:
12596
12597    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12598        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12599        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12600    }
12601
12602    sc->port.pmf = 0;
12603
12604bxe_nic_load_error1:
12605
12606    /* clear pf_load status, as it was already set */
12607    if (IS_PF(sc)) {
12608        bxe_clear_pf_load(sc);
12609    }
12610
12611bxe_nic_load_error0:
12612
12613    bxe_free_fw_stats_mem(sc);
12614    bxe_free_fp_buffers(sc);
12615    bxe_free_mem(sc);
12616
12617    return (rc);
12618}
12619
12620static int
12621bxe_init_locked(struct bxe_softc *sc)
12622{
12623    int other_engine = SC_PATH(sc) ? 0 : 1;
12624    uint8_t other_load_status, load_status;
12625    uint8_t global = FALSE;
12626    int rc;
12627
12628    BXE_CORE_LOCK_ASSERT(sc);
12629
12630    /* check if the driver is already running */
12631    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12632        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12633        return (0);
12634    }
12635
12636    bxe_set_power_state(sc, PCI_PM_D0);
12637
12638    /*
12639     * If parity occurred during the unload, then attentions and/or
12640     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
12641     * loaded on the current engine to complete the recovery. Parity recovery
12642     * is only relevant for PF driver.
12643     */
12644    if (IS_PF(sc)) {
12645        other_load_status = bxe_get_load_status(sc, other_engine);
12646        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12647
12648        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12649            bxe_chk_parity_attn(sc, &global, TRUE)) {
12650            do {
12651                /*
12652                 * If there are attentions and they are in global blocks, set
12653                 * the GLOBAL_RESET bit regardless whether it will be this
12654                 * function that will complete the recovery or not.
12655                 */
12656                if (global) {
12657                    bxe_set_reset_global(sc);
12658                }
12659
12660                /*
12661                 * Only the first function on the current engine should try
12662                 * to recover in open. In case of attentions in global blocks
12663                 * only the first in the chip should try to recover.
12664                 */
12665                if ((!load_status && (!global || !other_load_status)) &&
12666                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12667                    BLOGI(sc, "Recovered during init\n");
12668                    break;
12669                }
12670
12671                /* recovery has failed... */
12672                bxe_set_power_state(sc, PCI_PM_D3hot);
12673                sc->recovery_state = BXE_RECOVERY_FAILED;
12674
12675                BLOGE(sc, "Recovery flow hasn't properly "
12676                          "completed yet, try again later. "
12677                          "If you still see this message after a "
12678                          "few retries then power cycle is required.\n");
12679
12680                rc = ENXIO;
12681                goto bxe_init_locked_done;
12682            } while (0);
12683        }
12684    }
12685
12686    sc->recovery_state = BXE_RECOVERY_DONE;
12687
12688    rc = bxe_nic_load(sc, LOAD_OPEN);
12689
12690bxe_init_locked_done:
12691
12692    if (rc) {
12693        /* Tell the stack the driver is NOT running! */
12694        BLOGE(sc, "Initialization failed, "
12695                  "stack notified driver is NOT running!\n");
12696	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12697    }
12698
12699    return (rc);
12700}
12701
12702static int
12703bxe_stop_locked(struct bxe_softc *sc)
12704{
12705    BXE_CORE_LOCK_ASSERT(sc);
12706    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12707}
12708
12709/*
12710 * Handles controller initialization when called from an unlocked routine.
12711 * ifconfig calls this function.
12712 *
12713 * Returns:
12714 *   void
12715 */
12716static void
12717bxe_init(void *xsc)
12718{
12719    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12720
12721    BXE_CORE_LOCK(sc);
12722    bxe_init_locked(sc);
12723    BXE_CORE_UNLOCK(sc);
12724}
12725
12726static int
12727bxe_init_ifnet(struct bxe_softc *sc)
12728{
12729    if_t ifp;
12730    int capabilities;
12731
12732    /* ifconfig entrypoint for media type/status reporting */
12733    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12734                 bxe_ifmedia_update,
12735                 bxe_ifmedia_status);
12736
12737    /* set the default interface values */
12738    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12739    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12740    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12741
12742    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12743	BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12744
12745    /* allocate the ifnet structure */
12746    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12747        BLOGE(sc, "Interface allocation failed!\n");
12748        return (ENXIO);
12749    }
12750
12751    if_setsoftc(ifp, sc);
12752    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12753    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12754    if_setioctlfn(ifp, bxe_ioctl);
12755    if_setstartfn(ifp, bxe_tx_start);
12756    if_setgetcounterfn(ifp, bxe_get_counter);
12757#if __FreeBSD_version >= 901504
12758    if_settransmitfn(ifp, bxe_tx_mq_start);
12759    if_setqflushfn(ifp, bxe_mq_flush);
12760#endif
12761#ifdef FreeBSD8_0
12762    if_settimer(ifp, 0);
12763#endif
12764    if_setinitfn(ifp, bxe_init);
12765    if_setmtu(ifp, sc->mtu);
12766    if_sethwassist(ifp, (CSUM_IP      |
12767                        CSUM_TCP      |
12768                        CSUM_UDP      |
12769                        CSUM_TSO      |
12770                        CSUM_TCP_IPV6 |
12771                        CSUM_UDP_IPV6));
12772
12773    capabilities =
12774#if __FreeBSD_version < 700000
12775        (IFCAP_VLAN_MTU       |
12776         IFCAP_VLAN_HWTAGGING |
12777         IFCAP_HWCSUM         |
12778         IFCAP_JUMBO_MTU      |
12779         IFCAP_LRO);
12780#else
12781        (IFCAP_VLAN_MTU       |
12782         IFCAP_VLAN_HWTAGGING |
12783         IFCAP_VLAN_HWTSO     |
12784         IFCAP_VLAN_HWFILTER  |
12785         IFCAP_VLAN_HWCSUM    |
12786         IFCAP_HWCSUM         |
12787         IFCAP_JUMBO_MTU      |
12788         IFCAP_LRO            |
12789         IFCAP_TSO4           |
12790         IFCAP_TSO6           |
12791         IFCAP_WOL_MAGIC);
12792#endif
12793    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12794    if_setcapenable(ifp, if_getcapabilities(ifp));
12795    if_setbaudrate(ifp, IF_Gbps(10));
12796/* XXX */
12797    if_setsendqlen(ifp, sc->tx_ring_size);
12798    if_setsendqready(ifp);
12799/* XXX */
12800
12801    sc->ifp = ifp;
12802
12803    /* attach to the Ethernet interface list */
12804    ether_ifattach(ifp, sc->link_params.mac_addr);
12805
12806    return (0);
12807}
12808
12809static void
12810bxe_deallocate_bars(struct bxe_softc *sc)
12811{
12812    int i;
12813
12814    for (i = 0; i < MAX_BARS; i++) {
12815        if (sc->bar[i].resource != NULL) {
12816            bus_release_resource(sc->dev,
12817                                 SYS_RES_MEMORY,
12818                                 sc->bar[i].rid,
12819                                 sc->bar[i].resource);
12820            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12821                  i, PCIR_BAR(i));
12822        }
12823    }
12824}
12825
12826static int
12827bxe_allocate_bars(struct bxe_softc *sc)
12828{
12829    u_int flags;
12830    int i;
12831
12832    memset(sc->bar, 0, sizeof(sc->bar));
12833
12834    for (i = 0; i < MAX_BARS; i++) {
12835
12836        /* memory resources reside at BARs 0, 2, 4 */
12837        /* Run `pciconf -lb` to see mappings */
12838        if ((i != 0) && (i != 2) && (i != 4)) {
12839            continue;
12840        }
12841
12842        sc->bar[i].rid = PCIR_BAR(i);
12843
12844        flags = RF_ACTIVE;
12845        if (i == 0) {
12846            flags |= RF_SHAREABLE;
12847        }
12848
12849        if ((sc->bar[i].resource =
12850             bus_alloc_resource_any(sc->dev,
12851                                    SYS_RES_MEMORY,
12852                                    &sc->bar[i].rid,
12853                                    flags)) == NULL) {
12854            return (0);
12855        }
12856
12857        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12858        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12859        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12860
12861        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
12862              i, PCIR_BAR(i),
12863              rman_get_start(sc->bar[i].resource),
12864              rman_get_end(sc->bar[i].resource),
12865              rman_get_size(sc->bar[i].resource),
12866              (uintmax_t)sc->bar[i].kva);
12867    }
12868
12869    return (0);
12870}
12871
12872static void
12873bxe_get_function_num(struct bxe_softc *sc)
12874{
12875    uint32_t val = 0;
12876
12877    /*
12878     * Read the ME register to get the function number. The ME register
12879     * holds the relative-function number and absolute-function number. The
12880     * absolute-function number appears only in E2 and above. Before that
12881     * these bits always contained zero, therefore we cannot blindly use them.
12882     */
12883
12884    val = REG_RD(sc, BAR_ME_REGISTER);
12885
12886    sc->pfunc_rel =
12887        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12888    sc->path_id =
12889        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12890
12891    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12892        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12893    } else {
12894        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12895    }
12896
12897    BLOGD(sc, DBG_LOAD,
12898          "Relative function %d, Absolute function %d, Path %d\n",
12899          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12900}
12901
12902static uint32_t
12903bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12904{
12905    uint32_t shmem2_size;
12906    uint32_t offset;
12907    uint32_t mf_cfg_offset_value;
12908
12909    /* Non 57712 */
12910    offset = (SHMEM_RD(sc, func_mb) +
12911              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12912
12913    /* 57712 plus */
12914    if (sc->devinfo.shmem2_base != 0) {
12915        shmem2_size = SHMEM2_RD(sc, size);
12916        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12917            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12918            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12919                offset = mf_cfg_offset_value;
12920            }
12921        }
12922    }
12923
12924    return (offset);
12925}
12926
12927static uint32_t
12928bxe_pcie_capability_read(struct bxe_softc *sc,
12929                         int    reg,
12930                         int    width)
12931{
12932    int pcie_reg;
12933
12934    /* ensure PCIe capability is enabled */
12935    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12936        if (pcie_reg != 0) {
12937            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12938            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12939        }
12940    }
12941
12942    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12943
12944    return (0);
12945}
12946
12947static uint8_t
12948bxe_is_pcie_pending(struct bxe_softc *sc)
12949{
12950    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12951            PCIM_EXP_STA_TRANSACTION_PND);
12952}
12953
12954/*
12955 * Walk the PCI capabiites list for the device to find what features are
12956 * supported. These capabilites may be enabled/disabled by firmware so it's
12957 * best to walk the list rather than make assumptions.
12958 */
12959static void
12960bxe_probe_pci_caps(struct bxe_softc *sc)
12961{
12962    uint16_t link_status;
12963    int reg;
12964
12965    /* check if PCI Power Management is enabled */
12966    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12967        if (reg != 0) {
12968            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12969
12970            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12971            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12972        }
12973    }
12974
12975    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12976
12977    /* handle PCIe 2.0 workarounds for 57710 */
12978    if (CHIP_IS_E1(sc)) {
12979        /* workaround for 57710 errata E4_57710_27462 */
12980        sc->devinfo.pcie_link_speed =
12981            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12982
12983        /* workaround for 57710 errata E4_57710_27488 */
12984        sc->devinfo.pcie_link_width =
12985            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12986        if (sc->devinfo.pcie_link_speed > 1) {
12987            sc->devinfo.pcie_link_width =
12988                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12989        }
12990    } else {
12991        sc->devinfo.pcie_link_speed =
12992            (link_status & PCIM_LINK_STA_SPEED);
12993        sc->devinfo.pcie_link_width =
12994            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12995    }
12996
12997    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12998          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12999
13000    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13001    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13002
13003    /* check if MSI capability is enabled */
13004    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13005        if (reg != 0) {
13006            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13007
13008            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13009            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13010        }
13011    }
13012
13013    /* check if MSI-X capability is enabled */
13014    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13015        if (reg != 0) {
13016            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13017
13018            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13019            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13020        }
13021    }
13022}
13023
13024static int
13025bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13026{
13027    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13028    uint32_t val;
13029
13030    /* get the outer vlan if we're in switch-dependent mode */
13031
13032    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13033    mf_info->ext_id = (uint16_t)val;
13034
13035    mf_info->multi_vnics_mode = 1;
13036
13037    if (!VALID_OVLAN(mf_info->ext_id)) {
13038        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13039        return (1);
13040    }
13041
13042    /* get the capabilities */
13043    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13044        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13045        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13046    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13047               FUNC_MF_CFG_PROTOCOL_FCOE) {
13048        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13049    } else {
13050        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13051    }
13052
13053    mf_info->vnics_per_port =
13054        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13055
13056    return (0);
13057}
13058
13059static uint32_t
13060bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13061{
13062    uint32_t retval = 0;
13063    uint32_t val;
13064
13065    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13066
13067    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13068        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13069            retval |= MF_PROTO_SUPPORT_ETHERNET;
13070        }
13071        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13072            retval |= MF_PROTO_SUPPORT_ISCSI;
13073        }
13074        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13075            retval |= MF_PROTO_SUPPORT_FCOE;
13076        }
13077    }
13078
13079    return (retval);
13080}
13081
13082static int
13083bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13084{
13085    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13086    uint32_t val;
13087
13088    /*
13089     * There is no outer vlan if we're in switch-independent mode.
13090     * If the mac is valid then assume multi-function.
13091     */
13092
13093    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13094
13095    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13096
13097    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13098
13099    mf_info->vnics_per_port =
13100        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13101
13102    return (0);
13103}
13104
13105static int
13106bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13107{
13108    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13109    uint32_t e1hov_tag;
13110    uint32_t func_config;
13111    uint32_t niv_config;
13112
13113    mf_info->multi_vnics_mode = 1;
13114
13115    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13116    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13117    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13118
13119    mf_info->ext_id =
13120        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13121                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13122
13123    mf_info->default_vlan =
13124        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13125                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13126
13127    mf_info->niv_allowed_priorities =
13128        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13129                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13130
13131    mf_info->niv_default_cos =
13132        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13133                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13134
13135    mf_info->afex_vlan_mode =
13136        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13137         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13138
13139    mf_info->niv_mba_enabled =
13140        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13141         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13142
13143    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13144
13145    mf_info->vnics_per_port =
13146        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13147
13148    return (0);
13149}
13150
13151static int
13152bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13153{
13154    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13155    uint32_t mf_cfg1;
13156    uint32_t mf_cfg2;
13157    uint32_t ovlan1;
13158    uint32_t ovlan2;
13159    uint8_t i, j;
13160
13161    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13162          SC_PORT(sc));
13163    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13164          mf_info->mf_config[SC_VN(sc)]);
13165    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13166          mf_info->multi_vnics_mode);
13167    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13168          mf_info->vnics_per_port);
13169    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13170          mf_info->ext_id);
13171    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13172          mf_info->min_bw[0], mf_info->min_bw[1],
13173          mf_info->min_bw[2], mf_info->min_bw[3]);
13174    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13175          mf_info->max_bw[0], mf_info->max_bw[1],
13176          mf_info->max_bw[2], mf_info->max_bw[3]);
13177    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13178          sc->mac_addr_str);
13179
13180    /* various MF mode sanity checks... */
13181
13182    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13183        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13184              SC_PORT(sc));
13185        return (1);
13186    }
13187
13188    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13189        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13190              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13191        return (1);
13192    }
13193
13194    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13195        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13196        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13197            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13198                  SC_VN(sc), OVLAN(sc));
13199            return (1);
13200        }
13201
13202        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13203            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13204                  mf_info->multi_vnics_mode, OVLAN(sc));
13205            return (1);
13206        }
13207
13208        /*
13209         * Verify all functions are either MF or SF mode. If MF, make sure
13210         * sure that all non-hidden functions have a valid ovlan. If SF,
13211         * make sure that all non-hidden functions have an invalid ovlan.
13212         */
13213        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13214            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13215            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13216            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13217                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13218                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13219                BLOGE(sc, "mf_mode=SD function %d MF config "
13220                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13221                      i, mf_info->multi_vnics_mode, ovlan1);
13222                return (1);
13223            }
13224        }
13225
13226        /* Verify all funcs on the same port each have a different ovlan. */
13227        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13228            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13229            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13230            /* iterate from the next function on the port to the max func */
13231            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13232                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13233                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13234                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13235                    VALID_OVLAN(ovlan1) &&
13236                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13237                    VALID_OVLAN(ovlan2) &&
13238                    (ovlan1 == ovlan2)) {
13239                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13240                              "have the same ovlan (%d)\n",
13241                          i, j, ovlan1);
13242                    return (1);
13243                }
13244            }
13245        }
13246    } /* MULTI_FUNCTION_SD */
13247
13248    return (0);
13249}
13250
13251static int
13252bxe_get_mf_cfg_info(struct bxe_softc *sc)
13253{
13254    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13255    uint32_t val, mac_upper;
13256    uint8_t i, vnic;
13257
13258    /* initialize mf_info defaults */
13259    mf_info->vnics_per_port   = 1;
13260    mf_info->multi_vnics_mode = FALSE;
13261    mf_info->path_has_ovlan   = FALSE;
13262    mf_info->mf_mode          = SINGLE_FUNCTION;
13263
13264    if (!CHIP_IS_MF_CAP(sc)) {
13265        return (0);
13266    }
13267
13268    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13269        BLOGE(sc, "Invalid mf_cfg_base!\n");
13270        return (1);
13271    }
13272
13273    /* get the MF mode (switch dependent / independent / single-function) */
13274
13275    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13276
13277    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13278    {
13279    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13280
13281        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13282
13283        /* check for legal upper mac bytes */
13284        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13285            mf_info->mf_mode = MULTI_FUNCTION_SI;
13286        } else {
13287            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13288        }
13289
13290        break;
13291
13292    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13293    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13294
13295        /* get outer vlan configuration */
13296        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13297
13298        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13299            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13300            mf_info->mf_mode = MULTI_FUNCTION_SD;
13301        } else {
13302            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13303        }
13304
13305        break;
13306
13307    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13308
13309        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13310        return (0);
13311
13312    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13313
13314        /*
13315         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13316         * and the MAC address is valid.
13317         */
13318        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13319
13320        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13321            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13322            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13323        } else {
13324            BLOGE(sc, "Invalid config for AFEX mode\n");
13325        }
13326
13327        break;
13328
13329    default:
13330
13331        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13332              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13333
13334        return (1);
13335    }
13336
13337    /* set path mf_mode (which could be different than function mf_mode) */
13338    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13339        mf_info->path_has_ovlan = TRUE;
13340    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13341        /*
13342         * Decide on path multi vnics mode. If we're not in MF mode and in
13343         * 4-port mode, this is good enough to check vnic-0 of the other port
13344         * on the same path
13345         */
13346        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13347            uint8_t other_port = !(PORT_ID(sc) & 1);
13348            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13349
13350            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13351
13352            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13353        }
13354    }
13355
13356    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13357        /* invalid MF config */
13358        if (SC_VN(sc) >= 1) {
13359            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13360            return (1);
13361        }
13362
13363        return (0);
13364    }
13365
13366    /* get the MF configuration */
13367    mf_info->mf_config[SC_VN(sc)] =
13368        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13369
13370    switch(mf_info->mf_mode)
13371    {
13372    case MULTI_FUNCTION_SD:
13373
13374        bxe_get_shmem_mf_cfg_info_sd(sc);
13375        break;
13376
13377    case MULTI_FUNCTION_SI:
13378
13379        bxe_get_shmem_mf_cfg_info_si(sc);
13380        break;
13381
13382    case MULTI_FUNCTION_AFEX:
13383
13384        bxe_get_shmem_mf_cfg_info_niv(sc);
13385        break;
13386
13387    default:
13388
13389        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13390              mf_info->mf_mode);
13391        return (1);
13392    }
13393
13394    /* get the congestion management parameters */
13395
13396    vnic = 0;
13397    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13398        /* get min/max bw */
13399        val = MFCFG_RD(sc, func_mf_config[i].config);
13400        mf_info->min_bw[vnic] =
13401            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13402        mf_info->max_bw[vnic] =
13403            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13404        vnic++;
13405    }
13406
13407    return (bxe_check_valid_mf_cfg(sc));
13408}
13409
13410static int
13411bxe_get_shmem_info(struct bxe_softc *sc)
13412{
13413    int port;
13414    uint32_t mac_hi, mac_lo, val;
13415
13416    port = SC_PORT(sc);
13417    mac_hi = mac_lo = 0;
13418
13419    sc->link_params.sc   = sc;
13420    sc->link_params.port = port;
13421
13422    /* get the hardware config info */
13423    sc->devinfo.hw_config =
13424        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13425    sc->devinfo.hw_config2 =
13426        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13427
13428    sc->link_params.hw_led_mode =
13429        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13430         SHARED_HW_CFG_LED_MODE_SHIFT);
13431
13432    /* get the port feature config */
13433    sc->port.config =
13434        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13435
13436    /* get the link params */
13437    sc->link_params.speed_cap_mask[0] =
13438        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13439    sc->link_params.speed_cap_mask[1] =
13440        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13441
13442    /* get the lane config */
13443    sc->link_params.lane_config =
13444        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13445
13446    /* get the link config */
13447    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13448    sc->port.link_config[ELINK_INT_PHY] = val;
13449    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13450    sc->port.link_config[ELINK_EXT_PHY1] =
13451        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13452
13453    /* get the override preemphasis flag and enable it or turn it off */
13454    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13455    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13456        sc->link_params.feature_config_flags |=
13457            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13458    } else {
13459        sc->link_params.feature_config_flags &=
13460            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13461    }
13462
13463    /* get the initial value of the link params */
13464    sc->link_params.multi_phy_config =
13465        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13466
13467    /* get external phy info */
13468    sc->port.ext_phy_config =
13469        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13470
13471    /* get the multifunction configuration */
13472    bxe_get_mf_cfg_info(sc);
13473
13474    /* get the mac address */
13475    if (IS_MF(sc)) {
13476        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13477        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13478    } else {
13479        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13480        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13481    }
13482
13483    if ((mac_lo == 0) && (mac_hi == 0)) {
13484        *sc->mac_addr_str = 0;
13485        BLOGE(sc, "No Ethernet address programmed!\n");
13486    } else {
13487        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13488        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13489        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13490        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13491        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13492        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13493        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13494                 "%02x:%02x:%02x:%02x:%02x:%02x",
13495                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13496                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13497                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13498        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13499    }
13500
13501    return (0);
13502}
13503
13504static void
13505bxe_get_tunable_params(struct bxe_softc *sc)
13506{
13507    /* sanity checks */
13508
13509    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13510        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13511        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13512        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13513        bxe_interrupt_mode = INTR_MODE_MSIX;
13514    }
13515
13516    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13517        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13518        bxe_queue_count = 0;
13519    }
13520
13521    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13522        if (bxe_max_rx_bufs == 0) {
13523            bxe_max_rx_bufs = RX_BD_USABLE;
13524        } else {
13525            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13526            bxe_max_rx_bufs = 2048;
13527        }
13528    }
13529
13530    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13531        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13532        bxe_hc_rx_ticks = 25;
13533    }
13534
13535    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13536        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13537        bxe_hc_tx_ticks = 50;
13538    }
13539
13540    if (bxe_max_aggregation_size == 0) {
13541        bxe_max_aggregation_size = TPA_AGG_SIZE;
13542    }
13543
13544    if (bxe_max_aggregation_size > 0xffff) {
13545        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13546              bxe_max_aggregation_size);
13547        bxe_max_aggregation_size = TPA_AGG_SIZE;
13548    }
13549
13550    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13551        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13552        bxe_mrrs = -1;
13553    }
13554
13555    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13556        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13557        bxe_autogreeen = 0;
13558    }
13559
13560    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13561        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13562        bxe_udp_rss = 0;
13563    }
13564
13565    /* pull in user settings */
13566
13567    sc->interrupt_mode       = bxe_interrupt_mode;
13568    sc->max_rx_bufs          = bxe_max_rx_bufs;
13569    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13570    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13571    sc->max_aggregation_size = bxe_max_aggregation_size;
13572    sc->mrrs                 = bxe_mrrs;
13573    sc->autogreeen           = bxe_autogreeen;
13574    sc->udp_rss              = bxe_udp_rss;
13575
13576    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13577        sc->num_queues = 1;
13578    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13579        sc->num_queues =
13580            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13581                MAX_RSS_CHAINS);
13582        if (sc->num_queues > mp_ncpus) {
13583            sc->num_queues = mp_ncpus;
13584        }
13585    }
13586
13587    BLOGD(sc, DBG_LOAD,
13588          "User Config: "
13589          "debug=0x%lx "
13590          "interrupt_mode=%d "
13591          "queue_count=%d "
13592          "hc_rx_ticks=%d "
13593          "hc_tx_ticks=%d "
13594          "rx_budget=%d "
13595          "max_aggregation_size=%d "
13596          "mrrs=%d "
13597          "autogreeen=%d "
13598          "udp_rss=%d\n",
13599          bxe_debug,
13600          sc->interrupt_mode,
13601          sc->num_queues,
13602          sc->hc_rx_ticks,
13603          sc->hc_tx_ticks,
13604          bxe_rx_budget,
13605          sc->max_aggregation_size,
13606          sc->mrrs,
13607          sc->autogreeen,
13608          sc->udp_rss);
13609}
13610
13611static int
13612bxe_media_detect(struct bxe_softc *sc)
13613{
13614    int port_type;
13615    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13616
13617    switch (sc->link_params.phy[phy_idx].media_type) {
13618    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13619    case ELINK_ETH_PHY_XFP_FIBER:
13620        BLOGI(sc, "Found 10Gb Fiber media.\n");
13621        sc->media = IFM_10G_SR;
13622        port_type = PORT_FIBRE;
13623        break;
13624    case ELINK_ETH_PHY_SFP_1G_FIBER:
13625        BLOGI(sc, "Found 1Gb Fiber media.\n");
13626        sc->media = IFM_1000_SX;
13627        port_type = PORT_FIBRE;
13628        break;
13629    case ELINK_ETH_PHY_KR:
13630    case ELINK_ETH_PHY_CX4:
13631        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13632        sc->media = IFM_10G_CX4;
13633        port_type = PORT_FIBRE;
13634        break;
13635    case ELINK_ETH_PHY_DA_TWINAX:
13636        BLOGI(sc, "Found 10Gb Twinax media.\n");
13637        sc->media = IFM_10G_TWINAX;
13638        port_type = PORT_DA;
13639        break;
13640    case ELINK_ETH_PHY_BASE_T:
13641        if (sc->link_params.speed_cap_mask[0] &
13642            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13643            BLOGI(sc, "Found 10GBase-T media.\n");
13644            sc->media = IFM_10G_T;
13645            port_type = PORT_TP;
13646        } else {
13647            BLOGI(sc, "Found 1000Base-T media.\n");
13648            sc->media = IFM_1000_T;
13649            port_type = PORT_TP;
13650        }
13651        break;
13652    case ELINK_ETH_PHY_NOT_PRESENT:
13653        BLOGI(sc, "Media not present.\n");
13654        sc->media = 0;
13655        port_type = PORT_OTHER;
13656        break;
13657    case ELINK_ETH_PHY_UNSPECIFIED:
13658    default:
13659        BLOGI(sc, "Unknown media!\n");
13660        sc->media = 0;
13661        port_type = PORT_OTHER;
13662        break;
13663    }
13664    return port_type;
13665}
13666
13667#define GET_FIELD(value, fname)                     \
13668    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13669#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13670#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13671
13672static int
13673bxe_get_igu_cam_info(struct bxe_softc *sc)
13674{
13675    int pfid = SC_FUNC(sc);
13676    int igu_sb_id;
13677    uint32_t val;
13678    uint8_t fid, igu_sb_cnt = 0;
13679
13680    sc->igu_base_sb = 0xff;
13681
13682    if (CHIP_INT_MODE_IS_BC(sc)) {
13683        int vn = SC_VN(sc);
13684        igu_sb_cnt = sc->igu_sb_cnt;
13685        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13686                           FP_SB_MAX_E1x);
13687        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13688                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13689        return (0);
13690    }
13691
13692    /* IGU in normal mode - read CAM */
13693    for (igu_sb_id = 0;
13694         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13695         igu_sb_id++) {
13696        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13697        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13698            continue;
13699        }
13700        fid = IGU_FID(val);
13701        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13702            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13703                continue;
13704            }
13705            if (IGU_VEC(val) == 0) {
13706                /* default status block */
13707                sc->igu_dsb_id = igu_sb_id;
13708            } else {
13709                if (sc->igu_base_sb == 0xff) {
13710                    sc->igu_base_sb = igu_sb_id;
13711                }
13712                igu_sb_cnt++;
13713            }
13714        }
13715    }
13716
13717    /*
13718     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
13719     * that number of CAM entries will not be equal to the value advertised in
13720     * PCI. Driver should use the minimal value of both as the actual status
13721     * block count
13722     */
13723    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13724
13725    if (igu_sb_cnt == 0) {
13726        BLOGE(sc, "CAM configuration error\n");
13727        return (-1);
13728    }
13729
13730    return (0);
13731}
13732
13733/*
13734 * Gather various information from the device config space, the device itself,
13735 * shmem, and the user input.
13736 */
13737static int
13738bxe_get_device_info(struct bxe_softc *sc)
13739{
13740    uint32_t val;
13741    int rc;
13742
13743    /* Get the data for the device */
13744    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13745    sc->devinfo.device_id    = pci_get_device(sc->dev);
13746    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13747    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13748
13749    /* get the chip revision (chip metal comes from pci config space) */
13750    sc->devinfo.chip_id     =
13751    sc->link_params.chip_id =
13752        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13753         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13754         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13755         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13756
13757    /* force 57811 according to MISC register */
13758    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13759        if (CHIP_IS_57810(sc)) {
13760            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13761                                   (sc->devinfo.chip_id & 0x0000ffff));
13762        } else if (CHIP_IS_57810_MF(sc)) {
13763            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13764                                   (sc->devinfo.chip_id & 0x0000ffff));
13765        }
13766        sc->devinfo.chip_id |= 0x1;
13767    }
13768
13769    BLOGD(sc, DBG_LOAD,
13770          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13771          sc->devinfo.chip_id,
13772          ((sc->devinfo.chip_id >> 16) & 0xffff),
13773          ((sc->devinfo.chip_id >> 12) & 0xf),
13774          ((sc->devinfo.chip_id >>  4) & 0xff),
13775          ((sc->devinfo.chip_id >>  0) & 0xf));
13776
13777    val = (REG_RD(sc, 0x2874) & 0x55);
13778    if ((sc->devinfo.chip_id & 0x1) ||
13779        (CHIP_IS_E1(sc) && val) ||
13780        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13781        sc->flags |= BXE_ONE_PORT_FLAG;
13782        BLOGD(sc, DBG_LOAD, "single port device\n");
13783    }
13784
13785    /* set the doorbell size */
13786    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13787
13788    /* determine whether the device is in 2 port or 4 port mode */
13789    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13790    if (CHIP_IS_E2E3(sc)) {
13791        /*
13792         * Read port4mode_en_ovwr[0]:
13793         *   If 1, four port mode is in port4mode_en_ovwr[1].
13794         *   If 0, four port mode is in port4mode_en[0].
13795         */
13796        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13797        if (val & 1) {
13798            val = ((val >> 1) & 1);
13799        } else {
13800            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13801        }
13802
13803        sc->devinfo.chip_port_mode =
13804            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13805
13806        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13807    }
13808
13809    /* get the function and path info for the device */
13810    bxe_get_function_num(sc);
13811
13812    /* get the shared memory base address */
13813    sc->devinfo.shmem_base     =
13814    sc->link_params.shmem_base =
13815        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13816    sc->devinfo.shmem2_base =
13817        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13818                                  MISC_REG_GENERIC_CR_0));
13819
13820    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13821          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13822
13823    if (!sc->devinfo.shmem_base) {
13824        /* this should ONLY prevent upcoming shmem reads */
13825        BLOGI(sc, "MCP not active\n");
13826        sc->flags |= BXE_NO_MCP_FLAG;
13827        return (0);
13828    }
13829
13830    /* make sure the shared memory contents are valid */
13831    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13832    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13833        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13834        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13835        return (0);
13836    }
13837    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13838
13839    /* get the bootcode version */
13840    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13841    snprintf(sc->devinfo.bc_ver_str,
13842             sizeof(sc->devinfo.bc_ver_str),
13843             "%d.%d.%d",
13844             ((sc->devinfo.bc_ver >> 24) & 0xff),
13845             ((sc->devinfo.bc_ver >> 16) & 0xff),
13846             ((sc->devinfo.bc_ver >>  8) & 0xff));
13847    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13848
13849    /* get the bootcode shmem address */
13850    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13851    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
13852
13853    /* clean indirect addresses as they're not used */
13854    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13855    if (IS_PF(sc)) {
13856        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13857        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13858        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13859        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13860        if (CHIP_IS_E1x(sc)) {
13861            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13862            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13863            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13864            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13865        }
13866
13867        /*
13868         * Enable internal target-read (in case we are probed after PF
13869         * FLR). Must be done prior to any BAR read access. Only for
13870         * 57712 and up
13871         */
13872        if (!CHIP_IS_E1x(sc)) {
13873            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13874        }
13875    }
13876
13877    /* get the nvram size */
13878    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13879    sc->devinfo.flash_size =
13880        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13881    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13882
13883    /* get PCI capabilites */
13884    bxe_probe_pci_caps(sc);
13885
13886    bxe_set_power_state(sc, PCI_PM_D0);
13887
13888    /* get various configuration parameters from shmem */
13889    bxe_get_shmem_info(sc);
13890
13891    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13892        val = pci_read_config(sc->dev,
13893                              (sc->devinfo.pcie_msix_cap_reg +
13894                               PCIR_MSIX_CTRL),
13895                              2);
13896        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13897    } else {
13898        sc->igu_sb_cnt = 1;
13899    }
13900
13901    sc->igu_base_addr = BAR_IGU_INTMEM;
13902
13903    /* initialize IGU parameters */
13904    if (CHIP_IS_E1x(sc)) {
13905        sc->devinfo.int_block = INT_BLOCK_HC;
13906        sc->igu_dsb_id = DEF_SB_IGU_ID;
13907        sc->igu_base_sb = 0;
13908    } else {
13909        sc->devinfo.int_block = INT_BLOCK_IGU;
13910
13911        /* do not allow device reset during IGU info preocessing */
13912        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13913
13914        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13915
13916        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13917            int tout = 5000;
13918
13919            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13920
13921            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13922            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13923            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13924
13925            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13926                tout--;
13927                DELAY(1000);
13928            }
13929
13930            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13931                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13932                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13933                return (-1);
13934            }
13935        }
13936
13937        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13938            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13939            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13940        } else {
13941            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13942        }
13943
13944        rc = bxe_get_igu_cam_info(sc);
13945
13946        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13947
13948        if (rc) {
13949            return (rc);
13950        }
13951    }
13952
13953    /*
13954     * Get base FW non-default (fast path) status block ID. This value is
13955     * used to initialize the fw_sb_id saved on the fp/queue structure to
13956     * determine the id used by the FW.
13957     */
13958    if (CHIP_IS_E1x(sc)) {
13959        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13960    } else {
13961        /*
13962         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13963         * the same queue are indicated on the same IGU SB). So we prefer
13964         * FW and IGU SBs to be the same value.
13965         */
13966        sc->base_fw_ndsb = sc->igu_base_sb;
13967    }
13968
13969    BLOGD(sc, DBG_LOAD,
13970          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13971          sc->igu_dsb_id, sc->igu_base_sb,
13972          sc->igu_sb_cnt, sc->base_fw_ndsb);
13973
13974    elink_phy_probe(&sc->link_params);
13975
13976    return (0);
13977}
13978
13979static void
13980bxe_link_settings_supported(struct bxe_softc *sc,
13981                            uint32_t         switch_cfg)
13982{
13983    uint32_t cfg_size = 0;
13984    uint32_t idx;
13985    uint8_t port = SC_PORT(sc);
13986
13987    /* aggregation of supported attributes of all external phys */
13988    sc->port.supported[0] = 0;
13989    sc->port.supported[1] = 0;
13990
13991    switch (sc->link_params.num_phys) {
13992    case 1:
13993        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13994        cfg_size = 1;
13995        break;
13996    case 2:
13997        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13998        cfg_size = 1;
13999        break;
14000    case 3:
14001        if (sc->link_params.multi_phy_config &
14002            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14003            sc->port.supported[1] =
14004                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14005            sc->port.supported[0] =
14006                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14007        } else {
14008            sc->port.supported[0] =
14009                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14010            sc->port.supported[1] =
14011                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14012        }
14013        cfg_size = 2;
14014        break;
14015    }
14016
14017    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14018        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14019              SHMEM_RD(sc,
14020                       dev_info.port_hw_config[port].external_phy_config),
14021              SHMEM_RD(sc,
14022                       dev_info.port_hw_config[port].external_phy_config2));
14023        return;
14024    }
14025
14026    if (CHIP_IS_E3(sc))
14027        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14028    else {
14029        switch (switch_cfg) {
14030        case ELINK_SWITCH_CFG_1G:
14031            sc->port.phy_addr =
14032                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14033            break;
14034        case ELINK_SWITCH_CFG_10G:
14035            sc->port.phy_addr =
14036                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14037            break;
14038        default:
14039            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14040                  sc->port.link_config[0]);
14041            return;
14042        }
14043    }
14044
14045    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14046
14047    /* mask what we support according to speed_cap_mask per configuration */
14048    for (idx = 0; idx < cfg_size; idx++) {
14049        if (!(sc->link_params.speed_cap_mask[idx] &
14050              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14051            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14052        }
14053
14054        if (!(sc->link_params.speed_cap_mask[idx] &
14055              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14056            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14057        }
14058
14059        if (!(sc->link_params.speed_cap_mask[idx] &
14060              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14061            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14062        }
14063
14064        if (!(sc->link_params.speed_cap_mask[idx] &
14065              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14066            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14067        }
14068
14069        if (!(sc->link_params.speed_cap_mask[idx] &
14070              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14071            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14072        }
14073
14074        if (!(sc->link_params.speed_cap_mask[idx] &
14075              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14076            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14077        }
14078
14079        if (!(sc->link_params.speed_cap_mask[idx] &
14080              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14081            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14082        }
14083
14084        if (!(sc->link_params.speed_cap_mask[idx] &
14085              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14086            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14087        }
14088    }
14089
14090    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14091          sc->port.supported[0], sc->port.supported[1]);
14092	ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14093					sc->port.supported[0], sc->port.supported[1]);
14094}
14095
14096static void
14097bxe_link_settings_requested(struct bxe_softc *sc)
14098{
14099    uint32_t link_config;
14100    uint32_t idx;
14101    uint32_t cfg_size = 0;
14102
14103    sc->port.advertising[0] = 0;
14104    sc->port.advertising[1] = 0;
14105
14106    switch (sc->link_params.num_phys) {
14107    case 1:
14108    case 2:
14109        cfg_size = 1;
14110        break;
14111    case 3:
14112        cfg_size = 2;
14113        break;
14114    }
14115
14116    for (idx = 0; idx < cfg_size; idx++) {
14117        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14118        link_config = sc->port.link_config[idx];
14119
14120        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14121        case PORT_FEATURE_LINK_SPEED_AUTO:
14122            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14123                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14124                sc->port.advertising[idx] |= sc->port.supported[idx];
14125                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14126                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14127                    sc->port.advertising[idx] |=
14128                        (ELINK_SUPPORTED_100baseT_Half |
14129                         ELINK_SUPPORTED_100baseT_Full);
14130            } else {
14131                /* force 10G, no AN */
14132                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14133                sc->port.advertising[idx] |=
14134                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14135                continue;
14136            }
14137            break;
14138
14139        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14140            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14141                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14142                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14143                                              ADVERTISED_TP);
14144            } else {
14145                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14146                          "speed_cap_mask=0x%08x\n",
14147                      link_config, sc->link_params.speed_cap_mask[idx]);
14148                return;
14149            }
14150            break;
14151
14152        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14153            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14154                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14155                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14156                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14157                                              ADVERTISED_TP);
14158				ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14159								sc->link_params.req_duplex[idx]);
14160            } else {
14161                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14162                          "speed_cap_mask=0x%08x\n",
14163                      link_config, sc->link_params.speed_cap_mask[idx]);
14164                return;
14165            }
14166            break;
14167
14168        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14169            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14170                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14171                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14172                                              ADVERTISED_TP);
14173            } else {
14174                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14175                          "speed_cap_mask=0x%08x\n",
14176                      link_config, sc->link_params.speed_cap_mask[idx]);
14177                return;
14178            }
14179            break;
14180
14181        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14182            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14183                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14184                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14185                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14186                                              ADVERTISED_TP);
14187            } else {
14188                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14189                          "speed_cap_mask=0x%08x\n",
14190                      link_config, sc->link_params.speed_cap_mask[idx]);
14191                return;
14192            }
14193            break;
14194
14195        case PORT_FEATURE_LINK_SPEED_1G:
14196            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14197                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14198                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14199                                              ADVERTISED_TP);
14200            } else {
14201                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14202                          "speed_cap_mask=0x%08x\n",
14203                      link_config, sc->link_params.speed_cap_mask[idx]);
14204                return;
14205            }
14206            break;
14207
14208        case PORT_FEATURE_LINK_SPEED_2_5G:
14209            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14210                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14211                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14212                                              ADVERTISED_TP);
14213            } else {
14214                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14215                          "speed_cap_mask=0x%08x\n",
14216                      link_config, sc->link_params.speed_cap_mask[idx]);
14217                return;
14218            }
14219            break;
14220
14221        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14222            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14223                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14224                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14225                                              ADVERTISED_FIBRE);
14226            } else {
14227                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14228                          "speed_cap_mask=0x%08x\n",
14229                      link_config, sc->link_params.speed_cap_mask[idx]);
14230                return;
14231            }
14232            break;
14233
14234        case PORT_FEATURE_LINK_SPEED_20G:
14235            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14236            break;
14237
14238        default:
14239            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14240                      "speed_cap_mask=0x%08x\n",
14241                  link_config, sc->link_params.speed_cap_mask[idx]);
14242            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14243            sc->port.advertising[idx] = sc->port.supported[idx];
14244            break;
14245        }
14246
14247        sc->link_params.req_flow_ctrl[idx] =
14248            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14249
14250        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14251            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14252                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14253            } else {
14254                bxe_set_requested_fc(sc);
14255            }
14256        }
14257
14258        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14259                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14260              sc->link_params.req_line_speed[idx],
14261              sc->link_params.req_duplex[idx],
14262              sc->link_params.req_flow_ctrl[idx],
14263              sc->port.advertising[idx]);
14264		ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14265						"advertising=0x%x\n",
14266						sc->link_params.req_line_speed[idx],
14267						sc->link_params.req_duplex[idx],
14268						sc->port.advertising[idx]);
14269    }
14270}
14271
14272static void
14273bxe_get_phy_info(struct bxe_softc *sc)
14274{
14275    uint8_t port = SC_PORT(sc);
14276    uint32_t config = sc->port.config;
14277    uint32_t eee_mode;
14278
14279    /* shmem data already read in bxe_get_shmem_info() */
14280
14281    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14282                        "link_config0=0x%08x\n",
14283               sc->link_params.lane_config,
14284               sc->link_params.speed_cap_mask[0],
14285               sc->port.link_config[0]);
14286
14287
14288    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14289    bxe_link_settings_requested(sc);
14290
14291    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14292        sc->link_params.feature_config_flags |=
14293            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14294    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14295        sc->link_params.feature_config_flags &=
14296            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14297    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14298        sc->link_params.feature_config_flags |=
14299            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14300    }
14301
14302    /* configure link feature according to nvram value */
14303    eee_mode =
14304        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14305          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14306         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14307    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14308        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14309                                    ELINK_EEE_MODE_ENABLE_LPI |
14310                                    ELINK_EEE_MODE_OUTPUT_TIME);
14311    } else {
14312        sc->link_params.eee_mode = 0;
14313    }
14314
14315    /* get the media type */
14316    bxe_media_detect(sc);
14317	ELINK_DEBUG_P1(sc, "detected media type\n", sc->media);
14318}
14319
14320static void
14321bxe_get_params(struct bxe_softc *sc)
14322{
14323    /* get user tunable params */
14324    bxe_get_tunable_params(sc);
14325
14326    /* select the RX and TX ring sizes */
14327    sc->tx_ring_size = TX_BD_USABLE;
14328    sc->rx_ring_size = RX_BD_USABLE;
14329
14330    /* XXX disable WoL */
14331    sc->wol = 0;
14332}
14333
14334static void
14335bxe_set_modes_bitmap(struct bxe_softc *sc)
14336{
14337    uint32_t flags = 0;
14338
14339    if (CHIP_REV_IS_FPGA(sc)) {
14340        SET_FLAGS(flags, MODE_FPGA);
14341    } else if (CHIP_REV_IS_EMUL(sc)) {
14342        SET_FLAGS(flags, MODE_EMUL);
14343    } else {
14344        SET_FLAGS(flags, MODE_ASIC);
14345    }
14346
14347    if (CHIP_IS_MODE_4_PORT(sc)) {
14348        SET_FLAGS(flags, MODE_PORT4);
14349    } else {
14350        SET_FLAGS(flags, MODE_PORT2);
14351    }
14352
14353    if (CHIP_IS_E2(sc)) {
14354        SET_FLAGS(flags, MODE_E2);
14355    } else if (CHIP_IS_E3(sc)) {
14356        SET_FLAGS(flags, MODE_E3);
14357        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14358            SET_FLAGS(flags, MODE_E3_A0);
14359        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14360            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14361        }
14362    }
14363
14364    if (IS_MF(sc)) {
14365        SET_FLAGS(flags, MODE_MF);
14366        switch (sc->devinfo.mf_info.mf_mode) {
14367        case MULTI_FUNCTION_SD:
14368            SET_FLAGS(flags, MODE_MF_SD);
14369            break;
14370        case MULTI_FUNCTION_SI:
14371            SET_FLAGS(flags, MODE_MF_SI);
14372            break;
14373        case MULTI_FUNCTION_AFEX:
14374            SET_FLAGS(flags, MODE_MF_AFEX);
14375            break;
14376        }
14377    } else {
14378        SET_FLAGS(flags, MODE_SF);
14379    }
14380
14381#if defined(__LITTLE_ENDIAN)
14382    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14383#else /* __BIG_ENDIAN */
14384    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14385#endif
14386
14387    INIT_MODE_FLAGS(sc) = flags;
14388}
14389
14390static int
14391bxe_alloc_hsi_mem(struct bxe_softc *sc)
14392{
14393    struct bxe_fastpath *fp;
14394    bus_addr_t busaddr;
14395    int max_agg_queues;
14396    int max_segments;
14397    bus_size_t max_size;
14398    bus_size_t max_seg_size;
14399    char buf[32];
14400    int rc;
14401    int i, j;
14402
14403    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14404
14405    /* allocate the parent bus DMA tag */
14406    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14407                            1,                        /* alignment */
14408                            0,                        /* boundary limit */
14409                            BUS_SPACE_MAXADDR,        /* restricted low */
14410                            BUS_SPACE_MAXADDR,        /* restricted hi */
14411                            NULL,                     /* addr filter() */
14412                            NULL,                     /* addr filter() arg */
14413                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14414                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14415                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14416                            0,                        /* flags */
14417                            NULL,                     /* lock() */
14418                            NULL,                     /* lock() arg */
14419                            &sc->parent_dma_tag);     /* returned dma tag */
14420    if (rc != 0) {
14421        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14422        return (1);
14423    }
14424
14425    /************************/
14426    /* DEFAULT STATUS BLOCK */
14427    /************************/
14428
14429    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14430                      &sc->def_sb_dma, "default status block") != 0) {
14431        /* XXX */
14432        bus_dma_tag_destroy(sc->parent_dma_tag);
14433        return (1);
14434    }
14435
14436    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14437
14438    /***************/
14439    /* EVENT QUEUE */
14440    /***************/
14441
14442    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14443                      &sc->eq_dma, "event queue") != 0) {
14444        /* XXX */
14445        bxe_dma_free(sc, &sc->def_sb_dma);
14446        sc->def_sb = NULL;
14447        bus_dma_tag_destroy(sc->parent_dma_tag);
14448        return (1);
14449    }
14450
14451    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14452
14453    /*************/
14454    /* SLOW PATH */
14455    /*************/
14456
14457    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14458                      &sc->sp_dma, "slow path") != 0) {
14459        /* XXX */
14460        bxe_dma_free(sc, &sc->eq_dma);
14461        sc->eq = NULL;
14462        bxe_dma_free(sc, &sc->def_sb_dma);
14463        sc->def_sb = NULL;
14464        bus_dma_tag_destroy(sc->parent_dma_tag);
14465        return (1);
14466    }
14467
14468    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14469
14470    /*******************/
14471    /* SLOW PATH QUEUE */
14472    /*******************/
14473
14474    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14475                      &sc->spq_dma, "slow path queue") != 0) {
14476        /* XXX */
14477        bxe_dma_free(sc, &sc->sp_dma);
14478        sc->sp = NULL;
14479        bxe_dma_free(sc, &sc->eq_dma);
14480        sc->eq = NULL;
14481        bxe_dma_free(sc, &sc->def_sb_dma);
14482        sc->def_sb = NULL;
14483        bus_dma_tag_destroy(sc->parent_dma_tag);
14484        return (1);
14485    }
14486
14487    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14488
14489    /***************************/
14490    /* FW DECOMPRESSION BUFFER */
14491    /***************************/
14492
14493    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14494                      "fw decompression buffer") != 0) {
14495        /* XXX */
14496        bxe_dma_free(sc, &sc->spq_dma);
14497        sc->spq = NULL;
14498        bxe_dma_free(sc, &sc->sp_dma);
14499        sc->sp = NULL;
14500        bxe_dma_free(sc, &sc->eq_dma);
14501        sc->eq = NULL;
14502        bxe_dma_free(sc, &sc->def_sb_dma);
14503        sc->def_sb = NULL;
14504        bus_dma_tag_destroy(sc->parent_dma_tag);
14505        return (1);
14506    }
14507
14508    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14509
14510    if ((sc->gz_strm =
14511         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14512        /* XXX */
14513        bxe_dma_free(sc, &sc->gz_buf_dma);
14514        sc->gz_buf = NULL;
14515        bxe_dma_free(sc, &sc->spq_dma);
14516        sc->spq = NULL;
14517        bxe_dma_free(sc, &sc->sp_dma);
14518        sc->sp = NULL;
14519        bxe_dma_free(sc, &sc->eq_dma);
14520        sc->eq = NULL;
14521        bxe_dma_free(sc, &sc->def_sb_dma);
14522        sc->def_sb = NULL;
14523        bus_dma_tag_destroy(sc->parent_dma_tag);
14524        return (1);
14525    }
14526
14527    /*************/
14528    /* FASTPATHS */
14529    /*************/
14530
14531    /* allocate DMA memory for each fastpath structure */
14532    for (i = 0; i < sc->num_queues; i++) {
14533        fp = &sc->fp[i];
14534        fp->sc    = sc;
14535        fp->index = i;
14536
14537        /*******************/
14538        /* FP STATUS BLOCK */
14539        /*******************/
14540
14541        snprintf(buf, sizeof(buf), "fp %d status block", i);
14542        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14543                          &fp->sb_dma, buf) != 0) {
14544            /* XXX unwind and free previous fastpath allocations */
14545            BLOGE(sc, "Failed to alloc %s\n", buf);
14546            return (1);
14547        } else {
14548            if (CHIP_IS_E2E3(sc)) {
14549                fp->status_block.e2_sb =
14550                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14551            } else {
14552                fp->status_block.e1x_sb =
14553                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14554            }
14555        }
14556
14557        /******************/
14558        /* FP TX BD CHAIN */
14559        /******************/
14560
14561        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14562        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14563                          &fp->tx_dma, buf) != 0) {
14564            /* XXX unwind and free previous fastpath allocations */
14565            BLOGE(sc, "Failed to alloc %s\n", buf);
14566            return (1);
14567        } else {
14568            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14569        }
14570
14571        /* link together the tx bd chain pages */
14572        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14573            /* index into the tx bd chain array to last entry per page */
14574            struct eth_tx_next_bd *tx_next_bd =
14575                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14576            /* point to the next page and wrap from last page */
14577            busaddr = (fp->tx_dma.paddr +
14578                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14579            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14580            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14581        }
14582
14583        /******************/
14584        /* FP RX BD CHAIN */
14585        /******************/
14586
14587        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14588        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14589                          &fp->rx_dma, buf) != 0) {
14590            /* XXX unwind and free previous fastpath allocations */
14591            BLOGE(sc, "Failed to alloc %s\n", buf);
14592            return (1);
14593        } else {
14594            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14595        }
14596
14597        /* link together the rx bd chain pages */
14598        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14599            /* index into the rx bd chain array to last entry per page */
14600            struct eth_rx_bd *rx_bd =
14601                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14602            /* point to the next page and wrap from last page */
14603            busaddr = (fp->rx_dma.paddr +
14604                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14605            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14606            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14607        }
14608
14609        /*******************/
14610        /* FP RX RCQ CHAIN */
14611        /*******************/
14612
14613        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14614        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14615                          &fp->rcq_dma, buf) != 0) {
14616            /* XXX unwind and free previous fastpath allocations */
14617            BLOGE(sc, "Failed to alloc %s\n", buf);
14618            return (1);
14619        } else {
14620            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14621        }
14622
14623        /* link together the rcq chain pages */
14624        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14625            /* index into the rcq chain array to last entry per page */
14626            struct eth_rx_cqe_next_page *rx_cqe_next =
14627                (struct eth_rx_cqe_next_page *)
14628                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14629            /* point to the next page and wrap from last page */
14630            busaddr = (fp->rcq_dma.paddr +
14631                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14632            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14633            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14634        }
14635
14636        /*******************/
14637        /* FP RX SGE CHAIN */
14638        /*******************/
14639
14640        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14641        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14642                          &fp->rx_sge_dma, buf) != 0) {
14643            /* XXX unwind and free previous fastpath allocations */
14644            BLOGE(sc, "Failed to alloc %s\n", buf);
14645            return (1);
14646        } else {
14647            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14648        }
14649
14650        /* link together the sge chain pages */
14651        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14652            /* index into the rcq chain array to last entry per page */
14653            struct eth_rx_sge *rx_sge =
14654                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14655            /* point to the next page and wrap from last page */
14656            busaddr = (fp->rx_sge_dma.paddr +
14657                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14658            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14659            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14660        }
14661
14662        /***********************/
14663        /* FP TX MBUF DMA MAPS */
14664        /***********************/
14665
14666        /* set required sizes before mapping to conserve resources */
14667        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14668            max_size     = BXE_TSO_MAX_SIZE;
14669            max_segments = BXE_TSO_MAX_SEGMENTS;
14670            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14671        } else {
14672            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14673            max_segments = BXE_MAX_SEGMENTS;
14674            max_seg_size = MCLBYTES;
14675        }
14676
14677        /* create a dma tag for the tx mbufs */
14678        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14679                                1,                  /* alignment */
14680                                0,                  /* boundary limit */
14681                                BUS_SPACE_MAXADDR,  /* restricted low */
14682                                BUS_SPACE_MAXADDR,  /* restricted hi */
14683                                NULL,               /* addr filter() */
14684                                NULL,               /* addr filter() arg */
14685                                max_size,           /* max map size */
14686                                max_segments,       /* num discontinuous */
14687                                max_seg_size,       /* max seg size */
14688                                0,                  /* flags */
14689                                NULL,               /* lock() */
14690                                NULL,               /* lock() arg */
14691                                &fp->tx_mbuf_tag);  /* returned dma tag */
14692        if (rc != 0) {
14693            /* XXX unwind and free previous fastpath allocations */
14694            BLOGE(sc, "Failed to create dma tag for "
14695                      "'fp %d tx mbufs' (%d)\n", i, rc);
14696            return (1);
14697        }
14698
14699        /* create dma maps for each of the tx mbuf clusters */
14700        for (j = 0; j < TX_BD_TOTAL; j++) {
14701            if (bus_dmamap_create(fp->tx_mbuf_tag,
14702                                  BUS_DMA_NOWAIT,
14703                                  &fp->tx_mbuf_chain[j].m_map)) {
14704                /* XXX unwind and free previous fastpath allocations */
14705                BLOGE(sc, "Failed to create dma map for "
14706                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14707                return (1);
14708            }
14709        }
14710
14711        /***********************/
14712        /* FP RX MBUF DMA MAPS */
14713        /***********************/
14714
14715        /* create a dma tag for the rx mbufs */
14716        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14717                                1,                  /* alignment */
14718                                0,                  /* boundary limit */
14719                                BUS_SPACE_MAXADDR,  /* restricted low */
14720                                BUS_SPACE_MAXADDR,  /* restricted hi */
14721                                NULL,               /* addr filter() */
14722                                NULL,               /* addr filter() arg */
14723                                MJUM9BYTES,         /* max map size */
14724                                1,                  /* num discontinuous */
14725                                MJUM9BYTES,         /* max seg size */
14726                                0,                  /* flags */
14727                                NULL,               /* lock() */
14728                                NULL,               /* lock() arg */
14729                                &fp->rx_mbuf_tag);  /* returned dma tag */
14730        if (rc != 0) {
14731            /* XXX unwind and free previous fastpath allocations */
14732            BLOGE(sc, "Failed to create dma tag for "
14733                      "'fp %d rx mbufs' (%d)\n", i, rc);
14734            return (1);
14735        }
14736
14737        /* create dma maps for each of the rx mbuf clusters */
14738        for (j = 0; j < RX_BD_TOTAL; j++) {
14739            if (bus_dmamap_create(fp->rx_mbuf_tag,
14740                                  BUS_DMA_NOWAIT,
14741                                  &fp->rx_mbuf_chain[j].m_map)) {
14742                /* XXX unwind and free previous fastpath allocations */
14743                BLOGE(sc, "Failed to create dma map for "
14744                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14745                return (1);
14746            }
14747        }
14748
14749        /* create dma map for the spare rx mbuf cluster */
14750        if (bus_dmamap_create(fp->rx_mbuf_tag,
14751                              BUS_DMA_NOWAIT,
14752                              &fp->rx_mbuf_spare_map)) {
14753            /* XXX unwind and free previous fastpath allocations */
14754            BLOGE(sc, "Failed to create dma map for "
14755                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14756            return (1);
14757        }
14758
14759        /***************************/
14760        /* FP RX SGE MBUF DMA MAPS */
14761        /***************************/
14762
14763        /* create a dma tag for the rx sge mbufs */
14764        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14765                                1,                  /* alignment */
14766                                0,                  /* boundary limit */
14767                                BUS_SPACE_MAXADDR,  /* restricted low */
14768                                BUS_SPACE_MAXADDR,  /* restricted hi */
14769                                NULL,               /* addr filter() */
14770                                NULL,               /* addr filter() arg */
14771                                BCM_PAGE_SIZE,      /* max map size */
14772                                1,                  /* num discontinuous */
14773                                BCM_PAGE_SIZE,      /* max seg size */
14774                                0,                  /* flags */
14775                                NULL,               /* lock() */
14776                                NULL,               /* lock() arg */
14777                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14778        if (rc != 0) {
14779            /* XXX unwind and free previous fastpath allocations */
14780            BLOGE(sc, "Failed to create dma tag for "
14781                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14782            return (1);
14783        }
14784
14785        /* create dma maps for the rx sge mbuf clusters */
14786        for (j = 0; j < RX_SGE_TOTAL; j++) {
14787            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14788                                  BUS_DMA_NOWAIT,
14789                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14790                /* XXX unwind and free previous fastpath allocations */
14791                BLOGE(sc, "Failed to create dma map for "
14792                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14793                return (1);
14794            }
14795        }
14796
14797        /* create dma map for the spare rx sge mbuf cluster */
14798        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14799                              BUS_DMA_NOWAIT,
14800                              &fp->rx_sge_mbuf_spare_map)) {
14801            /* XXX unwind and free previous fastpath allocations */
14802            BLOGE(sc, "Failed to create dma map for "
14803                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14804            return (1);
14805        }
14806
14807        /***************************/
14808        /* FP RX TPA MBUF DMA MAPS */
14809        /***************************/
14810
14811        /* create dma maps for the rx tpa mbuf clusters */
14812        max_agg_queues = MAX_AGG_QS(sc);
14813
14814        for (j = 0; j < max_agg_queues; j++) {
14815            if (bus_dmamap_create(fp->rx_mbuf_tag,
14816                                  BUS_DMA_NOWAIT,
14817                                  &fp->rx_tpa_info[j].bd.m_map)) {
14818                /* XXX unwind and free previous fastpath allocations */
14819                BLOGE(sc, "Failed to create dma map for "
14820                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14821                return (1);
14822            }
14823        }
14824
14825        /* create dma map for the spare rx tpa mbuf cluster */
14826        if (bus_dmamap_create(fp->rx_mbuf_tag,
14827                              BUS_DMA_NOWAIT,
14828                              &fp->rx_tpa_info_mbuf_spare_map)) {
14829            /* XXX unwind and free previous fastpath allocations */
14830            BLOGE(sc, "Failed to create dma map for "
14831                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14832            return (1);
14833        }
14834
14835        bxe_init_sge_ring_bit_mask(fp);
14836    }
14837
14838    return (0);
14839}
14840
14841static void
14842bxe_free_hsi_mem(struct bxe_softc *sc)
14843{
14844    struct bxe_fastpath *fp;
14845    int max_agg_queues;
14846    int i, j;
14847
14848    if (sc->parent_dma_tag == NULL) {
14849        return; /* assume nothing was allocated */
14850    }
14851
14852    for (i = 0; i < sc->num_queues; i++) {
14853        fp = &sc->fp[i];
14854
14855        /*******************/
14856        /* FP STATUS BLOCK */
14857        /*******************/
14858
14859        bxe_dma_free(sc, &fp->sb_dma);
14860        memset(&fp->status_block, 0, sizeof(fp->status_block));
14861
14862        /******************/
14863        /* FP TX BD CHAIN */
14864        /******************/
14865
14866        bxe_dma_free(sc, &fp->tx_dma);
14867        fp->tx_chain = NULL;
14868
14869        /******************/
14870        /* FP RX BD CHAIN */
14871        /******************/
14872
14873        bxe_dma_free(sc, &fp->rx_dma);
14874        fp->rx_chain = NULL;
14875
14876        /*******************/
14877        /* FP RX RCQ CHAIN */
14878        /*******************/
14879
14880        bxe_dma_free(sc, &fp->rcq_dma);
14881        fp->rcq_chain = NULL;
14882
14883        /*******************/
14884        /* FP RX SGE CHAIN */
14885        /*******************/
14886
14887        bxe_dma_free(sc, &fp->rx_sge_dma);
14888        fp->rx_sge_chain = NULL;
14889
14890        /***********************/
14891        /* FP TX MBUF DMA MAPS */
14892        /***********************/
14893
14894        if (fp->tx_mbuf_tag != NULL) {
14895            for (j = 0; j < TX_BD_TOTAL; j++) {
14896                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14897                    bus_dmamap_unload(fp->tx_mbuf_tag,
14898                                      fp->tx_mbuf_chain[j].m_map);
14899                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14900                                       fp->tx_mbuf_chain[j].m_map);
14901                }
14902            }
14903
14904            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14905            fp->tx_mbuf_tag = NULL;
14906        }
14907
14908        /***********************/
14909        /* FP RX MBUF DMA MAPS */
14910        /***********************/
14911
14912        if (fp->rx_mbuf_tag != NULL) {
14913            for (j = 0; j < RX_BD_TOTAL; j++) {
14914                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14915                    bus_dmamap_unload(fp->rx_mbuf_tag,
14916                                      fp->rx_mbuf_chain[j].m_map);
14917                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14918                                       fp->rx_mbuf_chain[j].m_map);
14919                }
14920            }
14921
14922            if (fp->rx_mbuf_spare_map != NULL) {
14923                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14924                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14925            }
14926
14927            /***************************/
14928            /* FP RX TPA MBUF DMA MAPS */
14929            /***************************/
14930
14931            max_agg_queues = MAX_AGG_QS(sc);
14932
14933            for (j = 0; j < max_agg_queues; j++) {
14934                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14935                    bus_dmamap_unload(fp->rx_mbuf_tag,
14936                                      fp->rx_tpa_info[j].bd.m_map);
14937                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14938                                       fp->rx_tpa_info[j].bd.m_map);
14939                }
14940            }
14941
14942            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14943                bus_dmamap_unload(fp->rx_mbuf_tag,
14944                                  fp->rx_tpa_info_mbuf_spare_map);
14945                bus_dmamap_destroy(fp->rx_mbuf_tag,
14946                                   fp->rx_tpa_info_mbuf_spare_map);
14947            }
14948
14949            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14950            fp->rx_mbuf_tag = NULL;
14951        }
14952
14953        /***************************/
14954        /* FP RX SGE MBUF DMA MAPS */
14955        /***************************/
14956
14957        if (fp->rx_sge_mbuf_tag != NULL) {
14958            for (j = 0; j < RX_SGE_TOTAL; j++) {
14959                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14960                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14961                                      fp->rx_sge_mbuf_chain[j].m_map);
14962                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14963                                       fp->rx_sge_mbuf_chain[j].m_map);
14964                }
14965            }
14966
14967            if (fp->rx_sge_mbuf_spare_map != NULL) {
14968                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14969                                  fp->rx_sge_mbuf_spare_map);
14970                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14971                                   fp->rx_sge_mbuf_spare_map);
14972            }
14973
14974            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14975            fp->rx_sge_mbuf_tag = NULL;
14976        }
14977    }
14978
14979    /***************************/
14980    /* FW DECOMPRESSION BUFFER */
14981    /***************************/
14982
14983    bxe_dma_free(sc, &sc->gz_buf_dma);
14984    sc->gz_buf = NULL;
14985    free(sc->gz_strm, M_DEVBUF);
14986    sc->gz_strm = NULL;
14987
14988    /*******************/
14989    /* SLOW PATH QUEUE */
14990    /*******************/
14991
14992    bxe_dma_free(sc, &sc->spq_dma);
14993    sc->spq = NULL;
14994
14995    /*************/
14996    /* SLOW PATH */
14997    /*************/
14998
14999    bxe_dma_free(sc, &sc->sp_dma);
15000    sc->sp = NULL;
15001
15002    /***************/
15003    /* EVENT QUEUE */
15004    /***************/
15005
15006    bxe_dma_free(sc, &sc->eq_dma);
15007    sc->eq = NULL;
15008
15009    /************************/
15010    /* DEFAULT STATUS BLOCK */
15011    /************************/
15012
15013    bxe_dma_free(sc, &sc->def_sb_dma);
15014    sc->def_sb = NULL;
15015
15016    bus_dma_tag_destroy(sc->parent_dma_tag);
15017    sc->parent_dma_tag = NULL;
15018}
15019
15020/*
15021 * Previous driver DMAE transaction may have occurred when pre-boot stage
15022 * ended and boot began. This would invalidate the addresses of the
15023 * transaction, resulting in was-error bit set in the PCI causing all
15024 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
15025 * the interrupt which detected this from the pglueb and the was-done bit
15026 */
15027static void
15028bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15029{
15030    uint32_t val;
15031
15032    if (!CHIP_IS_E1x(sc)) {
15033        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15034        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15035            BLOGD(sc, DBG_LOAD,
15036                  "Clearing 'was-error' bit that was set in pglueb");
15037            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15038        }
15039    }
15040}
15041
15042static int
15043bxe_prev_mcp_done(struct bxe_softc *sc)
15044{
15045    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15046                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15047    if (!rc) {
15048        BLOGE(sc, "MCP response failure, aborting\n");
15049        return (-1);
15050    }
15051
15052    return (0);
15053}
15054
15055static struct bxe_prev_list_node *
15056bxe_prev_path_get_entry(struct bxe_softc *sc)
15057{
15058    struct bxe_prev_list_node *tmp;
15059
15060    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15061        if ((sc->pcie_bus == tmp->bus) &&
15062            (sc->pcie_device == tmp->slot) &&
15063            (SC_PATH(sc) == tmp->path)) {
15064            return (tmp);
15065        }
15066    }
15067
15068    return (NULL);
15069}
15070
15071static uint8_t
15072bxe_prev_is_path_marked(struct bxe_softc *sc)
15073{
15074    struct bxe_prev_list_node *tmp;
15075    int rc = FALSE;
15076
15077    mtx_lock(&bxe_prev_mtx);
15078
15079    tmp = bxe_prev_path_get_entry(sc);
15080    if (tmp) {
15081        if (tmp->aer) {
15082            BLOGD(sc, DBG_LOAD,
15083                  "Path %d/%d/%d was marked by AER\n",
15084                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15085        } else {
15086            rc = TRUE;
15087            BLOGD(sc, DBG_LOAD,
15088                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15089                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15090        }
15091    }
15092
15093    mtx_unlock(&bxe_prev_mtx);
15094
15095    return (rc);
15096}
15097
15098static int
15099bxe_prev_mark_path(struct bxe_softc *sc,
15100                   uint8_t          after_undi)
15101{
15102    struct bxe_prev_list_node *tmp;
15103
15104    mtx_lock(&bxe_prev_mtx);
15105
15106    /* Check whether the entry for this path already exists */
15107    tmp = bxe_prev_path_get_entry(sc);
15108    if (tmp) {
15109        if (!tmp->aer) {
15110            BLOGD(sc, DBG_LOAD,
15111                  "Re-marking AER in path %d/%d/%d\n",
15112                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15113        } else {
15114            BLOGD(sc, DBG_LOAD,
15115                  "Removing AER indication from path %d/%d/%d\n",
15116                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15117            tmp->aer = 0;
15118        }
15119
15120        mtx_unlock(&bxe_prev_mtx);
15121        return (0);
15122    }
15123
15124    mtx_unlock(&bxe_prev_mtx);
15125
15126    /* Create an entry for this path and add it */
15127    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15128                 (M_NOWAIT | M_ZERO));
15129    if (!tmp) {
15130        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15131        return (-1);
15132    }
15133
15134    tmp->bus  = sc->pcie_bus;
15135    tmp->slot = sc->pcie_device;
15136    tmp->path = SC_PATH(sc);
15137    tmp->aer  = 0;
15138    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15139
15140    mtx_lock(&bxe_prev_mtx);
15141
15142    BLOGD(sc, DBG_LOAD,
15143          "Marked path %d/%d/%d - finished previous unload\n",
15144          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15145    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15146
15147    mtx_unlock(&bxe_prev_mtx);
15148
15149    return (0);
15150}
15151
15152static int
15153bxe_do_flr(struct bxe_softc *sc)
15154{
15155    int i;
15156
15157    /* only E2 and onwards support FLR */
15158    if (CHIP_IS_E1x(sc)) {
15159        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15160        return (-1);
15161    }
15162
15163    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15164    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15165        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15166              sc->devinfo.bc_ver);
15167        return (-1);
15168    }
15169
15170    /* Wait for Transaction Pending bit clean */
15171    for (i = 0; i < 4; i++) {
15172        if (i) {
15173            DELAY(((1 << (i - 1)) * 100) * 1000);
15174        }
15175
15176        if (!bxe_is_pcie_pending(sc)) {
15177            goto clear;
15178        }
15179    }
15180
15181    BLOGE(sc, "PCIE transaction is not cleared, "
15182              "proceeding with reset anyway\n");
15183
15184clear:
15185
15186    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15187    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15188
15189    return (0);
15190}
15191
15192struct bxe_mac_vals {
15193    uint32_t xmac_addr;
15194    uint32_t xmac_val;
15195    uint32_t emac_addr;
15196    uint32_t emac_val;
15197    uint32_t umac_addr;
15198    uint32_t umac_val;
15199    uint32_t bmac_addr;
15200    uint32_t bmac_val[2];
15201};
15202
15203static void
15204bxe_prev_unload_close_mac(struct bxe_softc *sc,
15205                          struct bxe_mac_vals *vals)
15206{
15207    uint32_t val, base_addr, offset, mask, reset_reg;
15208    uint8_t mac_stopped = FALSE;
15209    uint8_t port = SC_PORT(sc);
15210    uint32_t wb_data[2];
15211
15212    /* reset addresses as they also mark which values were changed */
15213    vals->bmac_addr = 0;
15214    vals->umac_addr = 0;
15215    vals->xmac_addr = 0;
15216    vals->emac_addr = 0;
15217
15218    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15219
15220    if (!CHIP_IS_E3(sc)) {
15221        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15222        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15223        if ((mask & reset_reg) && val) {
15224            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15225            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15226                                    : NIG_REG_INGRESS_BMAC0_MEM;
15227            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15228                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15229
15230            /*
15231             * use rd/wr since we cannot use dmae. This is safe
15232             * since MCP won't access the bus due to the request
15233             * to unload, and no function on the path can be
15234             * loaded at this time.
15235             */
15236            wb_data[0] = REG_RD(sc, base_addr + offset);
15237            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15238            vals->bmac_addr = base_addr + offset;
15239            vals->bmac_val[0] = wb_data[0];
15240            vals->bmac_val[1] = wb_data[1];
15241            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15242            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15243            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15244        }
15245
15246        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15247        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15248        vals->emac_val = REG_RD(sc, vals->emac_addr);
15249        REG_WR(sc, vals->emac_addr, 0);
15250        mac_stopped = TRUE;
15251    } else {
15252        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15253            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15254            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15255            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15256            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15257            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15258            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15259            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15260            REG_WR(sc, vals->xmac_addr, 0);
15261            mac_stopped = TRUE;
15262        }
15263
15264        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15265        if (mask & reset_reg) {
15266            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15267            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15268            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15269            vals->umac_val = REG_RD(sc, vals->umac_addr);
15270            REG_WR(sc, vals->umac_addr, 0);
15271            mac_stopped = TRUE;
15272        }
15273    }
15274
15275    if (mac_stopped) {
15276        DELAY(20000);
15277    }
15278}
15279
15280#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15281#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15282#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15283#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15284
15285static void
15286bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15287                         uint8_t          port,
15288                         uint8_t          inc)
15289{
15290    uint16_t rcq, bd;
15291    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15292
15293    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15294    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15295
15296    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15297    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15298
15299    BLOGD(sc, DBG_LOAD,
15300          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15301          port, bd, rcq);
15302}
15303
15304static int
15305bxe_prev_unload_common(struct bxe_softc *sc)
15306{
15307    uint32_t reset_reg, tmp_reg = 0, rc;
15308    uint8_t prev_undi = FALSE;
15309    struct bxe_mac_vals mac_vals;
15310    uint32_t timer_count = 1000;
15311    uint32_t prev_brb;
15312
15313    /*
15314     * It is possible a previous function received 'common' answer,
15315     * but hasn't loaded yet, therefore creating a scenario of
15316     * multiple functions receiving 'common' on the same path.
15317     */
15318    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15319
15320    memset(&mac_vals, 0, sizeof(mac_vals));
15321
15322    if (bxe_prev_is_path_marked(sc)) {
15323        return (bxe_prev_mcp_done(sc));
15324    }
15325
15326    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15327
15328    /* Reset should be performed after BRB is emptied */
15329    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15330        /* Close the MAC Rx to prevent BRB from filling up */
15331        bxe_prev_unload_close_mac(sc, &mac_vals);
15332
15333        /* close LLH filters towards the BRB */
15334        elink_set_rx_filter(&sc->link_params, 0);
15335
15336        /*
15337         * Check if the UNDI driver was previously loaded.
15338         * UNDI driver initializes CID offset for normal bell to 0x7
15339         */
15340        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15341            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15342            if (tmp_reg == 0x7) {
15343                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15344                prev_undi = TRUE;
15345                /* clear the UNDI indication */
15346                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15347                /* clear possible idle check errors */
15348                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15349            }
15350        }
15351
15352        /* wait until BRB is empty */
15353        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15354        while (timer_count) {
15355            prev_brb = tmp_reg;
15356
15357            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15358            if (!tmp_reg) {
15359                break;
15360            }
15361
15362            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15363
15364            /* reset timer as long as BRB actually gets emptied */
15365            if (prev_brb > tmp_reg) {
15366                timer_count = 1000;
15367            } else {
15368                timer_count--;
15369            }
15370
15371            /* If UNDI resides in memory, manually increment it */
15372            if (prev_undi) {
15373                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15374            }
15375
15376            DELAY(10);
15377        }
15378
15379        if (!timer_count) {
15380            BLOGE(sc, "Failed to empty BRB\n");
15381        }
15382    }
15383
15384    /* No packets are in the pipeline, path is ready for reset */
15385    bxe_reset_common(sc);
15386
15387    if (mac_vals.xmac_addr) {
15388        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15389    }
15390    if (mac_vals.umac_addr) {
15391        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15392    }
15393    if (mac_vals.emac_addr) {
15394        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15395    }
15396    if (mac_vals.bmac_addr) {
15397        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15398        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15399    }
15400
15401    rc = bxe_prev_mark_path(sc, prev_undi);
15402    if (rc) {
15403        bxe_prev_mcp_done(sc);
15404        return (rc);
15405    }
15406
15407    return (bxe_prev_mcp_done(sc));
15408}
15409
15410static int
15411bxe_prev_unload_uncommon(struct bxe_softc *sc)
15412{
15413    int rc;
15414
15415    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15416
15417    /* Test if previous unload process was already finished for this path */
15418    if (bxe_prev_is_path_marked(sc)) {
15419        return (bxe_prev_mcp_done(sc));
15420    }
15421
15422    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15423
15424    /*
15425     * If function has FLR capabilities, and existing FW version matches
15426     * the one required, then FLR will be sufficient to clean any residue
15427     * left by previous driver
15428     */
15429    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15430    if (!rc) {
15431        /* fw version is good */
15432        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15433        rc = bxe_do_flr(sc);
15434    }
15435
15436    if (!rc) {
15437        /* FLR was performed */
15438        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15439        return (0);
15440    }
15441
15442    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15443
15444    /* Close the MCP request, return failure*/
15445    rc = bxe_prev_mcp_done(sc);
15446    if (!rc) {
15447        rc = BXE_PREV_WAIT_NEEDED;
15448    }
15449
15450    return (rc);
15451}
15452
15453static int
15454bxe_prev_unload(struct bxe_softc *sc)
15455{
15456    int time_counter = 10;
15457    uint32_t fw, hw_lock_reg, hw_lock_val;
15458    uint32_t rc = 0;
15459
15460    /*
15461     * Clear HW from errors which may have resulted from an interrupted
15462     * DMAE transaction.
15463     */
15464    bxe_prev_interrupted_dmae(sc);
15465
15466    /* Release previously held locks */
15467    hw_lock_reg =
15468        (SC_FUNC(sc) <= 5) ?
15469            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15470            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15471
15472    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15473    if (hw_lock_val) {
15474        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15475            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15476            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15477                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15478        }
15479        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15480        REG_WR(sc, hw_lock_reg, 0xffffffff);
15481    } else {
15482        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15483    }
15484
15485    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15486        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15487        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15488    }
15489
15490    do {
15491        /* Lock MCP using an unload request */
15492        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15493        if (!fw) {
15494            BLOGE(sc, "MCP response failure, aborting\n");
15495            rc = -1;
15496            break;
15497        }
15498
15499        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15500            rc = bxe_prev_unload_common(sc);
15501            break;
15502        }
15503
15504        /* non-common reply from MCP night require looping */
15505        rc = bxe_prev_unload_uncommon(sc);
15506        if (rc != BXE_PREV_WAIT_NEEDED) {
15507            break;
15508        }
15509
15510        DELAY(20000);
15511    } while (--time_counter);
15512
15513    if (!time_counter || rc) {
15514        BLOGE(sc, "Failed to unload previous driver!"
15515            " time_counter %d rc %d\n", time_counter, rc);
15516        rc = -1;
15517    }
15518
15519    return (rc);
15520}
15521
15522void
15523bxe_dcbx_set_state(struct bxe_softc *sc,
15524                   uint8_t          dcb_on,
15525                   uint32_t         dcbx_enabled)
15526{
15527    if (!CHIP_IS_E1x(sc)) {
15528        sc->dcb_state = dcb_on;
15529        sc->dcbx_enabled = dcbx_enabled;
15530    } else {
15531        sc->dcb_state = FALSE;
15532        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15533    }
15534    BLOGD(sc, DBG_LOAD,
15535          "DCB state [%s:%s]\n",
15536          dcb_on ? "ON" : "OFF",
15537          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15538          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15539          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15540          "on-chip with negotiation" : "invalid");
15541}
15542
15543/* must be called after sriov-enable */
15544static int
15545bxe_set_qm_cid_count(struct bxe_softc *sc)
15546{
15547    int cid_count = BXE_L2_MAX_CID(sc);
15548
15549    if (IS_SRIOV(sc)) {
15550        cid_count += BXE_VF_CIDS;
15551    }
15552
15553    if (CNIC_SUPPORT(sc)) {
15554        cid_count += CNIC_CID_MAX;
15555    }
15556
15557    return (roundup(cid_count, QM_CID_ROUND));
15558}
15559
15560static void
15561bxe_init_multi_cos(struct bxe_softc *sc)
15562{
15563    int pri, cos;
15564
15565    uint32_t pri_map = 0; /* XXX change to user config */
15566
15567    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15568        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15569        if (cos < sc->max_cos) {
15570            sc->prio_to_cos[pri] = cos;
15571        } else {
15572            BLOGW(sc, "Invalid COS %d for priority %d "
15573                      "(max COS is %d), setting to 0\n",
15574                  cos, pri, (sc->max_cos - 1));
15575            sc->prio_to_cos[pri] = 0;
15576        }
15577    }
15578}
15579
15580static int
15581bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15582{
15583    struct bxe_softc *sc;
15584    int error, result;
15585
15586    result = 0;
15587    error = sysctl_handle_int(oidp, &result, 0, req);
15588
15589    if (error || !req->newptr) {
15590        return (error);
15591    }
15592
15593    if (result == 1) {
15594        uint32_t  temp;
15595        sc = (struct bxe_softc *)arg1;
15596
15597        BLOGI(sc, "... dumping driver state ...\n");
15598        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15599        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15600    }
15601
15602    return (error);
15603}
15604
15605static int
15606bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15607{
15608    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15609    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15610    uint32_t *offset;
15611    uint64_t value = 0;
15612    int index = (int)arg2;
15613
15614    if (index >= BXE_NUM_ETH_STATS) {
15615        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15616        return (-1);
15617    }
15618
15619    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15620
15621    switch (bxe_eth_stats_arr[index].size) {
15622    case 4:
15623        value = (uint64_t)*offset;
15624        break;
15625    case 8:
15626        value = HILO_U64(*offset, *(offset + 1));
15627        break;
15628    default:
15629        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15630              index, bxe_eth_stats_arr[index].size);
15631        return (-1);
15632    }
15633
15634    return (sysctl_handle_64(oidp, &value, 0, req));
15635}
15636
15637static int
15638bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15639{
15640    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15641    uint32_t *eth_stats;
15642    uint32_t *offset;
15643    uint64_t value = 0;
15644    uint32_t q_stat = (uint32_t)arg2;
15645    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15646    uint32_t index = (q_stat & 0xffff);
15647
15648    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15649
15650    if (index >= BXE_NUM_ETH_Q_STATS) {
15651        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15652        return (-1);
15653    }
15654
15655    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15656
15657    switch (bxe_eth_q_stats_arr[index].size) {
15658    case 4:
15659        value = (uint64_t)*offset;
15660        break;
15661    case 8:
15662        value = HILO_U64(*offset, *(offset + 1));
15663        break;
15664    default:
15665        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15666              index, bxe_eth_q_stats_arr[index].size);
15667        return (-1);
15668    }
15669
15670    return (sysctl_handle_64(oidp, &value, 0, req));
15671}
15672
15673static void bxe_force_link_reset(struct bxe_softc *sc)
15674{
15675
15676        bxe_acquire_phy_lock(sc);
15677        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15678        bxe_release_phy_lock(sc);
15679}
15680
15681static int
15682bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15683{
15684        struct bxe_softc *sc = (struct bxe_softc *)arg1;;
15685        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15686        int rc = 0;
15687        int error;
15688        int result;
15689
15690
15691        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15692
15693        if (error || !req->newptr) {
15694                return (error);
15695        }
15696        if ((sc->bxe_pause_param < 0) ||  (sc->bxe_pause_param > 8)) {
15697                BLOGW(sc, "invalid pause param (%d) - use intergers between 1 & 8\n",sc->bxe_pause_param);
15698                sc->bxe_pause_param = 8;
15699        }
15700
15701        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15702
15703
15704        if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg))  {
15705                        BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15706                        return -EINVAL;
15707        }
15708
15709        if(IS_MF(sc))
15710                return 0;
15711       sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15712        if(result & ELINK_FLOW_CTRL_RX)
15713                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15714
15715        if(result & ELINK_FLOW_CTRL_TX)
15716                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15717        if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15718                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15719
15720        if(result & 0x400) {
15721                if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15722                        sc->link_params.req_flow_ctrl[cfg_idx] =
15723                                ELINK_FLOW_CTRL_AUTO;
15724                }
15725                sc->link_params.req_fc_auto_adv = 0;
15726                if (result & ELINK_FLOW_CTRL_RX)
15727                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15728
15729                if (result & ELINK_FLOW_CTRL_TX)
15730                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15731                if (!sc->link_params.req_fc_auto_adv)
15732                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15733        }
15734         if (IS_PF(sc)) {
15735                        if (sc->link_vars.link_up) {
15736                                bxe_stats_handle(sc, STATS_EVENT_STOP);
15737                        }
15738			if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15739                        bxe_force_link_reset(sc);
15740                        bxe_acquire_phy_lock(sc);
15741
15742                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15743
15744                        bxe_release_phy_lock(sc);
15745
15746                        bxe_calc_fc_adv(sc);
15747                        }
15748        }
15749        return rc;
15750}
15751
15752
15753static void
15754bxe_add_sysctls(struct bxe_softc *sc)
15755{
15756    struct sysctl_ctx_list *ctx;
15757    struct sysctl_oid_list *children;
15758    struct sysctl_oid *queue_top, *queue;
15759    struct sysctl_oid_list *queue_top_children, *queue_children;
15760    char queue_num_buf[32];
15761    uint32_t q_stat;
15762    int i, j;
15763
15764    ctx = device_get_sysctl_ctx(sc->dev);
15765    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15766
15767    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15768                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15769                      "version");
15770
15771    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15772             BCM_5710_FW_MAJOR_VERSION,
15773             BCM_5710_FW_MINOR_VERSION,
15774             BCM_5710_FW_REVISION_VERSION,
15775             BCM_5710_FW_ENGINEERING_VERSION);
15776
15777    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15778        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15779         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15780         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15781         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15782                                                                "Unknown"));
15783    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15784                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15785                    "multifunction vnics per port");
15786
15787    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15788        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15789         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15790         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15791                                              "???GT/s"),
15792        sc->devinfo.pcie_link_width);
15793
15794    sc->debug = bxe_debug;
15795
15796#if __FreeBSD_version >= 900000
15797    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15798                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15799                      "bootcode version");
15800    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15801                      CTLFLAG_RD, sc->fw_ver_str, 0,
15802                      "firmware version");
15803    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15804                      CTLFLAG_RD, sc->mf_mode_str, 0,
15805                      "multifunction mode");
15806    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15807                      CTLFLAG_RD, sc->mac_addr_str, 0,
15808                      "mac address");
15809    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15810                      CTLFLAG_RD, sc->pci_link_str, 0,
15811                      "pci link status");
15812    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15813                    CTLFLAG_RW, &sc->debug,
15814                    "debug logging mode");
15815#else
15816    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15817                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15818                      "bootcode version");
15819    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15820                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15821                      "firmware version");
15822    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15823                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15824                      "multifunction mode");
15825    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15826                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15827                      "mac address");
15828    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15829                      CTLFLAG_RD, &sc->pci_link_str, 0,
15830                      "pci link status");
15831    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15832                    CTLFLAG_RW, &sc->debug, 0,
15833                    "debug logging mode");
15834#endif /* #if __FreeBSD_version >= 900000 */
15835
15836    sc->trigger_grcdump = 0;
15837    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15838                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15839                   "trigger grcdump should be invoked"
15840                   "  before collecting grcdump");
15841
15842    sc->grcdump_started = 0;
15843    sc->grcdump_done = 0;
15844    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15845                   CTLFLAG_RD, &sc->grcdump_done, 0,
15846                   "set by driver when grcdump is done");
15847
15848    sc->rx_budget = bxe_rx_budget;
15849    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15850                    CTLFLAG_RW, &sc->rx_budget, 0,
15851                    "rx processing budget");
15852
15853   SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
15854                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15855                    bxe_sysctl_pauseparam, "IU",
15856                    "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
15857
15858
15859    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15860                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15861                    bxe_sysctl_state, "IU", "dump driver state");
15862
15863    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15864        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15865                        bxe_eth_stats_arr[i].string,
15866                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15867                        bxe_sysctl_eth_stat, "LU",
15868                        bxe_eth_stats_arr[i].string);
15869    }
15870
15871    /* add a new parent node for all queues "dev.bxe.#.queue" */
15872    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15873                                CTLFLAG_RD, NULL, "queue");
15874    queue_top_children = SYSCTL_CHILDREN(queue_top);
15875
15876    for (i = 0; i < sc->num_queues; i++) {
15877        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15878        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15879        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15880                                queue_num_buf, CTLFLAG_RD, NULL,
15881                                "single queue");
15882        queue_children = SYSCTL_CHILDREN(queue);
15883
15884        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15885            q_stat = ((i << 16) | j);
15886            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15887                            bxe_eth_q_stats_arr[j].string,
15888                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15889                            bxe_sysctl_eth_q_stat, "LU",
15890                            bxe_eth_q_stats_arr[j].string);
15891        }
15892    }
15893}
15894
15895static int
15896bxe_alloc_buf_rings(struct bxe_softc *sc)
15897{
15898#if __FreeBSD_version >= 901504
15899
15900    int i;
15901    struct bxe_fastpath *fp;
15902
15903    for (i = 0; i < sc->num_queues; i++) {
15904
15905        fp = &sc->fp[i];
15906
15907        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15908                                   M_NOWAIT, &fp->tx_mtx);
15909        if (fp->tx_br == NULL)
15910            return (-1);
15911    }
15912#endif
15913    return (0);
15914}
15915
15916static void
15917bxe_free_buf_rings(struct bxe_softc *sc)
15918{
15919#if __FreeBSD_version >= 901504
15920
15921    int i;
15922    struct bxe_fastpath *fp;
15923
15924    for (i = 0; i < sc->num_queues; i++) {
15925
15926        fp = &sc->fp[i];
15927
15928        if (fp->tx_br) {
15929            buf_ring_free(fp->tx_br, M_DEVBUF);
15930            fp->tx_br = NULL;
15931        }
15932    }
15933
15934#endif
15935}
15936
15937static void
15938bxe_init_fp_mutexs(struct bxe_softc *sc)
15939{
15940    int i;
15941    struct bxe_fastpath *fp;
15942
15943    for (i = 0; i < sc->num_queues; i++) {
15944
15945        fp = &sc->fp[i];
15946
15947        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15948            "bxe%d_fp%d_tx_lock", sc->unit, i);
15949        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15950
15951        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15952            "bxe%d_fp%d_rx_lock", sc->unit, i);
15953        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15954    }
15955}
15956
15957static void
15958bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15959{
15960    int i;
15961    struct bxe_fastpath *fp;
15962
15963    for (i = 0; i < sc->num_queues; i++) {
15964
15965        fp = &sc->fp[i];
15966
15967        if (mtx_initialized(&fp->tx_mtx)) {
15968            mtx_destroy(&fp->tx_mtx);
15969        }
15970
15971        if (mtx_initialized(&fp->rx_mtx)) {
15972            mtx_destroy(&fp->rx_mtx);
15973        }
15974    }
15975}
15976
15977
15978/*
15979 * Device attach function.
15980 *
15981 * Allocates device resources, performs secondary chip identification, and
15982 * initializes driver instance variables. This function is called from driver
15983 * load after a successful probe.
15984 *
15985 * Returns:
15986 *   0 = Success, >0 = Failure
15987 */
15988static int
15989bxe_attach(device_t dev)
15990{
15991    struct bxe_softc *sc;
15992
15993    sc = device_get_softc(dev);
15994
15995    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15996
15997    sc->state = BXE_STATE_CLOSED;
15998
15999    sc->dev  = dev;
16000    sc->unit = device_get_unit(dev);
16001
16002    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16003
16004    sc->pcie_bus    = pci_get_bus(dev);
16005    sc->pcie_device = pci_get_slot(dev);
16006    sc->pcie_func   = pci_get_function(dev);
16007
16008    /* enable bus master capability */
16009    pci_enable_busmaster(dev);
16010
16011    /* get the BARs */
16012    if (bxe_allocate_bars(sc) != 0) {
16013        return (ENXIO);
16014    }
16015
16016    /* initialize the mutexes */
16017    bxe_init_mutexes(sc);
16018
16019    /* prepare the periodic callout */
16020    callout_init(&sc->periodic_callout, 0);
16021
16022    /* prepare the chip taskqueue */
16023    sc->chip_tq_flags = CHIP_TQ_NONE;
16024    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16025             "bxe%d_chip_tq", sc->unit);
16026    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16027    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16028                                   taskqueue_thread_enqueue,
16029                                   &sc->chip_tq);
16030    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16031                            "%s", sc->chip_tq_name);
16032
16033    /* get device info and set params */
16034    if (bxe_get_device_info(sc) != 0) {
16035        BLOGE(sc, "getting device info\n");
16036        bxe_deallocate_bars(sc);
16037        pci_disable_busmaster(dev);
16038        return (ENXIO);
16039    }
16040
16041    /* get final misc params */
16042    bxe_get_params(sc);
16043
16044    /* set the default MTU (changed via ifconfig) */
16045    sc->mtu = ETHERMTU;
16046
16047    bxe_set_modes_bitmap(sc);
16048
16049    /* XXX
16050     * If in AFEX mode and the function is configured for FCoE
16051     * then bail... no L2 allowed.
16052     */
16053
16054    /* get phy settings from shmem and 'and' against admin settings */
16055    bxe_get_phy_info(sc);
16056
16057    /* initialize the FreeBSD ifnet interface */
16058    if (bxe_init_ifnet(sc) != 0) {
16059        bxe_release_mutexes(sc);
16060        bxe_deallocate_bars(sc);
16061        pci_disable_busmaster(dev);
16062        return (ENXIO);
16063    }
16064
16065    if (bxe_add_cdev(sc) != 0) {
16066        if (sc->ifp != NULL) {
16067            ether_ifdetach(sc->ifp);
16068        }
16069        ifmedia_removeall(&sc->ifmedia);
16070        bxe_release_mutexes(sc);
16071        bxe_deallocate_bars(sc);
16072        pci_disable_busmaster(dev);
16073        return (ENXIO);
16074    }
16075
16076    /* allocate device interrupts */
16077    if (bxe_interrupt_alloc(sc) != 0) {
16078        bxe_del_cdev(sc);
16079        if (sc->ifp != NULL) {
16080            ether_ifdetach(sc->ifp);
16081        }
16082        ifmedia_removeall(&sc->ifmedia);
16083        bxe_release_mutexes(sc);
16084        bxe_deallocate_bars(sc);
16085        pci_disable_busmaster(dev);
16086        return (ENXIO);
16087    }
16088
16089    bxe_init_fp_mutexs(sc);
16090
16091    if (bxe_alloc_buf_rings(sc) != 0) {
16092	bxe_free_buf_rings(sc);
16093        bxe_interrupt_free(sc);
16094        bxe_del_cdev(sc);
16095        if (sc->ifp != NULL) {
16096            ether_ifdetach(sc->ifp);
16097        }
16098        ifmedia_removeall(&sc->ifmedia);
16099        bxe_release_mutexes(sc);
16100        bxe_deallocate_bars(sc);
16101        pci_disable_busmaster(dev);
16102        return (ENXIO);
16103    }
16104
16105    /* allocate ilt */
16106    if (bxe_alloc_ilt_mem(sc) != 0) {
16107	bxe_free_buf_rings(sc);
16108        bxe_interrupt_free(sc);
16109        bxe_del_cdev(sc);
16110        if (sc->ifp != NULL) {
16111            ether_ifdetach(sc->ifp);
16112        }
16113        ifmedia_removeall(&sc->ifmedia);
16114        bxe_release_mutexes(sc);
16115        bxe_deallocate_bars(sc);
16116        pci_disable_busmaster(dev);
16117        return (ENXIO);
16118    }
16119
16120    /* allocate the host hardware/software hsi structures */
16121    if (bxe_alloc_hsi_mem(sc) != 0) {
16122        bxe_free_ilt_mem(sc);
16123	bxe_free_buf_rings(sc);
16124        bxe_interrupt_free(sc);
16125        bxe_del_cdev(sc);
16126        if (sc->ifp != NULL) {
16127            ether_ifdetach(sc->ifp);
16128        }
16129        ifmedia_removeall(&sc->ifmedia);
16130        bxe_release_mutexes(sc);
16131        bxe_deallocate_bars(sc);
16132        pci_disable_busmaster(dev);
16133        return (ENXIO);
16134    }
16135
16136    /* need to reset chip if UNDI was active */
16137    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16138        /* init fw_seq */
16139        sc->fw_seq =
16140            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16141             DRV_MSG_SEQ_NUMBER_MASK);
16142        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16143        bxe_prev_unload(sc);
16144    }
16145
16146#if 1
16147    /* XXX */
16148    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16149#else
16150    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16151        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16152        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16153        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16154        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16155        bxe_dcbx_init_params(sc);
16156    } else {
16157        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16158    }
16159#endif
16160
16161    /* calculate qm_cid_count */
16162    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16163    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16164
16165    sc->max_cos = 1;
16166    bxe_init_multi_cos(sc);
16167
16168    bxe_add_sysctls(sc);
16169
16170    return (0);
16171}
16172
16173/*
16174 * Device detach function.
16175 *
16176 * Stops the controller, resets the controller, and releases resources.
16177 *
16178 * Returns:
16179 *   0 = Success, >0 = Failure
16180 */
16181static int
16182bxe_detach(device_t dev)
16183{
16184    struct bxe_softc *sc;
16185    if_t ifp;
16186
16187    sc = device_get_softc(dev);
16188
16189    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16190
16191    ifp = sc->ifp;
16192    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16193        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16194        return(EBUSY);
16195    }
16196
16197    bxe_del_cdev(sc);
16198
16199    /* stop the periodic callout */
16200    bxe_periodic_stop(sc);
16201
16202    /* stop the chip taskqueue */
16203    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16204    if (sc->chip_tq) {
16205        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16206        taskqueue_free(sc->chip_tq);
16207        sc->chip_tq = NULL;
16208    }
16209
16210    /* stop and reset the controller if it was open */
16211    if (sc->state != BXE_STATE_CLOSED) {
16212        BXE_CORE_LOCK(sc);
16213        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16214        sc->state = BXE_STATE_DISABLED;
16215        BXE_CORE_UNLOCK(sc);
16216    }
16217
16218    /* release the network interface */
16219    if (ifp != NULL) {
16220        ether_ifdetach(ifp);
16221    }
16222    ifmedia_removeall(&sc->ifmedia);
16223
16224    /* XXX do the following based on driver state... */
16225
16226    /* free the host hardware/software hsi structures */
16227    bxe_free_hsi_mem(sc);
16228
16229    /* free ilt */
16230    bxe_free_ilt_mem(sc);
16231
16232    bxe_free_buf_rings(sc);
16233
16234    /* release the interrupts */
16235    bxe_interrupt_free(sc);
16236
16237    /* Release the mutexes*/
16238    bxe_destroy_fp_mutexs(sc);
16239    bxe_release_mutexes(sc);
16240
16241
16242    /* Release the PCIe BAR mapped memory */
16243    bxe_deallocate_bars(sc);
16244
16245    /* Release the FreeBSD interface. */
16246    if (sc->ifp != NULL) {
16247        if_free(sc->ifp);
16248    }
16249
16250    pci_disable_busmaster(dev);
16251
16252    return (0);
16253}
16254
16255/*
16256 * Device shutdown function.
16257 *
16258 * Stops and resets the controller.
16259 *
16260 * Returns:
16261 *   Nothing
16262 */
16263static int
16264bxe_shutdown(device_t dev)
16265{
16266    struct bxe_softc *sc;
16267
16268    sc = device_get_softc(dev);
16269
16270    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16271
16272    /* stop the periodic callout */
16273    bxe_periodic_stop(sc);
16274
16275    BXE_CORE_LOCK(sc);
16276    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16277    BXE_CORE_UNLOCK(sc);
16278
16279    return (0);
16280}
16281
16282void
16283bxe_igu_ack_sb(struct bxe_softc *sc,
16284               uint8_t          igu_sb_id,
16285               uint8_t          segment,
16286               uint16_t         index,
16287               uint8_t          op,
16288               uint8_t          update)
16289{
16290    uint32_t igu_addr = sc->igu_base_addr;
16291    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16292    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16293}
16294
16295static void
16296bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16297                     uint8_t          func,
16298                     uint8_t          idu_sb_id,
16299                     uint8_t          is_pf)
16300{
16301    uint32_t data, ctl, cnt = 100;
16302    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16303    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16304    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16305    uint32_t sb_bit =  1 << (idu_sb_id%32);
16306    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16307    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16308
16309    /* Not supported in BC mode */
16310    if (CHIP_INT_MODE_IS_BC(sc)) {
16311        return;
16312    }
16313
16314    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16315             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16316            IGU_REGULAR_CLEANUP_SET |
16317            IGU_REGULAR_BCLEANUP);
16318
16319    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16320           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16321           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16322
16323    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16324            data, igu_addr_data);
16325    REG_WR(sc, igu_addr_data, data);
16326
16327    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16328                      BUS_SPACE_BARRIER_WRITE);
16329    mb();
16330
16331    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16332            ctl, igu_addr_ctl);
16333    REG_WR(sc, igu_addr_ctl, ctl);
16334
16335    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16336                      BUS_SPACE_BARRIER_WRITE);
16337    mb();
16338
16339    /* wait for clean up to finish */
16340    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16341        DELAY(20000);
16342    }
16343
16344    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16345        BLOGD(sc, DBG_LOAD,
16346              "Unable to finish IGU cleanup: "
16347              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16348              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16349    }
16350}
16351
16352static void
16353bxe_igu_clear_sb(struct bxe_softc *sc,
16354                 uint8_t          idu_sb_id)
16355{
16356    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16357}
16358
16359
16360
16361
16362
16363
16364
16365/*******************/
16366/* ECORE CALLBACKS */
16367/*******************/
16368
16369static void
16370bxe_reset_common(struct bxe_softc *sc)
16371{
16372    uint32_t val = 0x1400;
16373
16374    /* reset_common */
16375    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16376
16377    if (CHIP_IS_E3(sc)) {
16378        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16379        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16380    }
16381
16382    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16383}
16384
16385static void
16386bxe_common_init_phy(struct bxe_softc *sc)
16387{
16388    uint32_t shmem_base[2];
16389    uint32_t shmem2_base[2];
16390
16391    /* Avoid common init in case MFW supports LFA */
16392    if (SHMEM2_RD(sc, size) >
16393        (uint32_t)offsetof(struct shmem2_region,
16394                           lfa_host_addr[SC_PORT(sc)])) {
16395        return;
16396    }
16397
16398    shmem_base[0]  = sc->devinfo.shmem_base;
16399    shmem2_base[0] = sc->devinfo.shmem2_base;
16400
16401    if (!CHIP_IS_E1x(sc)) {
16402        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16403        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16404    }
16405
16406    bxe_acquire_phy_lock(sc);
16407    elink_common_init_phy(sc, shmem_base, shmem2_base,
16408                          sc->devinfo.chip_id, 0);
16409    bxe_release_phy_lock(sc);
16410}
16411
16412static void
16413bxe_pf_disable(struct bxe_softc *sc)
16414{
16415    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16416
16417    val &= ~IGU_PF_CONF_FUNC_EN;
16418
16419    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16420    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16421    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16422}
16423
16424static void
16425bxe_init_pxp(struct bxe_softc *sc)
16426{
16427    uint16_t devctl;
16428    int r_order, w_order;
16429
16430    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16431
16432    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16433
16434    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16435
16436    if (sc->mrrs == -1) {
16437        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16438    } else {
16439        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16440        r_order = sc->mrrs;
16441    }
16442
16443    ecore_init_pxp_arb(sc, r_order, w_order);
16444}
16445
16446static uint32_t
16447bxe_get_pretend_reg(struct bxe_softc *sc)
16448{
16449    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16450    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16451    return (base + (SC_ABS_FUNC(sc)) * stride);
16452}
16453
16454/*
16455 * Called only on E1H or E2.
16456 * When pretending to be PF, the pretend value is the function number 0..7.
16457 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16458 * combination.
16459 */
16460static int
16461bxe_pretend_func(struct bxe_softc *sc,
16462                 uint16_t         pretend_func_val)
16463{
16464    uint32_t pretend_reg;
16465
16466    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16467        return (-1);
16468    }
16469
16470    /* get my own pretend register */
16471    pretend_reg = bxe_get_pretend_reg(sc);
16472    REG_WR(sc, pretend_reg, pretend_func_val);
16473    REG_RD(sc, pretend_reg);
16474    return (0);
16475}
16476
16477static void
16478bxe_iov_init_dmae(struct bxe_softc *sc)
16479{
16480    return;
16481}
16482
16483static void
16484bxe_iov_init_dq(struct bxe_softc *sc)
16485{
16486    return;
16487}
16488
16489/* send a NIG loopback debug packet */
16490static void
16491bxe_lb_pckt(struct bxe_softc *sc)
16492{
16493    uint32_t wb_write[3];
16494
16495    /* Ethernet source and destination addresses */
16496    wb_write[0] = 0x55555555;
16497    wb_write[1] = 0x55555555;
16498    wb_write[2] = 0x20;     /* SOP */
16499    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16500
16501    /* NON-IP protocol */
16502    wb_write[0] = 0x09000000;
16503    wb_write[1] = 0x55555555;
16504    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16505    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16506}
16507
16508/*
16509 * Some of the internal memories are not directly readable from the driver.
16510 * To test them we send debug packets.
16511 */
16512static int
16513bxe_int_mem_test(struct bxe_softc *sc)
16514{
16515    int factor;
16516    int count, i;
16517    uint32_t val = 0;
16518
16519    if (CHIP_REV_IS_FPGA(sc)) {
16520        factor = 120;
16521    } else if (CHIP_REV_IS_EMUL(sc)) {
16522        factor = 200;
16523    } else {
16524        factor = 1;
16525    }
16526
16527    /* disable inputs of parser neighbor blocks */
16528    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16529    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16530    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16531    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16532
16533    /*  write 0 to parser credits for CFC search request */
16534    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16535
16536    /* send Ethernet packet */
16537    bxe_lb_pckt(sc);
16538
16539    /* TODO do i reset NIG statistic? */
16540    /* Wait until NIG register shows 1 packet of size 0x10 */
16541    count = 1000 * factor;
16542    while (count) {
16543        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16544        val = *BXE_SP(sc, wb_data[0]);
16545        if (val == 0x10) {
16546            break;
16547        }
16548
16549        DELAY(10000);
16550        count--;
16551    }
16552
16553    if (val != 0x10) {
16554        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16555        return (-1);
16556    }
16557
16558    /* wait until PRS register shows 1 packet */
16559    count = (1000 * factor);
16560    while (count) {
16561        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16562        if (val == 1) {
16563            break;
16564        }
16565
16566        DELAY(10000);
16567        count--;
16568    }
16569
16570    if (val != 0x1) {
16571        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16572        return (-2);
16573    }
16574
16575    /* Reset and init BRB, PRS */
16576    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16577    DELAY(50000);
16578    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16579    DELAY(50000);
16580    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16581    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16582
16583    /* Disable inputs of parser neighbor blocks */
16584    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16585    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16586    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16587    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16588
16589    /* Write 0 to parser credits for CFC search request */
16590    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16591
16592    /* send 10 Ethernet packets */
16593    for (i = 0; i < 10; i++) {
16594        bxe_lb_pckt(sc);
16595    }
16596
16597    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16598    count = (1000 * factor);
16599    while (count) {
16600        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16601        val = *BXE_SP(sc, wb_data[0]);
16602        if (val == 0xb0) {
16603            break;
16604        }
16605
16606        DELAY(10000);
16607        count--;
16608    }
16609
16610    if (val != 0xb0) {
16611        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16612        return (-3);
16613    }
16614
16615    /* Wait until PRS register shows 2 packets */
16616    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16617    if (val != 2) {
16618        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16619    }
16620
16621    /* Write 1 to parser credits for CFC search request */
16622    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16623
16624    /* Wait until PRS register shows 3 packets */
16625    DELAY(10000 * factor);
16626
16627    /* Wait until NIG register shows 1 packet of size 0x10 */
16628    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16629    if (val != 3) {
16630        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16631    }
16632
16633    /* clear NIG EOP FIFO */
16634    for (i = 0; i < 11; i++) {
16635        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16636    }
16637
16638    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16639    if (val != 1) {
16640        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16641        return (-4);
16642    }
16643
16644    /* Reset and init BRB, PRS, NIG */
16645    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16646    DELAY(50000);
16647    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16648    DELAY(50000);
16649    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16650    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16651    if (!CNIC_SUPPORT(sc)) {
16652        /* set NIC mode */
16653        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16654    }
16655
16656    /* Enable inputs of parser neighbor blocks */
16657    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16658    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16659    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16660    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16661
16662    return (0);
16663}
16664
16665static void
16666bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16667{
16668    int is_required;
16669    uint32_t val;
16670    int port;
16671
16672    is_required = 0;
16673    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16674           SHARED_HW_CFG_FAN_FAILURE_MASK);
16675
16676    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16677        is_required = 1;
16678    }
16679    /*
16680     * The fan failure mechanism is usually related to the PHY type since
16681     * the power consumption of the board is affected by the PHY. Currently,
16682     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16683     */
16684    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16685        for (port = PORT_0; port < PORT_MAX; port++) {
16686            is_required |= elink_fan_failure_det_req(sc,
16687                                                     sc->devinfo.shmem_base,
16688                                                     sc->devinfo.shmem2_base,
16689                                                     port);
16690        }
16691    }
16692
16693    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16694
16695    if (is_required == 0) {
16696        return;
16697    }
16698
16699    /* Fan failure is indicated by SPIO 5 */
16700    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16701
16702    /* set to active low mode */
16703    val = REG_RD(sc, MISC_REG_SPIO_INT);
16704    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16705    REG_WR(sc, MISC_REG_SPIO_INT, val);
16706
16707    /* enable interrupt to signal the IGU */
16708    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16709    val |= MISC_SPIO_SPIO5;
16710    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16711}
16712
16713static void
16714bxe_enable_blocks_attention(struct bxe_softc *sc)
16715{
16716    uint32_t val;
16717
16718    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16719    if (!CHIP_IS_E1x(sc)) {
16720        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16721    } else {
16722        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16723    }
16724    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16725    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16726    /*
16727     * mask read length error interrupts in brb for parser
16728     * (parsing unit and 'checksum and crc' unit)
16729     * these errors are legal (PU reads fixed length and CAC can cause
16730     * read length error on truncated packets)
16731     */
16732    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16733    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16734    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16735    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16736    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16737    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16738/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16739/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16740    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16741    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16742    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16743/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16744/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16745    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16746    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16747    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16748    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16749/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16750/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16751
16752    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16753           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16754           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16755    if (!CHIP_IS_E1x(sc)) {
16756        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16757                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16758    }
16759    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16760
16761    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16762    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16763    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16764/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16765
16766    if (!CHIP_IS_E1x(sc)) {
16767        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16768        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16769    }
16770
16771    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16772    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16773/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16774    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16775}
16776
16777/**
16778 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16779 *
16780 * @sc:     driver handle
16781 */
16782static int
16783bxe_init_hw_common(struct bxe_softc *sc)
16784{
16785    uint8_t abs_func_id;
16786    uint32_t val;
16787
16788    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16789          SC_ABS_FUNC(sc));
16790
16791    /*
16792     * take the RESET lock to protect undi_unload flow from accessing
16793     * registers while we are resetting the chip
16794     */
16795    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16796
16797    bxe_reset_common(sc);
16798
16799    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16800
16801    val = 0xfffc;
16802    if (CHIP_IS_E3(sc)) {
16803        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16804        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16805    }
16806
16807    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16808
16809    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16810
16811    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16812    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16813
16814    if (!CHIP_IS_E1x(sc)) {
16815        /*
16816         * 4-port mode or 2-port mode we need to turn off master-enable for
16817         * everyone. After that we turn it back on for self. So, we disregard
16818         * multi-function, and always disable all functions on the given path,
16819         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
16820         */
16821        for (abs_func_id = SC_PATH(sc);
16822             abs_func_id < (E2_FUNC_MAX * 2);
16823             abs_func_id += 2) {
16824            if (abs_func_id == SC_ABS_FUNC(sc)) {
16825                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16826                continue;
16827            }
16828
16829            bxe_pretend_func(sc, abs_func_id);
16830
16831            /* clear pf enable */
16832            bxe_pf_disable(sc);
16833
16834            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16835        }
16836    }
16837
16838    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16839
16840    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16841
16842    if (CHIP_IS_E1(sc)) {
16843        /*
16844         * enable HW interrupt from PXP on USDM overflow
16845         * bit 16 on INT_MASK_0
16846         */
16847        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16848    }
16849
16850    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16851    bxe_init_pxp(sc);
16852
16853#ifdef __BIG_ENDIAN
16854    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16855    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16856    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16857    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16858    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16859    /* make sure this value is 0 */
16860    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16861
16862    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16863    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16864    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16865    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16866    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16867#endif
16868
16869    ecore_ilt_init_page_size(sc, INITOP_SET);
16870
16871    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16872        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16873    }
16874
16875    /* let the HW do it's magic... */
16876    DELAY(100000);
16877
16878    /* finish PXP init */
16879    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16880    if (val != 1) {
16881        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16882            val);
16883        return (-1);
16884    }
16885    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16886    if (val != 1) {
16887        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16888        return (-1);
16889    }
16890
16891    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16892
16893    /*
16894     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16895     * entries with value "0" and valid bit on. This needs to be done by the
16896     * first PF that is loaded in a path (i.e. common phase)
16897     */
16898    if (!CHIP_IS_E1x(sc)) {
16899/*
16900 * In E2 there is a bug in the timers block that can cause function 6 / 7
16901 * (i.e. vnic3) to start even if it is marked as "scan-off".
16902 * This occurs when a different function (func2,3) is being marked
16903 * as "scan-off". Real-life scenario for example: if a driver is being
16904 * load-unloaded while func6,7 are down. This will cause the timer to access
16905 * the ilt, translate to a logical address and send a request to read/write.
16906 * Since the ilt for the function that is down is not valid, this will cause
16907 * a translation error which is unrecoverable.
16908 * The Workaround is intended to make sure that when this happens nothing
16909 * fatal will occur. The workaround:
16910 *  1.  First PF driver which loads on a path will:
16911 *      a.  After taking the chip out of reset, by using pretend,
16912 *          it will write "0" to the following registers of
16913 *          the other vnics.
16914 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16915 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16916 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16917 *          And for itself it will write '1' to
16918 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16919 *          dmae-operations (writing to pram for example.)
16920 *          note: can be done for only function 6,7 but cleaner this
16921 *            way.
16922 *      b.  Write zero+valid to the entire ILT.
16923 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16924 *          VNIC3 (of that port). The range allocated will be the
16925 *          entire ILT. This is needed to prevent  ILT range error.
16926 *  2.  Any PF driver load flow:
16927 *      a.  ILT update with the physical addresses of the allocated
16928 *          logical pages.
16929 *      b.  Wait 20msec. - note that this timeout is needed to make
16930 *          sure there are no requests in one of the PXP internal
16931 *          queues with "old" ILT addresses.
16932 *      c.  PF enable in the PGLC.
16933 *      d.  Clear the was_error of the PF in the PGLC. (could have
16934 *          occurred while driver was down)
16935 *      e.  PF enable in the CFC (WEAK + STRONG)
16936 *      f.  Timers scan enable
16937 *  3.  PF driver unload flow:
16938 *      a.  Clear the Timers scan_en.
16939 *      b.  Polling for scan_on=0 for that PF.
16940 *      c.  Clear the PF enable bit in the PXP.
16941 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16942 *      e.  Write zero+valid to all ILT entries (The valid bit must
16943 *          stay set)
16944 *      f.  If this is VNIC 3 of a port then also init
16945 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16946 *          to the last enrty in the ILT.
16947 *
16948 *      Notes:
16949 *      Currently the PF error in the PGLC is non recoverable.
16950 *      In the future the there will be a recovery routine for this error.
16951 *      Currently attention is masked.
16952 *      Having an MCP lock on the load/unload process does not guarantee that
16953 *      there is no Timer disable during Func6/7 enable. This is because the
16954 *      Timers scan is currently being cleared by the MCP on FLR.
16955 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16956 *      there is error before clearing it. But the flow above is simpler and
16957 *      more general.
16958 *      All ILT entries are written by zero+valid and not just PF6/7
16959 *      ILT entries since in the future the ILT entries allocation for
16960 *      PF-s might be dynamic.
16961 */
16962        struct ilt_client_info ilt_cli;
16963        struct ecore_ilt ilt;
16964
16965        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16966        memset(&ilt, 0, sizeof(struct ecore_ilt));
16967
16968        /* initialize dummy TM client */
16969        ilt_cli.start      = 0;
16970        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16971        ilt_cli.client_num = ILT_CLIENT_TM;
16972
16973        /*
16974         * Step 1: set zeroes to all ilt page entries with valid bit on
16975         * Step 2: set the timers first/last ilt entry to point
16976         * to the entire range to prevent ILT range error for 3rd/4th
16977         * vnic (this code assumes existence of the vnic)
16978         *
16979         * both steps performed by call to ecore_ilt_client_init_op()
16980         * with dummy TM client
16981         *
16982         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16983         * and his brother are split registers
16984         */
16985
16986        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16987        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16988        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16989
16990        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16991        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16992        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16993    }
16994
16995    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16996    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16997
16998    if (!CHIP_IS_E1x(sc)) {
16999        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17000                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17001
17002        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17003        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17004
17005        /* let the HW do it's magic... */
17006        do {
17007            DELAY(200000);
17008            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17009        } while (factor-- && (val != 1));
17010
17011        if (val != 1) {
17012            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17013            return (-1);
17014        }
17015    }
17016
17017    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17018
17019    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17020
17021    bxe_iov_init_dmae(sc);
17022
17023    /* clean the DMAE memory */
17024    sc->dmae_ready = 1;
17025    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17026
17027    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17028
17029    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17030
17031    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17032
17033    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17034
17035    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17036    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17037    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17038    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17039
17040    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17041
17042    /* QM queues pointers table */
17043    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17044
17045    /* soft reset pulse */
17046    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17047    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17048
17049    if (CNIC_SUPPORT(sc))
17050        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17051
17052    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17053    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17054    if (!CHIP_REV_IS_SLOW(sc)) {
17055        /* enable hw interrupt from doorbell Q */
17056        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17057    }
17058
17059    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17060
17061    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17062    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17063
17064    if (!CHIP_IS_E1(sc)) {
17065        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17066    }
17067
17068    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17069        if (IS_MF_AFEX(sc)) {
17070            /*
17071             * configure that AFEX and VLAN headers must be
17072             * received in AFEX mode
17073             */
17074            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17075            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17076            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17077            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17078            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17079        } else {
17080            /*
17081             * Bit-map indicating which L2 hdrs may appear
17082             * after the basic Ethernet header
17083             */
17084            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17085                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17086        }
17087    }
17088
17089    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17090    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17091    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17092    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17093
17094    if (!CHIP_IS_E1x(sc)) {
17095        /* reset VFC memories */
17096        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17097               VFC_MEMORIES_RST_REG_CAM_RST |
17098               VFC_MEMORIES_RST_REG_RAM_RST);
17099        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17100               VFC_MEMORIES_RST_REG_CAM_RST |
17101               VFC_MEMORIES_RST_REG_RAM_RST);
17102
17103        DELAY(20000);
17104    }
17105
17106    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17107    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17108    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17109    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17110
17111    /* sync semi rtc */
17112    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17113           0x80000000);
17114    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17115           0x80000000);
17116
17117    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17118    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17119    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17120
17121    if (!CHIP_IS_E1x(sc)) {
17122        if (IS_MF_AFEX(sc)) {
17123            /*
17124             * configure that AFEX and VLAN headers must be
17125             * sent in AFEX mode
17126             */
17127            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17128            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17129            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17130            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17131            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17132        } else {
17133            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17134                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17135        }
17136    }
17137
17138    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17139
17140    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17141
17142    if (CNIC_SUPPORT(sc)) {
17143        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17144        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17145        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17146        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17147        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17148        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17149        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17150        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17151        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17152        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17153    }
17154    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17155
17156    if (sizeof(union cdu_context) != 1024) {
17157        /* we currently assume that a context is 1024 bytes */
17158        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17159              (long)sizeof(union cdu_context));
17160    }
17161
17162    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17163    val = (4 << 24) + (0 << 12) + 1024;
17164    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17165
17166    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17167
17168    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17169    /* enable context validation interrupt from CFC */
17170    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17171
17172    /* set the thresholds to prevent CFC/CDU race */
17173    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17174    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17175
17176    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17177        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17178    }
17179
17180    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17181    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17182
17183    /* Reset PCIE errors for debug */
17184    REG_WR(sc, 0x2814, 0xffffffff);
17185    REG_WR(sc, 0x3820, 0xffffffff);
17186
17187    if (!CHIP_IS_E1x(sc)) {
17188        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17189               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17190                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17191        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17192               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17193                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17194                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17195        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17196               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17197                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17198                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17199    }
17200
17201    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17202
17203    if (!CHIP_IS_E1(sc)) {
17204        /* in E3 this done in per-port section */
17205        if (!CHIP_IS_E3(sc))
17206            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17207    }
17208
17209    if (CHIP_IS_E1H(sc)) {
17210        /* not applicable for E2 (and above ...) */
17211        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17212    }
17213
17214    if (CHIP_REV_IS_SLOW(sc)) {
17215        DELAY(200000);
17216    }
17217
17218    /* finish CFC init */
17219    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17220    if (val != 1) {
17221        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17222        return (-1);
17223    }
17224    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17225    if (val != 1) {
17226        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17227        return (-1);
17228    }
17229    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17230    if (val != 1) {
17231        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17232        return (-1);
17233    }
17234    REG_WR(sc, CFC_REG_DEBUG0, 0);
17235
17236    if (CHIP_IS_E1(sc)) {
17237        /* read NIG statistic to see if this is our first up since powerup */
17238        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17239        val = *BXE_SP(sc, wb_data[0]);
17240
17241        /* do internal memory self test */
17242        if ((val == 0) && bxe_int_mem_test(sc)) {
17243            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17244            return (-1);
17245        }
17246    }
17247
17248    bxe_setup_fan_failure_detection(sc);
17249
17250    /* clear PXP2 attentions */
17251    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17252
17253    bxe_enable_blocks_attention(sc);
17254
17255    if (!CHIP_REV_IS_SLOW(sc)) {
17256        ecore_enable_blocks_parity(sc);
17257    }
17258
17259    if (!BXE_NOMCP(sc)) {
17260        if (CHIP_IS_E1x(sc)) {
17261            bxe_common_init_phy(sc);
17262        }
17263    }
17264
17265    return (0);
17266}
17267
17268/**
17269 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17270 *
17271 * @sc:     driver handle
17272 */
17273static int
17274bxe_init_hw_common_chip(struct bxe_softc *sc)
17275{
17276    int rc = bxe_init_hw_common(sc);
17277
17278    if (rc) {
17279        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17280        return (rc);
17281    }
17282
17283    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17284    if (!BXE_NOMCP(sc)) {
17285        bxe_common_init_phy(sc);
17286    }
17287
17288    return (0);
17289}
17290
17291static int
17292bxe_init_hw_port(struct bxe_softc *sc)
17293{
17294    int port = SC_PORT(sc);
17295    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17296    uint32_t low, high;
17297    uint32_t val;
17298
17299    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17300
17301    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17302
17303    ecore_init_block(sc, BLOCK_MISC, init_phase);
17304    ecore_init_block(sc, BLOCK_PXP, init_phase);
17305    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17306
17307    /*
17308     * Timers bug workaround: disables the pf_master bit in pglue at
17309     * common phase, we need to enable it here before any dmae access are
17310     * attempted. Therefore we manually added the enable-master to the
17311     * port phase (it also happens in the function phase)
17312     */
17313    if (!CHIP_IS_E1x(sc)) {
17314        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17315    }
17316
17317    ecore_init_block(sc, BLOCK_ATC, init_phase);
17318    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17319    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17320    ecore_init_block(sc, BLOCK_QM, init_phase);
17321
17322    ecore_init_block(sc, BLOCK_TCM, init_phase);
17323    ecore_init_block(sc, BLOCK_UCM, init_phase);
17324    ecore_init_block(sc, BLOCK_CCM, init_phase);
17325    ecore_init_block(sc, BLOCK_XCM, init_phase);
17326
17327    /* QM cid (connection) count */
17328    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17329
17330    if (CNIC_SUPPORT(sc)) {
17331        ecore_init_block(sc, BLOCK_TM, init_phase);
17332        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17333        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17334    }
17335
17336    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17337
17338    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17339
17340    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17341        if (IS_MF(sc)) {
17342            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17343        } else if (sc->mtu > 4096) {
17344            if (BXE_ONE_PORT(sc)) {
17345                low = 160;
17346            } else {
17347                val = sc->mtu;
17348                /* (24*1024 + val*4)/256 */
17349                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17350            }
17351        } else {
17352            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17353        }
17354        high = (low + 56); /* 14*1024/256 */
17355        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17356        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17357    }
17358
17359    if (CHIP_IS_MODE_4_PORT(sc)) {
17360        REG_WR(sc, SC_PORT(sc) ?
17361               BRB1_REG_MAC_GUARANTIED_1 :
17362               BRB1_REG_MAC_GUARANTIED_0, 40);
17363    }
17364
17365    ecore_init_block(sc, BLOCK_PRS, init_phase);
17366    if (CHIP_IS_E3B0(sc)) {
17367        if (IS_MF_AFEX(sc)) {
17368            /* configure headers for AFEX mode */
17369            REG_WR(sc, SC_PORT(sc) ?
17370                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17371                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17372            REG_WR(sc, SC_PORT(sc) ?
17373                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17374                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17375            REG_WR(sc, SC_PORT(sc) ?
17376                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17377                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17378        } else {
17379            /* Ovlan exists only if we are in multi-function +
17380             * switch-dependent mode, in switch-independent there
17381             * is no ovlan headers
17382             */
17383            REG_WR(sc, SC_PORT(sc) ?
17384                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17385                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17386                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17387        }
17388    }
17389
17390    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17391    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17392    ecore_init_block(sc, BLOCK_USDM, init_phase);
17393    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17394
17395    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17396    ecore_init_block(sc, BLOCK_USEM, init_phase);
17397    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17398    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17399
17400    ecore_init_block(sc, BLOCK_UPB, init_phase);
17401    ecore_init_block(sc, BLOCK_XPB, init_phase);
17402
17403    ecore_init_block(sc, BLOCK_PBF, init_phase);
17404
17405    if (CHIP_IS_E1x(sc)) {
17406        /* configure PBF to work without PAUSE mtu 9000 */
17407        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17408
17409        /* update threshold */
17410        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17411        /* update init credit */
17412        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17413
17414        /* probe changes */
17415        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17416        DELAY(50);
17417        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17418    }
17419
17420    if (CNIC_SUPPORT(sc)) {
17421        ecore_init_block(sc, BLOCK_SRC, init_phase);
17422    }
17423
17424    ecore_init_block(sc, BLOCK_CDU, init_phase);
17425    ecore_init_block(sc, BLOCK_CFC, init_phase);
17426
17427    if (CHIP_IS_E1(sc)) {
17428        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17429        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17430    }
17431    ecore_init_block(sc, BLOCK_HC, init_phase);
17432
17433    ecore_init_block(sc, BLOCK_IGU, init_phase);
17434
17435    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17436    /* init aeu_mask_attn_func_0/1:
17437     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17438     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17439     *             bits 4-7 are used for "per vn group attention" */
17440    val = IS_MF(sc) ? 0xF7 : 0x7;
17441    /* Enable DCBX attention for all but E1 */
17442    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17443    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17444
17445    ecore_init_block(sc, BLOCK_NIG, init_phase);
17446
17447    if (!CHIP_IS_E1x(sc)) {
17448        /* Bit-map indicating which L2 hdrs may appear after the
17449         * basic Ethernet header
17450         */
17451        if (IS_MF_AFEX(sc)) {
17452            REG_WR(sc, SC_PORT(sc) ?
17453                   NIG_REG_P1_HDRS_AFTER_BASIC :
17454                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17455        } else {
17456            REG_WR(sc, SC_PORT(sc) ?
17457                   NIG_REG_P1_HDRS_AFTER_BASIC :
17458                   NIG_REG_P0_HDRS_AFTER_BASIC,
17459                   IS_MF_SD(sc) ? 7 : 6);
17460        }
17461
17462        if (CHIP_IS_E3(sc)) {
17463            REG_WR(sc, SC_PORT(sc) ?
17464                   NIG_REG_LLH1_MF_MODE :
17465                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17466        }
17467    }
17468    if (!CHIP_IS_E3(sc)) {
17469        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17470    }
17471
17472    if (!CHIP_IS_E1(sc)) {
17473        /* 0x2 disable mf_ov, 0x1 enable */
17474        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17475               (IS_MF_SD(sc) ? 0x1 : 0x2));
17476
17477        if (!CHIP_IS_E1x(sc)) {
17478            val = 0;
17479            switch (sc->devinfo.mf_info.mf_mode) {
17480            case MULTI_FUNCTION_SD:
17481                val = 1;
17482                break;
17483            case MULTI_FUNCTION_SI:
17484            case MULTI_FUNCTION_AFEX:
17485                val = 2;
17486                break;
17487            }
17488
17489            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17490                        NIG_REG_LLH0_CLS_TYPE), val);
17491        }
17492        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17493        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17494        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17495    }
17496
17497    /* If SPIO5 is set to generate interrupts, enable it for this port */
17498    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17499    if (val & MISC_SPIO_SPIO5) {
17500        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17501                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17502        val = REG_RD(sc, reg_addr);
17503        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17504        REG_WR(sc, reg_addr, val);
17505    }
17506
17507    return (0);
17508}
17509
17510static uint32_t
17511bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17512                       uint32_t         reg,
17513                       uint32_t         expected,
17514                       uint32_t         poll_count)
17515{
17516    uint32_t cur_cnt = poll_count;
17517    uint32_t val;
17518
17519    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17520        DELAY(FLR_WAIT_INTERVAL);
17521    }
17522
17523    return (val);
17524}
17525
17526static int
17527bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17528                              uint32_t         reg,
17529                              char             *msg,
17530                              uint32_t         poll_cnt)
17531{
17532    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17533
17534    if (val != 0) {
17535        BLOGE(sc, "%s usage count=%d\n", msg, val);
17536        return (1);
17537    }
17538
17539    return (0);
17540}
17541
17542/* Common routines with VF FLR cleanup */
17543static uint32_t
17544bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17545{
17546    /* adjust polling timeout */
17547    if (CHIP_REV_IS_EMUL(sc)) {
17548        return (FLR_POLL_CNT * 2000);
17549    }
17550
17551    if (CHIP_REV_IS_FPGA(sc)) {
17552        return (FLR_POLL_CNT * 120);
17553    }
17554
17555    return (FLR_POLL_CNT);
17556}
17557
17558static int
17559bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17560                           uint32_t         poll_cnt)
17561{
17562    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17563    if (bxe_flr_clnup_poll_hw_counter(sc,
17564                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17565                                      "CFC PF usage counter timed out",
17566                                      poll_cnt)) {
17567        return (1);
17568    }
17569
17570    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17571    if (bxe_flr_clnup_poll_hw_counter(sc,
17572                                      DORQ_REG_PF_USAGE_CNT,
17573                                      "DQ PF usage counter timed out",
17574                                      poll_cnt)) {
17575        return (1);
17576    }
17577
17578    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17579    if (bxe_flr_clnup_poll_hw_counter(sc,
17580                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17581                                      "QM PF usage counter timed out",
17582                                      poll_cnt)) {
17583        return (1);
17584    }
17585
17586    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17587    if (bxe_flr_clnup_poll_hw_counter(sc,
17588                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17589                                      "Timers VNIC usage counter timed out",
17590                                      poll_cnt)) {
17591        return (1);
17592    }
17593
17594    if (bxe_flr_clnup_poll_hw_counter(sc,
17595                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17596                                      "Timers NUM_SCANS usage counter timed out",
17597                                      poll_cnt)) {
17598        return (1);
17599    }
17600
17601    /* Wait DMAE PF usage counter to zero */
17602    if (bxe_flr_clnup_poll_hw_counter(sc,
17603                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17604                                      "DMAE dommand register timed out",
17605                                      poll_cnt)) {
17606        return (1);
17607    }
17608
17609    return (0);
17610}
17611
17612#define OP_GEN_PARAM(param)                                            \
17613    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17614#define OP_GEN_TYPE(type)                                           \
17615    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17616#define OP_GEN_AGG_VECT(index)                                             \
17617    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17618
17619static int
17620bxe_send_final_clnup(struct bxe_softc *sc,
17621                     uint8_t          clnup_func,
17622                     uint32_t         poll_cnt)
17623{
17624    uint32_t op_gen_command = 0;
17625    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17626                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17627    int ret = 0;
17628
17629    if (REG_RD(sc, comp_addr)) {
17630        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17631        return (1);
17632    }
17633
17634    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17635    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17636    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17637    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17638
17639    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17640    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17641
17642    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17643        BLOGE(sc, "FW final cleanup did not succeed\n");
17644        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17645              (REG_RD(sc, comp_addr)));
17646        bxe_panic(sc, ("FLR cleanup failed\n"));
17647        return (1);
17648    }
17649
17650    /* Zero completion for nxt FLR */
17651    REG_WR(sc, comp_addr, 0);
17652
17653    return (ret);
17654}
17655
17656static void
17657bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17658                       struct pbf_pN_buf_regs *regs,
17659                       uint32_t               poll_count)
17660{
17661    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17662    uint32_t cur_cnt = poll_count;
17663
17664    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17665    crd = crd_start = REG_RD(sc, regs->crd);
17666    init_crd = REG_RD(sc, regs->init_crd);
17667
17668    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17669    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17670    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17671
17672    while ((crd != init_crd) &&
17673           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17674            (init_crd - crd_start))) {
17675        if (cur_cnt--) {
17676            DELAY(FLR_WAIT_INTERVAL);
17677            crd = REG_RD(sc, regs->crd);
17678            crd_freed = REG_RD(sc, regs->crd_freed);
17679        } else {
17680            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17681            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17682            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17683            break;
17684        }
17685    }
17686
17687    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17688          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17689}
17690
17691static void
17692bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17693                       struct pbf_pN_cmd_regs *regs,
17694                       uint32_t               poll_count)
17695{
17696    uint32_t occup, to_free, freed, freed_start;
17697    uint32_t cur_cnt = poll_count;
17698
17699    occup = to_free = REG_RD(sc, regs->lines_occup);
17700    freed = freed_start = REG_RD(sc, regs->lines_freed);
17701
17702    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17703    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17704
17705    while (occup &&
17706           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17707        if (cur_cnt--) {
17708            DELAY(FLR_WAIT_INTERVAL);
17709            occup = REG_RD(sc, regs->lines_occup);
17710            freed = REG_RD(sc, regs->lines_freed);
17711        } else {
17712            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17713            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17714            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17715            break;
17716        }
17717    }
17718
17719    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17720          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17721}
17722
17723static void
17724bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17725{
17726    struct pbf_pN_cmd_regs cmd_regs[] = {
17727        {0, (CHIP_IS_E3B0(sc)) ?
17728            PBF_REG_TQ_OCCUPANCY_Q0 :
17729            PBF_REG_P0_TQ_OCCUPANCY,
17730            (CHIP_IS_E3B0(sc)) ?
17731            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17732            PBF_REG_P0_TQ_LINES_FREED_CNT},
17733        {1, (CHIP_IS_E3B0(sc)) ?
17734            PBF_REG_TQ_OCCUPANCY_Q1 :
17735            PBF_REG_P1_TQ_OCCUPANCY,
17736            (CHIP_IS_E3B0(sc)) ?
17737            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17738            PBF_REG_P1_TQ_LINES_FREED_CNT},
17739        {4, (CHIP_IS_E3B0(sc)) ?
17740            PBF_REG_TQ_OCCUPANCY_LB_Q :
17741            PBF_REG_P4_TQ_OCCUPANCY,
17742            (CHIP_IS_E3B0(sc)) ?
17743            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17744            PBF_REG_P4_TQ_LINES_FREED_CNT}
17745    };
17746
17747    struct pbf_pN_buf_regs buf_regs[] = {
17748        {0, (CHIP_IS_E3B0(sc)) ?
17749            PBF_REG_INIT_CRD_Q0 :
17750            PBF_REG_P0_INIT_CRD ,
17751            (CHIP_IS_E3B0(sc)) ?
17752            PBF_REG_CREDIT_Q0 :
17753            PBF_REG_P0_CREDIT,
17754            (CHIP_IS_E3B0(sc)) ?
17755            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17756            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17757        {1, (CHIP_IS_E3B0(sc)) ?
17758            PBF_REG_INIT_CRD_Q1 :
17759            PBF_REG_P1_INIT_CRD,
17760            (CHIP_IS_E3B0(sc)) ?
17761            PBF_REG_CREDIT_Q1 :
17762            PBF_REG_P1_CREDIT,
17763            (CHIP_IS_E3B0(sc)) ?
17764            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17765            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17766        {4, (CHIP_IS_E3B0(sc)) ?
17767            PBF_REG_INIT_CRD_LB_Q :
17768            PBF_REG_P4_INIT_CRD,
17769            (CHIP_IS_E3B0(sc)) ?
17770            PBF_REG_CREDIT_LB_Q :
17771            PBF_REG_P4_CREDIT,
17772            (CHIP_IS_E3B0(sc)) ?
17773            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17774            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17775    };
17776
17777    int i;
17778
17779    /* Verify the command queues are flushed P0, P1, P4 */
17780    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17781        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17782    }
17783
17784    /* Verify the transmission buffers are flushed P0, P1, P4 */
17785    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17786        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17787    }
17788}
17789
17790static void
17791bxe_hw_enable_status(struct bxe_softc *sc)
17792{
17793    uint32_t val;
17794
17795    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17796    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17797
17798    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17799    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17800
17801    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17802    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17803
17804    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17805    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17806
17807    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17808    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17809
17810    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17811    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17812
17813    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17814    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17815
17816    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17817    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17818}
17819
17820static int
17821bxe_pf_flr_clnup(struct bxe_softc *sc)
17822{
17823    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17824
17825    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17826
17827    /* Re-enable PF target read access */
17828    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17829
17830    /* Poll HW usage counters */
17831    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17832    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17833        return (-1);
17834    }
17835
17836    /* Zero the igu 'trailing edge' and 'leading edge' */
17837
17838    /* Send the FW cleanup command */
17839    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17840        return (-1);
17841    }
17842
17843    /* ATC cleanup */
17844
17845    /* Verify TX hw is flushed */
17846    bxe_tx_hw_flushed(sc, poll_cnt);
17847
17848    /* Wait 100ms (not adjusted according to platform) */
17849    DELAY(100000);
17850
17851    /* Verify no pending pci transactions */
17852    if (bxe_is_pcie_pending(sc)) {
17853        BLOGE(sc, "PCIE Transactions still pending\n");
17854    }
17855
17856    /* Debug */
17857    bxe_hw_enable_status(sc);
17858
17859    /*
17860     * Master enable - Due to WB DMAE writes performed before this
17861     * register is re-initialized as part of the regular function init
17862     */
17863    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17864
17865    return (0);
17866}
17867
17868static int
17869bxe_init_hw_func(struct bxe_softc *sc)
17870{
17871    int port = SC_PORT(sc);
17872    int func = SC_FUNC(sc);
17873    int init_phase = PHASE_PF0 + func;
17874    struct ecore_ilt *ilt = sc->ilt;
17875    uint16_t cdu_ilt_start;
17876    uint32_t addr, val;
17877    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17878    int i, main_mem_width, rc;
17879
17880    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17881
17882    /* FLR cleanup */
17883    if (!CHIP_IS_E1x(sc)) {
17884        rc = bxe_pf_flr_clnup(sc);
17885        if (rc) {
17886            BLOGE(sc, "FLR cleanup failed!\n");
17887            // XXX bxe_fw_dump(sc);
17888            // XXX bxe_idle_chk(sc);
17889            return (rc);
17890        }
17891    }
17892
17893    /* set MSI reconfigure capability */
17894    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17895        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17896        val = REG_RD(sc, addr);
17897        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17898        REG_WR(sc, addr, val);
17899    }
17900
17901    ecore_init_block(sc, BLOCK_PXP, init_phase);
17902    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17903
17904    ilt = sc->ilt;
17905    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17906
17907    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17908        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17909        ilt->lines[cdu_ilt_start + i].page_mapping =
17910            sc->context[i].vcxt_dma.paddr;
17911        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17912    }
17913    ecore_ilt_init_op(sc, INITOP_SET);
17914
17915    /* Set NIC mode */
17916    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17917    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17918
17919    if (!CHIP_IS_E1x(sc)) {
17920        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17921
17922        /* Turn on a single ISR mode in IGU if driver is going to use
17923         * INT#x or MSI
17924         */
17925        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17926            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17927        }
17928
17929        /*
17930         * Timers workaround bug: function init part.
17931         * Need to wait 20msec after initializing ILT,
17932         * needed to make sure there are no requests in
17933         * one of the PXP internal queues with "old" ILT addresses
17934         */
17935        DELAY(20000);
17936
17937        /*
17938         * Master enable - Due to WB DMAE writes performed before this
17939         * register is re-initialized as part of the regular function
17940         * init
17941         */
17942        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17943        /* Enable the function in IGU */
17944        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17945    }
17946
17947    sc->dmae_ready = 1;
17948
17949    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17950
17951    if (!CHIP_IS_E1x(sc))
17952        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17953
17954    ecore_init_block(sc, BLOCK_ATC, init_phase);
17955    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17956    ecore_init_block(sc, BLOCK_NIG, init_phase);
17957    ecore_init_block(sc, BLOCK_SRC, init_phase);
17958    ecore_init_block(sc, BLOCK_MISC, init_phase);
17959    ecore_init_block(sc, BLOCK_TCM, init_phase);
17960    ecore_init_block(sc, BLOCK_UCM, init_phase);
17961    ecore_init_block(sc, BLOCK_CCM, init_phase);
17962    ecore_init_block(sc, BLOCK_XCM, init_phase);
17963    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17964    ecore_init_block(sc, BLOCK_USEM, init_phase);
17965    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17966    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17967
17968    if (!CHIP_IS_E1x(sc))
17969        REG_WR(sc, QM_REG_PF_EN, 1);
17970
17971    if (!CHIP_IS_E1x(sc)) {
17972        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17973        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17974        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17975        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17976    }
17977    ecore_init_block(sc, BLOCK_QM, init_phase);
17978
17979    ecore_init_block(sc, BLOCK_TM, init_phase);
17980    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17981
17982    bxe_iov_init_dq(sc);
17983
17984    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17985    ecore_init_block(sc, BLOCK_PRS, init_phase);
17986    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17987    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17988    ecore_init_block(sc, BLOCK_USDM, init_phase);
17989    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17990    ecore_init_block(sc, BLOCK_UPB, init_phase);
17991    ecore_init_block(sc, BLOCK_XPB, init_phase);
17992    ecore_init_block(sc, BLOCK_PBF, init_phase);
17993    if (!CHIP_IS_E1x(sc))
17994        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17995
17996    ecore_init_block(sc, BLOCK_CDU, init_phase);
17997
17998    ecore_init_block(sc, BLOCK_CFC, init_phase);
17999
18000    if (!CHIP_IS_E1x(sc))
18001        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18002
18003    if (IS_MF(sc)) {
18004        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18005        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18006    }
18007
18008    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18009
18010    /* HC init per function */
18011    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18012        if (CHIP_IS_E1H(sc)) {
18013            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18014
18015            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18016            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18017        }
18018        ecore_init_block(sc, BLOCK_HC, init_phase);
18019
18020    } else {
18021        int num_segs, sb_idx, prod_offset;
18022
18023        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18024
18025        if (!CHIP_IS_E1x(sc)) {
18026            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18027            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18028        }
18029
18030        ecore_init_block(sc, BLOCK_IGU, init_phase);
18031
18032        if (!CHIP_IS_E1x(sc)) {
18033            int dsb_idx = 0;
18034            /**
18035             * Producer memory:
18036             * E2 mode: address 0-135 match to the mapping memory;
18037             * 136 - PF0 default prod; 137 - PF1 default prod;
18038             * 138 - PF2 default prod; 139 - PF3 default prod;
18039             * 140 - PF0 attn prod;    141 - PF1 attn prod;
18040             * 142 - PF2 attn prod;    143 - PF3 attn prod;
18041             * 144-147 reserved.
18042             *
18043             * E1.5 mode - In backward compatible mode;
18044             * for non default SB; each even line in the memory
18045             * holds the U producer and each odd line hold
18046             * the C producer. The first 128 producers are for
18047             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18048             * producers are for the DSB for each PF.
18049             * Each PF has five segments: (the order inside each
18050             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18051             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18052             * 144-147 attn prods;
18053             */
18054            /* non-default-status-blocks */
18055            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18056                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18057            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18058                prod_offset = (sc->igu_base_sb + sb_idx) *
18059                    num_segs;
18060
18061                for (i = 0; i < num_segs; i++) {
18062                    addr = IGU_REG_PROD_CONS_MEMORY +
18063                            (prod_offset + i) * 4;
18064                    REG_WR(sc, addr, 0);
18065                }
18066                /* send consumer update with value 0 */
18067                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18068                           USTORM_ID, 0, IGU_INT_NOP, 1);
18069                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18070            }
18071
18072            /* default-status-blocks */
18073            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18074                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18075
18076            if (CHIP_IS_MODE_4_PORT(sc))
18077                dsb_idx = SC_FUNC(sc);
18078            else
18079                dsb_idx = SC_VN(sc);
18080
18081            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18082                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18083                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18084
18085            /*
18086             * igu prods come in chunks of E1HVN_MAX (4) -
18087             * does not matters what is the current chip mode
18088             */
18089            for (i = 0; i < (num_segs * E1HVN_MAX);
18090                 i += E1HVN_MAX) {
18091                addr = IGU_REG_PROD_CONS_MEMORY +
18092                            (prod_offset + i)*4;
18093                REG_WR(sc, addr, 0);
18094            }
18095            /* send consumer update with 0 */
18096            if (CHIP_INT_MODE_IS_BC(sc)) {
18097                bxe_ack_sb(sc, sc->igu_dsb_id,
18098                           USTORM_ID, 0, IGU_INT_NOP, 1);
18099                bxe_ack_sb(sc, sc->igu_dsb_id,
18100                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18101                bxe_ack_sb(sc, sc->igu_dsb_id,
18102                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18103                bxe_ack_sb(sc, sc->igu_dsb_id,
18104                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18105                bxe_ack_sb(sc, sc->igu_dsb_id,
18106                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18107            } else {
18108                bxe_ack_sb(sc, sc->igu_dsb_id,
18109                           USTORM_ID, 0, IGU_INT_NOP, 1);
18110                bxe_ack_sb(sc, sc->igu_dsb_id,
18111                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18112            }
18113            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18114
18115            /* !!! these should become driver const once
18116               rf-tool supports split-68 const */
18117            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18118            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18119            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18120            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18121            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18122            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18123        }
18124    }
18125
18126    /* Reset PCIE errors for debug */
18127    REG_WR(sc, 0x2114, 0xffffffff);
18128    REG_WR(sc, 0x2120, 0xffffffff);
18129
18130    if (CHIP_IS_E1x(sc)) {
18131        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18132        main_mem_base = HC_REG_MAIN_MEMORY +
18133                SC_PORT(sc) * (main_mem_size * 4);
18134        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18135        main_mem_width = 8;
18136
18137        val = REG_RD(sc, main_mem_prty_clr);
18138        if (val) {
18139            BLOGD(sc, DBG_LOAD,
18140                  "Parity errors in HC block during function init (0x%x)!\n",
18141                  val);
18142        }
18143
18144        /* Clear "false" parity errors in MSI-X table */
18145        for (i = main_mem_base;
18146             i < main_mem_base + main_mem_size * 4;
18147             i += main_mem_width) {
18148            bxe_read_dmae(sc, i, main_mem_width / 4);
18149            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18150                           i, main_mem_width / 4);
18151        }
18152        /* Clear HC parity attention */
18153        REG_RD(sc, main_mem_prty_clr);
18154    }
18155
18156#if 1
18157    /* Enable STORMs SP logging */
18158    REG_WR8(sc, BAR_USTRORM_INTMEM +
18159           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18160    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18161           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18162    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18163           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18164    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18165           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18166#endif
18167
18168    elink_phy_probe(&sc->link_params);
18169
18170    return (0);
18171}
18172
18173static void
18174bxe_link_reset(struct bxe_softc *sc)
18175{
18176    if (!BXE_NOMCP(sc)) {
18177	bxe_acquire_phy_lock(sc);
18178        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18179	bxe_release_phy_lock(sc);
18180    } else {
18181        if (!CHIP_REV_IS_SLOW(sc)) {
18182            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18183        }
18184    }
18185}
18186
18187static void
18188bxe_reset_port(struct bxe_softc *sc)
18189{
18190    int port = SC_PORT(sc);
18191    uint32_t val;
18192
18193	ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18194    /* reset physical Link */
18195    bxe_link_reset(sc);
18196
18197    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18198
18199    /* Do not rcv packets to BRB */
18200    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18201    /* Do not direct rcv packets that are not for MCP to the BRB */
18202    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18203               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18204
18205    /* Configure AEU */
18206    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18207
18208    DELAY(100000);
18209
18210    /* Check for BRB port occupancy */
18211    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18212    if (val) {
18213        BLOGD(sc, DBG_LOAD,
18214              "BRB1 is not empty, %d blocks are occupied\n", val);
18215    }
18216
18217    /* TODO: Close Doorbell port? */
18218}
18219
18220static void
18221bxe_ilt_wr(struct bxe_softc *sc,
18222           uint32_t         index,
18223           bus_addr_t       addr)
18224{
18225    int reg;
18226    uint32_t wb_write[2];
18227
18228    if (CHIP_IS_E1(sc)) {
18229        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18230    } else {
18231        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18232    }
18233
18234    wb_write[0] = ONCHIP_ADDR1(addr);
18235    wb_write[1] = ONCHIP_ADDR2(addr);
18236    REG_WR_DMAE(sc, reg, wb_write, 2);
18237}
18238
18239static void
18240bxe_clear_func_ilt(struct bxe_softc *sc,
18241                   uint32_t         func)
18242{
18243    uint32_t i, base = FUNC_ILT_BASE(func);
18244    for (i = base; i < base + ILT_PER_FUNC; i++) {
18245        bxe_ilt_wr(sc, i, 0);
18246    }
18247}
18248
18249static void
18250bxe_reset_func(struct bxe_softc *sc)
18251{
18252    struct bxe_fastpath *fp;
18253    int port = SC_PORT(sc);
18254    int func = SC_FUNC(sc);
18255    int i;
18256
18257    /* Disable the function in the FW */
18258    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18259    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18260    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18261    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18262
18263    /* FP SBs */
18264    FOR_EACH_ETH_QUEUE(sc, i) {
18265        fp = &sc->fp[i];
18266        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18267                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18268                SB_DISABLED);
18269    }
18270
18271    /* SP SB */
18272    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18273            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18274            SB_DISABLED);
18275
18276    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18277        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18278    }
18279
18280    /* Configure IGU */
18281    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18282        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18283        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18284    } else {
18285        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18286        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18287    }
18288
18289    if (CNIC_LOADED(sc)) {
18290        /* Disable Timer scan */
18291        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18292        /*
18293         * Wait for at least 10ms and up to 2 second for the timers
18294         * scan to complete
18295         */
18296        for (i = 0; i < 200; i++) {
18297            DELAY(10000);
18298            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18299                break;
18300        }
18301    }
18302
18303    /* Clear ILT */
18304    bxe_clear_func_ilt(sc, func);
18305
18306    /*
18307     * Timers workaround bug for E2: if this is vnic-3,
18308     * we need to set the entire ilt range for this timers.
18309     */
18310    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18311        struct ilt_client_info ilt_cli;
18312        /* use dummy TM client */
18313        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18314        ilt_cli.start = 0;
18315        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18316        ilt_cli.client_num = ILT_CLIENT_TM;
18317
18318        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18319    }
18320
18321    /* this assumes that reset_port() called before reset_func()*/
18322    if (!CHIP_IS_E1x(sc)) {
18323        bxe_pf_disable(sc);
18324    }
18325
18326    sc->dmae_ready = 0;
18327}
18328
18329static int
18330bxe_gunzip_init(struct bxe_softc *sc)
18331{
18332    return (0);
18333}
18334
18335static void
18336bxe_gunzip_end(struct bxe_softc *sc)
18337{
18338    return;
18339}
18340
18341static int
18342bxe_init_firmware(struct bxe_softc *sc)
18343{
18344    if (CHIP_IS_E1(sc)) {
18345        ecore_init_e1_firmware(sc);
18346        sc->iro_array = e1_iro_arr;
18347    } else if (CHIP_IS_E1H(sc)) {
18348        ecore_init_e1h_firmware(sc);
18349        sc->iro_array = e1h_iro_arr;
18350    } else if (!CHIP_IS_E1x(sc)) {
18351        ecore_init_e2_firmware(sc);
18352        sc->iro_array = e2_iro_arr;
18353    } else {
18354        BLOGE(sc, "Unsupported chip revision\n");
18355        return (-1);
18356    }
18357
18358    return (0);
18359}
18360
18361static void
18362bxe_release_firmware(struct bxe_softc *sc)
18363{
18364    /* Do nothing */
18365    return;
18366}
18367
18368static int
18369ecore_gunzip(struct bxe_softc *sc,
18370             const uint8_t    *zbuf,
18371             int              len)
18372{
18373    /* XXX : Implement... */
18374    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18375    return (FALSE);
18376}
18377
18378static void
18379ecore_reg_wr_ind(struct bxe_softc *sc,
18380                 uint32_t         addr,
18381                 uint32_t         val)
18382{
18383    bxe_reg_wr_ind(sc, addr, val);
18384}
18385
18386static void
18387ecore_write_dmae_phys_len(struct bxe_softc *sc,
18388                          bus_addr_t       phys_addr,
18389                          uint32_t         addr,
18390                          uint32_t         len)
18391{
18392    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18393}
18394
18395void
18396ecore_storm_memset_struct(struct bxe_softc *sc,
18397                          uint32_t         addr,
18398                          size_t           size,
18399                          uint32_t         *data)
18400{
18401    uint8_t i;
18402    for (i = 0; i < size/4; i++) {
18403        REG_WR(sc, addr + (i * 4), data[i]);
18404    }
18405}
18406
18407
18408/*
18409 * character device - ioctl interface definitions
18410 */
18411
18412
18413#include "bxe_dump.h"
18414#include "bxe_ioctl.h"
18415#include <sys/conf.h>
18416
18417static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18418                struct thread *td);
18419
18420static struct cdevsw bxe_cdevsw = {
18421    .d_version = D_VERSION,
18422    .d_ioctl = bxe_eioctl,
18423    .d_name = "bxecnic",
18424};
18425
18426#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18427
18428
18429#define DUMP_ALL_PRESETS        0x1FFF
18430#define DUMP_MAX_PRESETS        13
18431#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18432#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18433#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18434#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18435#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18436
18437#define IS_REG_IN_PRESET(presets, idx)  \
18438                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18439
18440
18441static int
18442bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18443{
18444    if (CHIP_IS_E1(sc))
18445        return dump_num_registers[0][preset-1];
18446    else if (CHIP_IS_E1H(sc))
18447        return dump_num_registers[1][preset-1];
18448    else if (CHIP_IS_E2(sc))
18449        return dump_num_registers[2][preset-1];
18450    else if (CHIP_IS_E3A0(sc))
18451        return dump_num_registers[3][preset-1];
18452    else if (CHIP_IS_E3B0(sc))
18453        return dump_num_registers[4][preset-1];
18454    else
18455        return 0;
18456}
18457
18458static int
18459bxe_get_total_regs_len32(struct bxe_softc *sc)
18460{
18461    uint32_t preset_idx;
18462    int regdump_len32 = 0;
18463
18464
18465    /* Calculate the total preset regs length */
18466    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18467        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18468    }
18469
18470    return regdump_len32;
18471}
18472
18473static const uint32_t *
18474__bxe_get_page_addr_ar(struct bxe_softc *sc)
18475{
18476    if (CHIP_IS_E2(sc))
18477        return page_vals_e2;
18478    else if (CHIP_IS_E3(sc))
18479        return page_vals_e3;
18480    else
18481        return NULL;
18482}
18483
18484static uint32_t
18485__bxe_get_page_reg_num(struct bxe_softc *sc)
18486{
18487    if (CHIP_IS_E2(sc))
18488        return PAGE_MODE_VALUES_E2;
18489    else if (CHIP_IS_E3(sc))
18490        return PAGE_MODE_VALUES_E3;
18491    else
18492        return 0;
18493}
18494
18495static const uint32_t *
18496__bxe_get_page_write_ar(struct bxe_softc *sc)
18497{
18498    if (CHIP_IS_E2(sc))
18499        return page_write_regs_e2;
18500    else if (CHIP_IS_E3(sc))
18501        return page_write_regs_e3;
18502    else
18503        return NULL;
18504}
18505
18506static uint32_t
18507__bxe_get_page_write_num(struct bxe_softc *sc)
18508{
18509    if (CHIP_IS_E2(sc))
18510        return PAGE_WRITE_REGS_E2;
18511    else if (CHIP_IS_E3(sc))
18512        return PAGE_WRITE_REGS_E3;
18513    else
18514        return 0;
18515}
18516
18517static const struct reg_addr *
18518__bxe_get_page_read_ar(struct bxe_softc *sc)
18519{
18520    if (CHIP_IS_E2(sc))
18521        return page_read_regs_e2;
18522    else if (CHIP_IS_E3(sc))
18523        return page_read_regs_e3;
18524    else
18525        return NULL;
18526}
18527
18528static uint32_t
18529__bxe_get_page_read_num(struct bxe_softc *sc)
18530{
18531    if (CHIP_IS_E2(sc))
18532        return PAGE_READ_REGS_E2;
18533    else if (CHIP_IS_E3(sc))
18534        return PAGE_READ_REGS_E3;
18535    else
18536        return 0;
18537}
18538
18539static bool
18540bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18541{
18542    if (CHIP_IS_E1(sc))
18543        return IS_E1_REG(reg_info->chips);
18544    else if (CHIP_IS_E1H(sc))
18545        return IS_E1H_REG(reg_info->chips);
18546    else if (CHIP_IS_E2(sc))
18547        return IS_E2_REG(reg_info->chips);
18548    else if (CHIP_IS_E3A0(sc))
18549        return IS_E3A0_REG(reg_info->chips);
18550    else if (CHIP_IS_E3B0(sc))
18551        return IS_E3B0_REG(reg_info->chips);
18552    else
18553        return 0;
18554}
18555
18556static bool
18557bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18558{
18559    if (CHIP_IS_E1(sc))
18560        return IS_E1_REG(wreg_info->chips);
18561    else if (CHIP_IS_E1H(sc))
18562        return IS_E1H_REG(wreg_info->chips);
18563    else if (CHIP_IS_E2(sc))
18564        return IS_E2_REG(wreg_info->chips);
18565    else if (CHIP_IS_E3A0(sc))
18566        return IS_E3A0_REG(wreg_info->chips);
18567    else if (CHIP_IS_E3B0(sc))
18568        return IS_E3B0_REG(wreg_info->chips);
18569    else
18570        return 0;
18571}
18572
18573/**
18574 * bxe_read_pages_regs - read "paged" registers
18575 *
18576 * @bp          device handle
18577 * @p           output buffer
18578 *
18579 * Reads "paged" memories: memories that may only be read by first writing to a
18580 * specific address ("write address") and then reading from a specific address
18581 * ("read address"). There may be more than one write address per "page" and
18582 * more than one read address per write address.
18583 */
18584static void
18585bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18586{
18587    uint32_t i, j, k, n;
18588
18589    /* addresses of the paged registers */
18590    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18591    /* number of paged registers */
18592    int num_pages = __bxe_get_page_reg_num(sc);
18593    /* write addresses */
18594    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18595    /* number of write addresses */
18596    int write_num = __bxe_get_page_write_num(sc);
18597    /* read addresses info */
18598    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18599    /* number of read addresses */
18600    int read_num = __bxe_get_page_read_num(sc);
18601    uint32_t addr, size;
18602
18603    for (i = 0; i < num_pages; i++) {
18604        for (j = 0; j < write_num; j++) {
18605            REG_WR(sc, write_addr[j], page_addr[i]);
18606
18607            for (k = 0; k < read_num; k++) {
18608                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18609                    size = read_addr[k].size;
18610                    for (n = 0; n < size; n++) {
18611                        addr = read_addr[k].addr + n*4;
18612                        *p++ = REG_RD(sc, addr);
18613                    }
18614                }
18615            }
18616        }
18617    }
18618    return;
18619}
18620
18621
18622static int
18623bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18624{
18625    uint32_t i, j, addr;
18626    const struct wreg_addr *wreg_addr_p = NULL;
18627
18628    if (CHIP_IS_E1(sc))
18629        wreg_addr_p = &wreg_addr_e1;
18630    else if (CHIP_IS_E1H(sc))
18631        wreg_addr_p = &wreg_addr_e1h;
18632    else if (CHIP_IS_E2(sc))
18633        wreg_addr_p = &wreg_addr_e2;
18634    else if (CHIP_IS_E3A0(sc))
18635        wreg_addr_p = &wreg_addr_e3;
18636    else if (CHIP_IS_E3B0(sc))
18637        wreg_addr_p = &wreg_addr_e3b0;
18638    else
18639        return (-1);
18640
18641    /* Read the idle_chk registers */
18642    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18643        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18644            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18645            for (j = 0; j < idle_reg_addrs[i].size; j++)
18646                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18647        }
18648    }
18649
18650    /* Read the regular registers */
18651    for (i = 0; i < REGS_COUNT; i++) {
18652        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18653            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18654            for (j = 0; j < reg_addrs[i].size; j++)
18655                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18656        }
18657    }
18658
18659    /* Read the CAM registers */
18660    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18661        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18662        for (i = 0; i < wreg_addr_p->size; i++) {
18663            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18664
18665            /* In case of wreg_addr register, read additional
18666               registers from read_regs array
18667             */
18668            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18669                addr = *(wreg_addr_p->read_regs);
18670                *p++ = REG_RD(sc, addr + j*4);
18671            }
18672        }
18673    }
18674
18675    /* Paged registers are supported in E2 & E3 only */
18676    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18677        /* Read "paged" registers */
18678        bxe_read_pages_regs(sc, p, preset);
18679    }
18680
18681    return 0;
18682}
18683
18684int
18685bxe_grc_dump(struct bxe_softc *sc)
18686{
18687    int rval = 0;
18688    uint32_t preset_idx;
18689    uint8_t *buf;
18690    uint32_t size;
18691    struct  dump_header *d_hdr;
18692    uint32_t i;
18693    uint32_t reg_val;
18694    uint32_t reg_addr;
18695    uint32_t cmd_offset;
18696    struct ecore_ilt *ilt = SC_ILT(sc);
18697    struct bxe_fastpath *fp;
18698    struct ilt_client_info *ilt_cli;
18699    int grc_dump_size;
18700
18701
18702    if (sc->grcdump_done || sc->grcdump_started)
18703	return (rval);
18704
18705    sc->grcdump_started = 1;
18706    BLOGI(sc, "Started collecting grcdump\n");
18707
18708    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18709                sizeof(struct  dump_header);
18710
18711    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18712
18713    if (sc->grc_dump == NULL) {
18714        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18715        return(ENOMEM);
18716    }
18717
18718
18719
18720    /* Disable parity attentions as long as following dump may
18721     * cause false alarms by reading never written registers. We
18722     * will re-enable parity attentions right after the dump.
18723     */
18724
18725    /* Disable parity on path 0 */
18726    bxe_pretend_func(sc, 0);
18727
18728    ecore_disable_blocks_parity(sc);
18729
18730    /* Disable parity on path 1 */
18731    bxe_pretend_func(sc, 1);
18732    ecore_disable_blocks_parity(sc);
18733
18734    /* Return to current function */
18735    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18736
18737    buf = sc->grc_dump;
18738    d_hdr = sc->grc_dump;
18739
18740    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18741    d_hdr->version = BNX2X_DUMP_VERSION;
18742    d_hdr->preset = DUMP_ALL_PRESETS;
18743
18744    if (CHIP_IS_E1(sc)) {
18745        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18746    } else if (CHIP_IS_E1H(sc)) {
18747        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18748    } else if (CHIP_IS_E2(sc)) {
18749        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18750                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18751    } else if (CHIP_IS_E3A0(sc)) {
18752        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18753                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18754    } else if (CHIP_IS_E3B0(sc)) {
18755        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18756                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18757    }
18758
18759    buf += sizeof(struct  dump_header);
18760
18761    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18762
18763        /* Skip presets with IOR */
18764        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18765            (preset_idx == 11))
18766            continue;
18767
18768        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18769
18770	if (rval)
18771            break;
18772
18773        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18774
18775        buf += size;
18776    }
18777
18778    bxe_pretend_func(sc, 0);
18779    ecore_clear_blocks_parity(sc);
18780    ecore_enable_blocks_parity(sc);
18781
18782    bxe_pretend_func(sc, 1);
18783    ecore_clear_blocks_parity(sc);
18784    ecore_enable_blocks_parity(sc);
18785
18786    /* Return to current function */
18787    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18788
18789
18790
18791    if(sc->state == BXE_STATE_OPEN) {
18792        if(sc->fw_stats_req  != NULL) {
18793    		BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18794        			(uintmax_t)sc->fw_stats_req_mapping,
18795        			(uintmax_t)sc->fw_stats_data_mapping,
18796        			sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18797		}
18798		if(sc->def_sb != NULL) {
18799			BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18800        			(void *)sc->def_sb_dma.paddr, sc->def_sb,
18801        			sizeof(struct host_sp_status_block));
18802		}
18803		if(sc->eq_dma.vaddr != NULL) {
18804    		BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18805        			(uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18806		}
18807		if(sc->sp_dma.vaddr != NULL) {
18808    		BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18809        			(uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18810        			sizeof(struct bxe_slowpath));
18811		}
18812		if(sc->spq_dma.vaddr != NULL) {
18813    		BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18814        			(uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18815		}
18816		if(sc->gz_buf_dma.vaddr != NULL) {
18817    		BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18818        			(uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18819        			FW_BUF_SIZE);
18820		}
18821    	for (i = 0; i < sc->num_queues; i++) {
18822        	fp = &sc->fp[i];
18823			if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
18824                        fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
18825                        fp->rx_sge_dma.vaddr != NULL) {
18826
18827				BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18828            			(uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18829            			sizeof(union bxe_host_hc_status_block));
18830				BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18831            			(uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18832            			(BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18833        		BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18834            			(uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18835            			(BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18836        		BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18837            			(uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18838            			(BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18839        		BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18840            			(uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18841            			(BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18842    		}
18843		}
18844		if(ilt != NULL ) {
18845    		ilt_cli = &ilt->clients[1];
18846			if(ilt->lines != NULL) {
18847    		for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18848        		BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18849            			(uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18850            			((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18851    		}
18852			}
18853		}
18854
18855
18856    	cmd_offset = DMAE_REG_CMD_MEM;
18857    	for (i = 0; i < 224; i++) {
18858        	reg_addr = (cmd_offset +(i * 4));
18859        	reg_val = REG_RD(sc, reg_addr);
18860        	BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
18861            			reg_addr, reg_val);
18862    	}
18863	}
18864
18865    BLOGI(sc, "Collection of grcdump done\n");
18866    sc->grcdump_done = 1;
18867    return(rval);
18868}
18869
18870static int
18871bxe_add_cdev(struct bxe_softc *sc)
18872{
18873    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18874
18875    if (sc->eeprom == NULL) {
18876        BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
18877        return (-1);
18878    }
18879
18880    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18881                            sc->ifp->if_dunit,
18882                            UID_ROOT,
18883                            GID_WHEEL,
18884                            0600,
18885                            "%s",
18886                            if_name(sc->ifp));
18887
18888    if (sc->ioctl_dev == NULL) {
18889        free(sc->eeprom, M_DEVBUF);
18890        sc->eeprom = NULL;
18891        return (-1);
18892    }
18893
18894    sc->ioctl_dev->si_drv1 = sc;
18895
18896    return (0);
18897}
18898
18899static void
18900bxe_del_cdev(struct bxe_softc *sc)
18901{
18902    if (sc->ioctl_dev != NULL)
18903        destroy_dev(sc->ioctl_dev);
18904
18905    if (sc->eeprom != NULL) {
18906        free(sc->eeprom, M_DEVBUF);
18907        sc->eeprom = NULL;
18908    }
18909    sc->ioctl_dev = NULL;
18910
18911    return;
18912}
18913
18914static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18915{
18916
18917    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
18918        return FALSE;
18919
18920    return TRUE;
18921}
18922
18923
18924static int
18925bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18926{
18927    int rval = 0;
18928
18929    if(!bxe_is_nvram_accessible(sc)) {
18930        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18931        return (-EAGAIN);
18932    }
18933    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18934
18935
18936   return (rval);
18937}
18938
18939static int
18940bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18941{
18942    int rval = 0;
18943
18944    if(!bxe_is_nvram_accessible(sc)) {
18945        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18946        return (-EAGAIN);
18947    }
18948    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18949
18950   return (rval);
18951}
18952
18953static int
18954bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18955{
18956    int rval = 0;
18957
18958    switch (eeprom->eeprom_cmd) {
18959
18960    case BXE_EEPROM_CMD_SET_EEPROM:
18961
18962        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18963                       eeprom->eeprom_data_len);
18964
18965        if (rval)
18966            break;
18967
18968        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18969                       eeprom->eeprom_data_len);
18970        break;
18971
18972    case BXE_EEPROM_CMD_GET_EEPROM:
18973
18974        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18975                       eeprom->eeprom_data_len);
18976
18977        if (rval) {
18978            break;
18979        }
18980
18981        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18982                       eeprom->eeprom_data_len);
18983        break;
18984
18985    default:
18986            rval = EINVAL;
18987            break;
18988    }
18989
18990    if (rval) {
18991        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
18992    }
18993
18994    return (rval);
18995}
18996
18997static int
18998bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18999{
19000    uint32_t ext_phy_config;
19001    int port = SC_PORT(sc);
19002    int cfg_idx = bxe_get_link_cfg_idx(sc);
19003
19004    dev_p->supported = sc->port.supported[cfg_idx] |
19005            (sc->port.supported[cfg_idx ^ 1] &
19006            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19007    dev_p->advertising = sc->port.advertising[cfg_idx];
19008    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19009        ELINK_ETH_PHY_SFP_1G_FIBER) {
19010        dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full);
19011        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19012    }
19013    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19014        !(sc->flags & BXE_MF_FUNC_DIS)) {
19015        dev_p->duplex = sc->link_vars.duplex;
19016        if (IS_MF(sc) && !BXE_NOMCP(sc))
19017            dev_p->speed = bxe_get_mf_speed(sc);
19018        else
19019            dev_p->speed = sc->link_vars.line_speed;
19020    } else {
19021        dev_p->duplex = DUPLEX_UNKNOWN;
19022        dev_p->speed = SPEED_UNKNOWN;
19023    }
19024
19025    dev_p->port = bxe_media_detect(sc);
19026
19027    ext_phy_config = SHMEM_RD(sc,
19028                         dev_info.port_hw_config[port].external_phy_config);
19029    if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19030        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19031        dev_p->phy_address =  sc->port.phy_addr;
19032    else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19033            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19034        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19035            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19036        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19037    else
19038        dev_p->phy_address = 0;
19039
19040    if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19041        dev_p->autoneg = AUTONEG_ENABLE;
19042    else
19043       dev_p->autoneg = AUTONEG_DISABLE;
19044
19045
19046    return 0;
19047}
19048
19049static int
19050bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19051        struct thread *td)
19052{
19053    struct bxe_softc    *sc;
19054    int                 rval = 0;
19055    device_t            pci_dev;
19056    bxe_grcdump_t       *dump = NULL;
19057    int grc_dump_size;
19058    bxe_drvinfo_t   *drv_infop = NULL;
19059    bxe_dev_setting_t  *dev_p;
19060    bxe_dev_setting_t  dev_set;
19061    bxe_get_regs_t  *reg_p;
19062    bxe_reg_rdw_t *reg_rdw_p;
19063    bxe_pcicfg_rdw_t *cfg_rdw_p;
19064    bxe_perm_mac_addr_t *mac_addr_p;
19065
19066
19067    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19068        return ENXIO;
19069
19070    pci_dev= sc->dev;
19071
19072    dump = (bxe_grcdump_t *)data;
19073
19074    switch(cmd) {
19075
19076        case BXE_GRC_DUMP_SIZE:
19077            dump->pci_func = sc->pcie_func;
19078            dump->grcdump_size =
19079                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19080                     sizeof(struct  dump_header);
19081            break;
19082
19083        case BXE_GRC_DUMP:
19084
19085            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19086                                sizeof(struct  dump_header);
19087            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19088                (dump->grcdump_size < grc_dump_size)) {
19089                rval = EINVAL;
19090                break;
19091            }
19092
19093            if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19094                (!sc->grcdump_started)) {
19095                rval =  bxe_grc_dump(sc);
19096            }
19097
19098            if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19099                (sc->grc_dump != NULL))  {
19100                dump->grcdump_dwords = grc_dump_size >> 2;
19101                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19102                free(sc->grc_dump, M_DEVBUF);
19103                sc->grc_dump = NULL;
19104                sc->grcdump_started = 0;
19105                sc->grcdump_done = 0;
19106            }
19107
19108            break;
19109
19110        case BXE_DRV_INFO:
19111            drv_infop = (bxe_drvinfo_t *)data;
19112            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19113            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19114                BXE_DRIVER_VERSION);
19115            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19116                sc->devinfo.bc_ver_str);
19117            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19118                "%s", sc->fw_ver_str);
19119            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19120            drv_infop->reg_dump_len =
19121                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19122                    + sizeof(struct  dump_header);
19123            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19124                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19125            break;
19126
19127        case BXE_DEV_SETTING:
19128            dev_p = (bxe_dev_setting_t *)data;
19129            bxe_get_settings(sc, &dev_set);
19130            dev_p->supported = dev_set.supported;
19131            dev_p->advertising = dev_set.advertising;
19132            dev_p->speed = dev_set.speed;
19133            dev_p->duplex = dev_set.duplex;
19134            dev_p->port = dev_set.port;
19135            dev_p->phy_address = dev_set.phy_address;
19136            dev_p->autoneg = dev_set.autoneg;
19137
19138            break;
19139
19140        case BXE_GET_REGS:
19141
19142            reg_p = (bxe_get_regs_t *)data;
19143            grc_dump_size = reg_p->reg_buf_len;
19144
19145            if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19146                bxe_grc_dump(sc);
19147            }
19148            if((sc->grcdump_done) && (sc->grcdump_started) &&
19149                (sc->grc_dump != NULL))  {
19150                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19151                free(sc->grc_dump, M_DEVBUF);
19152                sc->grc_dump = NULL;
19153                sc->grcdump_started = 0;
19154                sc->grcdump_done = 0;
19155            }
19156
19157            break;
19158
19159        case BXE_RDW_REG:
19160            reg_rdw_p = (bxe_reg_rdw_t *)data;
19161            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19162                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19163                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19164
19165            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19166                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19167                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19168
19169            break;
19170
19171        case BXE_RDW_PCICFG:
19172            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19173            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19174
19175                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19176                                         cfg_rdw_p->cfg_width);
19177
19178            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19179                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19180                            cfg_rdw_p->cfg_width);
19181            } else {
19182                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19183            }
19184            break;
19185
19186        case BXE_MAC_ADDR:
19187            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19188            snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19189                sc->mac_addr_str);
19190            break;
19191
19192        case BXE_EEPROM:
19193            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19194            break;
19195
19196
19197        default:
19198            break;
19199    }
19200
19201    return (rval);
19202}
19203