bxe.c revision 321517
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 321517 2017-07-26 11:04:30Z ae $");
29
30#define BXE_DRIVER_VERSION "1.78.90"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero (0) for older kernels that don't include this changeset,
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
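/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * the constants above give a 10 msec budget polled in 50 usec steps, i.e.
 * FLR_POLL_CNT == 10000 / 50 == 200 iterations. A hypothetical consumer
 * would spin roughly as follows until a HW usage counter drains to zero.
 */
#if 0
static uint32_t
bxe_flr_poll_sketch(struct bxe_softc *sc, uint32_t reg)
{
    uint32_t val = REG_RD(sc, reg);
    int cnt;

    for (cnt = 0; (val != 0) && (cnt < FLR_POLL_CNT); cnt++) {
        DELAY(FLR_WAIT_INTERVAL);
        val = REG_RD(sc, reg);
    }

    return (val); /* non-zero means the counter never drained */
}
#endif
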
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe(); see the illustrative sketch below.
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        QLOGIC_VENDORID,
171        CHIP_NUM_57840_4_10,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 4x10GbE"
174    },
175    {
176        BRCM_VENDORID,
177        CHIP_NUM_57840_2_20,
178        PCI_ANY_ID, PCI_ANY_ID,
179        "QLogic NetXtreme II BCM57840 2x20GbE"
180    },
181    {
182        BRCM_VENDORID,
183        CHIP_NUM_57840_MF,
184        PCI_ANY_ID, PCI_ANY_ID,
185        "QLogic NetXtreme II BCM57840 MF 10GbE"
186    },
187    {
188        0, 0, 0, 0, NULL
189    }
190};
191
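/*
 * Illustrative sketch (editor's addition): bxe_probe() typically walks the
 * bxe_devs[] table above and accepts the first vendor/device ID match. The
 * struct bxe_device_type field names used below (bxe_vid, bxe_did, bxe_name)
 * are assumed from bxe.h; the helper itself is hypothetical and far simpler
 * than the real probe routine.
 */
#if 0
static int
bxe_probe_sketch(device_t dev)
{
    const struct bxe_device_type *t;
    uint16_t vid = pci_get_vendor(dev);
    uint16_t did = pci_get_device(dev);

    for (t = bxe_devs; t->bxe_name != NULL; t++) { /* assumed field names */
        if ((vid == t->bxe_vid) && (did == t->bxe_did)) {
            device_set_desc(dev, t->bxe_name);
            return (BUS_PROBE_DEFAULT);
        }
    }

    return (ENXIO);
}
#endif
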
192MALLOC_DECLARE(M_BXE_ILT);
193MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
194
195/*
196 * FreeBSD device entry points.
197 */
198static int bxe_probe(device_t);
199static int bxe_attach(device_t);
200static int bxe_detach(device_t);
201static int bxe_shutdown(device_t);
202
203/*
204 * FreeBSD KLD module/device interface event handler method.
205 */
206static device_method_t bxe_methods[] = {
207    /* Device interface (device_if.h) */
208    DEVMETHOD(device_probe,     bxe_probe),
209    DEVMETHOD(device_attach,    bxe_attach),
210    DEVMETHOD(device_detach,    bxe_detach),
211    DEVMETHOD(device_shutdown,  bxe_shutdown),
212    /* Bus interface (bus_if.h) */
213    DEVMETHOD(bus_print_child,  bus_generic_print_child),
214    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
215    KOBJMETHOD_END
216};
217
218/*
219 * FreeBSD KLD Module data declaration
220 */
221static driver_t bxe_driver = {
222    "bxe",                   /* module name */
223    bxe_methods,             /* event handler */
224    sizeof(struct bxe_softc) /* extra data */
225};
226
227/*
228 * FreeBSD dev class is needed to manage dev instances and
229 * to associate with a bus type
230 */
231static devclass_t bxe_devclass;
232
233MODULE_DEPEND(bxe, pci, 1, 1, 1);
234MODULE_DEPEND(bxe, ether, 1, 1, 1);
235DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
236
237/* resources needed for unloading a previously loaded device */
238
239#define BXE_PREV_WAIT_NEEDED 1
240struct mtx bxe_prev_mtx;
241MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
242struct bxe_prev_list_node {
243    LIST_ENTRY(bxe_prev_list_node) node;
244    uint8_t bus;
245    uint8_t slot;
246    uint8_t path;
247    uint8_t aer; /* XXX automatic error recovery */
248    uint8_t undi;
249};
250static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
251
252static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
253
254/* Tunable device values... */
255
256SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
257
258/* Debug */
259unsigned long bxe_debug = 0;
260SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
261             &bxe_debug, 0, "Debug logging mode");
262
263/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
264static int bxe_interrupt_mode = INTR_MODE_MSIX;
265SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
266           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
267
268/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
269static int bxe_queue_count = 4;
270SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
271           &bxe_queue_count, 0, "Multi-Queue queue count");
272
273/* max number of buffers per queue (default RX_BD_USABLE) */
274static int bxe_max_rx_bufs = 0;
275SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
276           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
277
278/* Host interrupt coalescing RX tick timer (usecs) */
279static int bxe_hc_rx_ticks = 25;
280SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
281           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
282
283/* Host interrupt coalescing TX tick timer (usecs) */
284static int bxe_hc_tx_ticks = 50;
285SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
286           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
287
288/* Maximum number of Rx packets to process at a time */
289static int bxe_rx_budget = 0xffffffff;
290SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
291           &bxe_rx_budget, 0, "Rx processing budget");
292
293/* Maximum LRO aggregation size */
294static int bxe_max_aggregation_size = 0;
295SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
296           &bxe_max_aggregation_size, 0, "max aggregation size");
297
298/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
299static int bxe_mrrs = -1;
300SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
301           &bxe_mrrs, 0, "PCIe maximum read request size");
302
303/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
304static int bxe_autogreeen = 0;
305SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
306           &bxe_autogreeen, 0, "AutoGrEEEn support");
307
308/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
309static int bxe_udp_rss = 0;
310SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
311           &bxe_udp_rss, 0, "UDP RSS support");
312
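/*
 * Example (editor's addition): because the knobs above are created with
 * CTLFLAG_RDTUN/CTLFLAG_TUN, they can be preset as loader tunables, e.g.
 * in /boot/loader.conf (values shown are illustrative only):
 *
 *     hw.bxe.debug=0
 *     hw.bxe.interrupt_mode=2
 *     hw.bxe.queue_count=4
 *     hw.bxe.hc_rx_ticks=25
 *     hw.bxe.hc_tx_ticks=50
 *     hw.bxe.mrrs=-1
 */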
313
314#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
315
316#define STATS_OFFSET32(stat_name)                   \
317    (offsetof(struct bxe_eth_stats, stat_name) / 4)
318
319#define Q_STATS_OFFSET32(stat_name)                   \
320    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
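
/*
 * Editor's note: the division by 4 turns a byte offset within the stats
 * structure into an index into that structure viewed as an array of 32-bit
 * words, e.g. STATS_OFFSET32(total_bytes_received_hi) is the uint32_t index
 * of that field inside struct bxe_eth_stats.
 */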
321
322static const struct {
323    uint32_t offset;
324    uint32_t size;
325    uint32_t flags;
326#define STATS_FLAGS_PORT  1
327#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
328#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
329    char string[STAT_NAME_LEN];
330} bxe_eth_stats_arr[] = {
331    { STATS_OFFSET32(total_bytes_received_hi),
332                8, STATS_FLAGS_BOTH, "rx_bytes" },
333    { STATS_OFFSET32(error_bytes_received_hi),
334                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
335    { STATS_OFFSET32(total_unicast_packets_received_hi),
336                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
337    { STATS_OFFSET32(total_multicast_packets_received_hi),
338                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
339    { STATS_OFFSET32(total_broadcast_packets_received_hi),
340                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
341    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
342                8, STATS_FLAGS_PORT, "rx_crc_errors" },
343    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
344                8, STATS_FLAGS_PORT, "rx_align_errors" },
345    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
346                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
347    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
348                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
349    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
350                8, STATS_FLAGS_PORT, "rx_fragments" },
351    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
352                8, STATS_FLAGS_PORT, "rx_jabbers" },
353    { STATS_OFFSET32(no_buff_discard_hi),
354                8, STATS_FLAGS_BOTH, "rx_discards" },
355    { STATS_OFFSET32(mac_filter_discard),
356                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
357    { STATS_OFFSET32(mf_tag_discard),
358                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
359    { STATS_OFFSET32(pfc_frames_received_hi),
360                8, STATS_FLAGS_PORT, "pfc_frames_received" },
361    { STATS_OFFSET32(pfc_frames_sent_hi),
362                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
363    { STATS_OFFSET32(brb_drop_hi),
364                8, STATS_FLAGS_PORT, "rx_brb_discard" },
365    { STATS_OFFSET32(brb_truncate_hi),
366                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
367    { STATS_OFFSET32(pause_frames_received_hi),
368                8, STATS_FLAGS_PORT, "rx_pause_frames" },
369    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
370                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
371    { STATS_OFFSET32(nig_timer_max),
372                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
373    { STATS_OFFSET32(total_bytes_transmitted_hi),
374                8, STATS_FLAGS_BOTH, "tx_bytes" },
375    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
376                8, STATS_FLAGS_PORT, "tx_error_bytes" },
377    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
378                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
379    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
380                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
381    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
382                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
383    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
384                8, STATS_FLAGS_PORT, "tx_mac_errors" },
385    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
386                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
387    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
388                8, STATS_FLAGS_PORT, "tx_single_collisions" },
389    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
390                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
391    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
392                8, STATS_FLAGS_PORT, "tx_deferred" },
393    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
394                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
395    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
396                8, STATS_FLAGS_PORT, "tx_late_collisions" },
397    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
398                8, STATS_FLAGS_PORT, "tx_total_collisions" },
399    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
400                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
401    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
402                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
403    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
404                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
405    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
406                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
407    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
408                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
409    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
410                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
411    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
412                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
413    { STATS_OFFSET32(pause_frames_sent_hi),
414                8, STATS_FLAGS_PORT, "tx_pause_frames" },
415    { STATS_OFFSET32(total_tpa_aggregations_hi),
416                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
417    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
418                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
419    { STATS_OFFSET32(total_tpa_bytes_hi),
420                8, STATS_FLAGS_FUNC, "tpa_bytes"},
421    { STATS_OFFSET32(eee_tx_lpi),
422                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
423    { STATS_OFFSET32(rx_calls),
424                4, STATS_FLAGS_FUNC, "rx_calls"},
425    { STATS_OFFSET32(rx_pkts),
426                4, STATS_FLAGS_FUNC, "rx_pkts"},
427    { STATS_OFFSET32(rx_tpa_pkts),
428                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
429    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
430                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
431    { STATS_OFFSET32(rx_bxe_service_rxsgl),
432                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
433    { STATS_OFFSET32(rx_jumbo_sge_pkts),
434                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
435    { STATS_OFFSET32(rx_soft_errors),
436                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
437    { STATS_OFFSET32(rx_hw_csum_errors),
438                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
439    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
440                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
441    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
442                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
443    { STATS_OFFSET32(rx_budget_reached),
444                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
445    { STATS_OFFSET32(tx_pkts),
446                4, STATS_FLAGS_FUNC, "tx_pkts"},
447    { STATS_OFFSET32(tx_soft_errors),
448                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
449    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
450                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
451    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
452                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
453    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
454                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
455    { STATS_OFFSET32(tx_ofld_frames_lso),
456                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
457    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
458                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
459    { STATS_OFFSET32(tx_encap_failures),
460                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
461    { STATS_OFFSET32(tx_hw_queue_full),
462                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
463    { STATS_OFFSET32(tx_hw_max_queue_depth),
464                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
465    { STATS_OFFSET32(tx_dma_mapping_failure),
466                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
467    { STATS_OFFSET32(tx_max_drbr_queue_depth),
468                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
469    { STATS_OFFSET32(tx_window_violation_std),
470                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
471    { STATS_OFFSET32(tx_window_violation_tso),
472                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
473    { STATS_OFFSET32(tx_chain_lost_mbuf),
474                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
475    { STATS_OFFSET32(tx_frames_deferred),
476                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
477    { STATS_OFFSET32(tx_queue_xoff),
478                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
479    { STATS_OFFSET32(mbuf_defrag_attempts),
480                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
481    { STATS_OFFSET32(mbuf_defrag_failures),
482                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
483    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
485    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
486                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
487    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
488                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
489    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
490                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
491    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
492                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
493    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
494                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
495    { STATS_OFFSET32(mbuf_alloc_tx),
496                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
497    { STATS_OFFSET32(mbuf_alloc_rx),
498                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
499    { STATS_OFFSET32(mbuf_alloc_sge),
500                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
501    { STATS_OFFSET32(mbuf_alloc_tpa),
502                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
503    { STATS_OFFSET32(tx_queue_full_return),
504                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
505    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
506                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
507    { STATS_OFFSET32(tx_request_link_down_failures),
508                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
509    { STATS_OFFSET32(bd_avail_too_less_failures),
510                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
511    { STATS_OFFSET32(tx_mq_not_empty),
512                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
513    { STATS_OFFSET32(nsegs_path1_errors),
514                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
515    { STATS_OFFSET32(nsegs_path2_errors),
516                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
517
518
519};
520
521static const struct {
522    uint32_t offset;
523    uint32_t size;
524    char string[STAT_NAME_LEN];
525} bxe_eth_q_stats_arr[] = {
526    { Q_STATS_OFFSET32(total_bytes_received_hi),
527                8, "rx_bytes" },
528    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
529                8, "rx_ucast_packets" },
530    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
531                8, "rx_mcast_packets" },
532    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
533                8, "rx_bcast_packets" },
534    { Q_STATS_OFFSET32(no_buff_discard_hi),
535                8, "rx_discards" },
536    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
537                8, "tx_bytes" },
538    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
539                8, "tx_ucast_packets" },
540    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
541                8, "tx_mcast_packets" },
542    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
543                8, "tx_bcast_packets" },
544    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
545                8, "tpa_aggregations" },
546    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
547                8, "tpa_aggregated_frames"},
548    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
549                8, "tpa_bytes"},
550    { Q_STATS_OFFSET32(rx_calls),
551                4, "rx_calls"},
552    { Q_STATS_OFFSET32(rx_pkts),
553                4, "rx_pkts"},
554    { Q_STATS_OFFSET32(rx_tpa_pkts),
555                4, "rx_tpa_pkts"},
556    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
557                4, "rx_erroneous_jumbo_sge_pkts"},
558    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
559                4, "rx_bxe_service_rxsgl"},
560    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
561                4, "rx_jumbo_sge_pkts"},
562    { Q_STATS_OFFSET32(rx_soft_errors),
563                4, "rx_soft_errors"},
564    { Q_STATS_OFFSET32(rx_hw_csum_errors),
565                4, "rx_hw_csum_errors"},
566    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
567                4, "rx_ofld_frames_csum_ip"},
568    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
569                4, "rx_ofld_frames_csum_tcp_udp"},
570    { Q_STATS_OFFSET32(rx_budget_reached),
571                4, "rx_budget_reached"},
572    { Q_STATS_OFFSET32(tx_pkts),
573                4, "tx_pkts"},
574    { Q_STATS_OFFSET32(tx_soft_errors),
575                4, "tx_soft_errors"},
576    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
577                4, "tx_ofld_frames_csum_ip"},
578    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
579                4, "tx_ofld_frames_csum_tcp"},
580    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
581                4, "tx_ofld_frames_csum_udp"},
582    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
583                4, "tx_ofld_frames_lso"},
584    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
585                4, "tx_ofld_frames_lso_hdr_splits"},
586    { Q_STATS_OFFSET32(tx_encap_failures),
587                4, "tx_encap_failures"},
588    { Q_STATS_OFFSET32(tx_hw_queue_full),
589                4, "tx_hw_queue_full"},
590    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
591                4, "tx_hw_max_queue_depth"},
592    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
593                4, "tx_dma_mapping_failure"},
594    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
595                4, "tx_max_drbr_queue_depth"},
596    { Q_STATS_OFFSET32(tx_window_violation_std),
597                4, "tx_window_violation_std"},
598    { Q_STATS_OFFSET32(tx_window_violation_tso),
599                4, "tx_window_violation_tso"},
600    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
601                4, "tx_chain_lost_mbuf"},
602    { Q_STATS_OFFSET32(tx_frames_deferred),
603                4, "tx_frames_deferred"},
604    { Q_STATS_OFFSET32(tx_queue_xoff),
605                4, "tx_queue_xoff"},
606    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
607                4, "mbuf_defrag_attempts"},
608    { Q_STATS_OFFSET32(mbuf_defrag_failures),
609                4, "mbuf_defrag_failures"},
610    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
611                4, "mbuf_rx_bd_alloc_failed"},
612    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
613                4, "mbuf_rx_bd_mapping_failed"},
614    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
615                4, "mbuf_rx_tpa_alloc_failed"},
616    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
617                4, "mbuf_rx_tpa_mapping_failed"},
618    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
619                4, "mbuf_rx_sge_alloc_failed"},
620    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
621                4, "mbuf_rx_sge_mapping_failed"},
622    { Q_STATS_OFFSET32(mbuf_alloc_tx),
623                4, "mbuf_alloc_tx"},
624    { Q_STATS_OFFSET32(mbuf_alloc_rx),
625                4, "mbuf_alloc_rx"},
626    { Q_STATS_OFFSET32(mbuf_alloc_sge),
627                4, "mbuf_alloc_sge"},
628    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
629                4, "mbuf_alloc_tpa"},
630    { Q_STATS_OFFSET32(tx_queue_full_return),
631                4, "tx_queue_full_return"},
632    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
633                4, "bxe_tx_mq_sc_state_failures"},
634    { Q_STATS_OFFSET32(tx_request_link_down_failures),
635                4, "tx_request_link_down_failures"},
636    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
637                4, "bd_avail_too_less_failures"},
638    { Q_STATS_OFFSET32(tx_mq_not_empty),
639                4, "tx_mq_not_empty"},
640    { Q_STATS_OFFSET32(nsegs_path1_errors),
641                4, "nsegs_path1_errors"},
642    { Q_STATS_OFFSET32(nsegs_path2_errors),
643                4, "nsegs_path2_errors"}
644
645
646};
647
648#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
649#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
650
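/*
 * Illustrative sketch (editor's addition): how an export routine would
 * typically consume the tables above. Each entry names a starting uint32_t
 * index into the stats block; entries of size 8 are assumed to be laid out
 * as a hi word at 'offset' followed by the lo word, as the "_hi" field names
 * suggest. The helper below is hypothetical and assumes the aggregate stats
 * live in the softc's eth_stats member.
 */
#if 0
static uint64_t
bxe_read_eth_stat_sketch(struct bxe_softc *sc, int idx)
{
    const uint32_t *stats = (const uint32_t *)&sc->eth_stats; /* assumed member */
    uint32_t off = bxe_eth_stats_arr[idx].offset;

    if (bxe_eth_stats_arr[idx].size == 4) {
        return (stats[off]);
    }

    /* 64-bit counter: hi word first, lo word next (assumed layout) */
    return ((((uint64_t)stats[off]) << 32) + stats[off + 1]);
}
#endif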
651
652static void    bxe_cmng_fns_init(struct bxe_softc *sc,
653                                 uint8_t          read_cfg,
654                                 uint8_t          cmng_type);
655static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
656static void    storm_memset_cmng(struct bxe_softc *sc,
657                                 struct cmng_init *cmng,
658                                 uint8_t          port);
659static void    bxe_set_reset_global(struct bxe_softc *sc);
660static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
661static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
662                                 int              engine);
663static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
664static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
665                                   uint8_t          *global,
666                                   uint8_t          print);
667static void    bxe_int_disable(struct bxe_softc *sc);
668static int     bxe_release_leader_lock(struct bxe_softc *sc);
669static void    bxe_pf_disable(struct bxe_softc *sc);
670static void    bxe_free_fp_buffers(struct bxe_softc *sc);
671static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
672                                      struct bxe_fastpath *fp,
673                                      uint16_t            rx_bd_prod,
674                                      uint16_t            rx_cq_prod,
675                                      uint16_t            rx_sge_prod);
676static void    bxe_link_report_locked(struct bxe_softc *sc);
677static void    bxe_link_report(struct bxe_softc *sc);
678static void    bxe_link_status_update(struct bxe_softc *sc);
679static void    bxe_periodic_callout_func(void *xsc);
680static void    bxe_periodic_start(struct bxe_softc *sc);
681static void    bxe_periodic_stop(struct bxe_softc *sc);
682static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
683                                    uint16_t prev_index,
684                                    uint16_t index);
685static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
686                                     int                 queue);
687static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
688                                     uint16_t            index);
689static uint8_t bxe_txeof(struct bxe_softc *sc,
690                         struct bxe_fastpath *fp);
691static void    bxe_task_fp(struct bxe_fastpath *fp);
692static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
693                                     struct mbuf      *m,
694                                     uint8_t          contents);
695static int     bxe_alloc_mem(struct bxe_softc *sc);
696static void    bxe_free_mem(struct bxe_softc *sc);
697static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
698static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
699static int     bxe_interrupt_attach(struct bxe_softc *sc);
700static void    bxe_interrupt_detach(struct bxe_softc *sc);
701static void    bxe_set_rx_mode(struct bxe_softc *sc);
702static int     bxe_init_locked(struct bxe_softc *sc);
703static int     bxe_stop_locked(struct bxe_softc *sc);
704static __noinline int bxe_nic_load(struct bxe_softc *sc,
705                                   int              load_mode);
706static __noinline int bxe_nic_unload(struct bxe_softc *sc,
707                                     uint32_t         unload_mode,
708                                     uint8_t          keep_link);
709
710static void bxe_handle_sp_tq(void *context, int pending);
711static void bxe_handle_fp_tq(void *context, int pending);
712
713static int bxe_add_cdev(struct bxe_softc *sc);
714static void bxe_del_cdev(struct bxe_softc *sc);
715int bxe_grc_dump(struct bxe_softc *sc);
716static int bxe_alloc_buf_rings(struct bxe_softc *sc);
717static void bxe_free_buf_rings(struct bxe_softc *sc);
718
719/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
720uint32_t
721calc_crc32(uint8_t  *crc32_packet,
722           uint32_t crc32_length,
723           uint32_t crc32_seed,
724           uint8_t  complement)
725{
726   uint32_t byte         = 0;
727   uint32_t bit          = 0;
728   uint8_t  msb          = 0;
729   uint32_t temp         = 0;
730   uint32_t shft         = 0;
731   uint8_t  current_byte = 0;
732   uint32_t crc32_result = crc32_seed;
733   const uint32_t CRC32_POLY = 0x1edc6f41;
734
735   if ((crc32_packet == NULL) ||
736       (crc32_length == 0) ||
737       ((crc32_length % 8) != 0))
738    {
739        return (crc32_result);
740    }
741
742    for (byte = 0; byte < crc32_length; byte = byte + 1)
743    {
744        current_byte = crc32_packet[byte];
745        for (bit = 0; bit < 8; bit = bit + 1)
746        {
747            /* msb = crc32_result[31]; */
748            msb = (uint8_t)(crc32_result >> 31);
749
750            crc32_result = crc32_result << 1;
751
752            /* if (msb != current_byte[bit]) */
753            if (msb != (0x1 & (current_byte >> bit)))
754            {
755                crc32_result = crc32_result ^ CRC32_POLY;
756                /* crc32_result[0] = 1 */
757                crc32_result |= 1;
758            }
759        }
760    }
761
762    /* Last step is to:
763     * 1. "mirror" every bit
764     * 2. swap the 4 bytes
765     * 3. complement each bit
766     */
767
768    /* Mirror */
769    temp = crc32_result;
770    shft = sizeof(crc32_result) * 8 - 1;
771
772    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
773    {
774        temp <<= 1;
775        temp |= crc32_result & 1;
776        shft--;
777    }
778
779    /* temp[31-bit] = crc32_result[bit] */
780    temp <<= shft;
781
782    /* Swap */
783    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
784    {
785        uint32_t t0, t1, t2, t3;
786        t0 = (0x000000ff & (temp >> 24));
787        t1 = (0x0000ff00 & (temp >> 8));
788        t2 = (0x00ff0000 & (temp << 8));
789        t3 = (0xff000000 & (temp << 24));
790        crc32_result = t0 | t1 | t2 | t3;
791    }
792
793    /* Complement */
794    if (complement)
795    {
796        crc32_result = ~crc32_result;
797    }
798
799    return (crc32_result);
800}
801
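/*
 * Example usage (editor's addition, values illustrative): a firmware-style
 * CRC over an 8-byte-aligned buffer, using an all-ones seed and asking for
 * the final complement.
 */
#if 0
static uint32_t
calc_crc32_example(uint8_t *buf, uint32_t len_aligned_to_8)
{
    return (calc_crc32(buf, len_aligned_to_8, 0xffffffff, 1));
}
#endif
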
802int
803bxe_test_bit(int                    nr,
804             volatile unsigned long *addr)
805{
806    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
807}
808
809void
810bxe_set_bit(unsigned int           nr,
811            volatile unsigned long *addr)
812{
813    atomic_set_acq_long(addr, (1 << nr));
814}
815
816void
817bxe_clear_bit(int                    nr,
818              volatile unsigned long *addr)
819{
820    atomic_clear_acq_long(addr, (1 << nr));
821}
822
823int
824bxe_test_and_set_bit(int                    nr,
825                       volatile unsigned long *addr)
826{
827    unsigned long x;
828    nr = (1 << nr);
829    do {
830        x = *addr;
831    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
832    // if (x & nr) bit_was_set; else bit_was_not_set;
833    return (x & nr);
834}
835
836int
837bxe_test_and_clear_bit(int                    nr,
838                       volatile unsigned long *addr)
839{
840    unsigned long x;
841    nr = (1 << nr);
842    do {
843        x = *addr;
844    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
845    // if (x & nr) bit_was_set; else bit_was_not_set;
846    return (x & nr);
847}
848
849int
850bxe_cmpxchg(volatile int *addr,
851            int          old,
852            int          new)
853{
854    int x;
855    do {
856        x = *addr;
857    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
858    return (x);
859}
860
861/*
862 * Get DMA memory from the OS.
863 *
864 * Validates that the OS has provided DMA buffers in response to a
865 * bus_dmamap_load call and saves the physical address of those buffers.
866 * When the callback is used the OS will return 0 for the mapping function
867 * (bus_dmamap_load), so the callback's error argument and the saved
868 * paddr/nseg fields are used to pass any failures back to the caller.
869 *
870 * Returns:
871 *   Nothing.
872 */
873static void
874bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
875{
876    struct bxe_dma *dma = arg;
877
878    if (error) {
879        dma->paddr = 0;
880        dma->nseg  = 0;
881        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
882    } else {
883        dma->paddr = segs->ds_addr;
884        dma->nseg  = nseg;
885    }
886}
887
888/*
889 * Allocate a block of memory and map it for DMA. No partial completions
890 * are allowed; any resources already acquired are released if we cannot
891 * acquire them all.
892 *
893 * Returns:
894 *   0 = Success, !0 = Failure
895 */
896int
897bxe_dma_alloc(struct bxe_softc *sc,
898              bus_size_t       size,
899              struct bxe_dma   *dma,
900              const char       *msg)
901{
902    int rc;
903
904    if (dma->size > 0) {
905        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
906              (unsigned long)dma->size);
907        return (1);
908    }
909
910    memset(dma, 0, sizeof(*dma)); /* sanity */
911    dma->sc   = sc;
912    dma->size = size;
913    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
914
915    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
916                            BCM_PAGE_SIZE,      /* alignment */
917                            0,                  /* boundary limit */
918                            BUS_SPACE_MAXADDR,  /* restricted low */
919                            BUS_SPACE_MAXADDR,  /* restricted hi */
920                            NULL,               /* addr filter() */
921                            NULL,               /* addr filter() arg */
922                            size,               /* max map size */
923                            1,                  /* num discontinuous */
924                            size,               /* max seg size */
925                            BUS_DMA_ALLOCNOW,   /* flags */
926                            NULL,               /* lock() */
927                            NULL,               /* lock() arg */
928                            &dma->tag);         /* returned dma tag */
929    if (rc != 0) {
930        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
931        memset(dma, 0, sizeof(*dma));
932        return (1);
933    }
934
935    rc = bus_dmamem_alloc(dma->tag,
936                          (void **)&dma->vaddr,
937                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
938                          &dma->map);
939    if (rc != 0) {
940        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
941        bus_dma_tag_destroy(dma->tag);
942        memset(dma, 0, sizeof(*dma));
943        return (1);
944    }
945
946    rc = bus_dmamap_load(dma->tag,
947                         dma->map,
948                         dma->vaddr,
949                         size,
950                         bxe_dma_map_addr, /* BLOGD in here */
951                         dma,
952                         BUS_DMA_NOWAIT);
953    if (rc != 0) {
954        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
955        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
956        bus_dma_tag_destroy(dma->tag);
957        memset(dma, 0, sizeof(*dma));
958        return (1);
959    }
960
961    return (0);
962}
963
964void
965bxe_dma_free(struct bxe_softc *sc,
966             struct bxe_dma   *dma)
967{
968    if (dma->size > 0) {
969        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
970
971        bus_dmamap_sync(dma->tag, dma->map,
972                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
973        bus_dmamap_unload(dma->tag, dma->map);
974        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
975        bus_dma_tag_destroy(dma->tag);
976    }
977
978    memset(dma, 0, sizeof(*dma));
979}
980
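/*
 * Example usage (editor's addition): the typical lifecycle of a bxe_dma
 * block, pairing bxe_dma_alloc() with bxe_dma_free(). The size and tag
 * string below are illustrative only.
 */
#if 0
static int
bxe_dma_example(struct bxe_softc *sc)
{
    struct bxe_dma dma;

    memset(&dma, 0, sizeof(dma)); /* bxe_dma_alloc() expects a clean block */

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
        return (ENOMEM);
    }

    /* dma.vaddr is the zeroed KVA, dma.paddr the bus address for the HW */

    bxe_dma_free(sc, &dma);
    return (0);
}
#endif
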
981/*
982 * These indirect read and write routines are only used during init.
983 * The locking is handled by the MCP.
984 */
985
986void
987bxe_reg_wr_ind(struct bxe_softc *sc,
988               uint32_t         addr,
989               uint32_t         val)
990{
991    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
992    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
993    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
994}
995
996uint32_t
997bxe_reg_rd_ind(struct bxe_softc *sc,
998               uint32_t         addr)
999{
1000    uint32_t val;
1001
1002    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1003    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1004    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1005
1006    return (val);
1007}
1008
1009static int
1010bxe_acquire_hw_lock(struct bxe_softc *sc,
1011                    uint32_t         resource)
1012{
1013    uint32_t lock_status;
1014    uint32_t resource_bit = (1 << resource);
1015    int func = SC_FUNC(sc);
1016    uint32_t hw_lock_control_reg;
1017    int cnt;
1018
1019    /* validate the resource is within range */
1020    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1021        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1022            " resource_bit 0x%x\n", resource, resource_bit);
1023        return (-1);
1024    }
1025
1026    if (func <= 5) {
1027        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1028    } else {
1029        hw_lock_control_reg =
1030                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1031    }
1032
1033    /* validate the resource is not already taken */
1034    lock_status = REG_RD(sc, hw_lock_control_reg);
1035    if (lock_status & resource_bit) {
1036        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1037              resource, lock_status, resource_bit);
1038        return (-1);
1039    }
1040
1041    /* try every 5ms for 5 seconds */
1042    for (cnt = 0; cnt < 1000; cnt++) {
1043        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1044        lock_status = REG_RD(sc, hw_lock_control_reg);
1045        if (lock_status & resource_bit) {
1046            return (0);
1047        }
1048        DELAY(5000);
1049    }
1050
1051    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1052        resource, resource_bit);
1053    return (-1);
1054}
1055
1056static int
1057bxe_release_hw_lock(struct bxe_softc *sc,
1058                    uint32_t         resource)
1059{
1060    uint32_t lock_status;
1061    uint32_t resource_bit = (1 << resource);
1062    int func = SC_FUNC(sc);
1063    uint32_t hw_lock_control_reg;
1064
1065    /* validate the resource is within range */
1066    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1067        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1068            " resource_bit 0x%x\n", resource, resource_bit);
1069        return (-1);
1070    }
1071
1072    if (func <= 5) {
1073        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1074    } else {
1075        hw_lock_control_reg =
1076                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1077    }
1078
1079    /* validate the resource is currently taken */
1080    lock_status = REG_RD(sc, hw_lock_control_reg);
1081    if (!(lock_status & resource_bit)) {
1082        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1083              resource, lock_status, resource_bit);
1084        return (-1);
1085    }
1086
1087    REG_WR(sc, hw_lock_control_reg, resource_bit);
1088    return (0);
1089}
1090static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1091{
1092	BXE_PHY_LOCK(sc);
1093	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1094}
1095
1096static void bxe_release_phy_lock(struct bxe_softc *sc)
1097{
1098	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1099	BXE_PHY_UNLOCK(sc);
1100}
1101/*
1102 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1103 * had we done things the other way around, if two pfs from the same port
1104 * would attempt to access nvram at the same time, we could run into a
1105 * scenario such as:
1106 * pf A takes the port lock.
1107 * pf B succeeds in taking the same lock since they are from the same port.
1108 * pf A takes the per pf misc lock. Performs eeprom access.
1109 * pf A finishes. Unlocks the per pf misc lock.
1110 * Pf B takes the lock and proceeds to perform its own access.
1111 * pf A unlocks the per port lock, while pf B is still working (!).
1112 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1113 * access corrupted by pf B).
1114 */
1115static int
1116bxe_acquire_nvram_lock(struct bxe_softc *sc)
1117{
1118    int port = SC_PORT(sc);
1119    int count, i;
1120    uint32_t val = 0;
1121
1122    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1123    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1124
1125    /* adjust timeout for emulation/FPGA */
1126    count = NVRAM_TIMEOUT_COUNT;
1127    if (CHIP_REV_IS_SLOW(sc)) {
1128        count *= 100;
1129    }
1130
1131    /* request access to nvram interface */
1132    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1133           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1134
1135    for (i = 0; i < count*10; i++) {
1136        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1137        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1138            break;
1139        }
1140
1141        DELAY(5);
1142    }
1143
1144    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1145        BLOGE(sc, "Cannot get access to nvram interface "
1146            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1147            port, val);
1148        return (-1);
1149    }
1150
1151    return (0);
1152}
1153
1154static int
1155bxe_release_nvram_lock(struct bxe_softc *sc)
1156{
1157    int port = SC_PORT(sc);
1158    int count, i;
1159    uint32_t val = 0;
1160
1161    /* adjust timeout for emulation/FPGA */
1162    count = NVRAM_TIMEOUT_COUNT;
1163    if (CHIP_REV_IS_SLOW(sc)) {
1164        count *= 100;
1165    }
1166
1167    /* relinquish nvram interface */
1168    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1169           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1170
1171    for (i = 0; i < count*10; i++) {
1172        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1173        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1174            break;
1175        }
1176
1177        DELAY(5);
1178    }
1179
1180    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1181        BLOGE(sc, "Cannot free access to nvram interface "
1182            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1183            port, val);
1184        return (-1);
1185    }
1186
1187    /* release HW lock: protect against other PFs in PF Direct Assignment */
1188    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1189
1190    return (0);
1191}
1192
1193static void
1194bxe_enable_nvram_access(struct bxe_softc *sc)
1195{
1196    uint32_t val;
1197
1198    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1199
1200    /* enable both bits, even on read */
1201    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1202           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1203}
1204
1205static void
1206bxe_disable_nvram_access(struct bxe_softc *sc)
1207{
1208    uint32_t val;
1209
1210    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1211
1212    /* disable both bits, even after read */
1213    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1214           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1215                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1216}
1217
1218static int
1219bxe_nvram_read_dword(struct bxe_softc *sc,
1220                     uint32_t         offset,
1221                     uint32_t         *ret_val,
1222                     uint32_t         cmd_flags)
1223{
1224    int count, i, rc;
1225    uint32_t val;
1226
1227    /* build the command word */
1228    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1229
1230    /* need to clear DONE bit separately */
1231    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1232
1233    /* address of the NVRAM to read from */
1234    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1235           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1236
1237    /* issue a read command */
1238    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1239
1240    /* adjust timeout for emulation/FPGA */
1241    count = NVRAM_TIMEOUT_COUNT;
1242    if (CHIP_REV_IS_SLOW(sc)) {
1243        count *= 100;
1244    }
1245
1246    /* wait for completion */
1247    *ret_val = 0;
1248    rc = -1;
1249    for (i = 0; i < count; i++) {
1250        DELAY(5);
1251        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1252
1253        if (val & MCPR_NVM_COMMAND_DONE) {
1254            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1255            /* we read nvram data in cpu order,
1256             * but ethtool sees it as an array of bytes;
1257             * converting to big-endian will do the work
1258             */
1259            *ret_val = htobe32(val);
1260            rc = 0;
1261            break;
1262        }
1263    }
1264
1265    if (rc == -1) {
1266        BLOGE(sc, "nvram read timeout expired "
1267            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1268            offset, cmd_flags, val);
1269    }
1270
1271    return (rc);
1272}
1273
1274static int
1275bxe_nvram_read(struct bxe_softc *sc,
1276               uint32_t         offset,
1277               uint8_t          *ret_buf,
1278               int              buf_size)
1279{
1280    uint32_t cmd_flags;
1281    uint32_t val;
1282    int rc;
1283
1284    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1285        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1286              offset, buf_size);
1287        return (-1);
1288    }
1289
1290    if ((offset + buf_size) > sc->devinfo.flash_size) {
1291        BLOGE(sc, "Invalid parameter, "
1292                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1293              offset, buf_size, sc->devinfo.flash_size);
1294        return (-1);
1295    }
1296
1297    /* request access to nvram interface */
1298    rc = bxe_acquire_nvram_lock(sc);
1299    if (rc) {
1300        return (rc);
1301    }
1302
1303    /* enable access to nvram interface */
1304    bxe_enable_nvram_access(sc);
1305
1306    /* read the first word(s) */
1307    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1308    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1309        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1310        memcpy(ret_buf, &val, 4);
1311
1312        /* advance to the next dword */
1313        offset += sizeof(uint32_t);
1314        ret_buf += sizeof(uint32_t);
1315        buf_size -= sizeof(uint32_t);
1316        cmd_flags = 0;
1317    }
1318
1319    if (rc == 0) {
1320        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1321        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1322        memcpy(ret_buf, &val, 4);
1323    }
1324
1325    /* disable access to nvram interface */
1326    bxe_disable_nvram_access(sc);
1327    bxe_release_nvram_lock(sc);
1328
1329    return (rc);
1330}
1331
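/*
 * Editor's note: bxe_nvram_read() above and bxe_nvram_write() below follow
 * the same protocol: take the NVRAM HW lock, enable the access bits, move
 * dword-aligned data with MCPR_NVM_COMMAND_FIRST on the first dword and
 * MCPR_NVM_COMMAND_LAST on the final one, then disable access and drop the
 * lock. Callers must supply 4-byte aligned offsets and sizes that fit within
 * devinfo.flash_size; single-byte writes are special-cased by
 * bxe_nvram_write1().
 */
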
1332static int
1333bxe_nvram_write_dword(struct bxe_softc *sc,
1334                      uint32_t         offset,
1335                      uint32_t         val,
1336                      uint32_t         cmd_flags)
1337{
1338    int count, i, rc;
1339
1340    /* build the command word */
1341    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1342
1343    /* need to clear DONE bit separately */
1344    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1345
1346    /* write the data */
1347    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1348
1349    /* address of the NVRAM to write to */
1350    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1351           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1352
1353    /* issue the write command */
1354    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1355
1356    /* adjust timeout for emulation/FPGA */
1357    count = NVRAM_TIMEOUT_COUNT;
1358    if (CHIP_REV_IS_SLOW(sc)) {
1359        count *= 100;
1360    }
1361
1362    /* wait for completion */
1363    rc = -1;
1364    for (i = 0; i < count; i++) {
1365        DELAY(5);
1366        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1367        if (val & MCPR_NVM_COMMAND_DONE) {
1368            rc = 0;
1369            break;
1370        }
1371    }
1372
1373    if (rc == -1) {
1374        BLOGE(sc, "nvram write timeout expired "
1375            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1376            offset, cmd_flags, val);
1377    }
1378
1379    return (rc);
1380}
1381
1382#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1383
1384static int
1385bxe_nvram_write1(struct bxe_softc *sc,
1386                 uint32_t         offset,
1387                 uint8_t          *data_buf,
1388                 int              buf_size)
1389{
1390    uint32_t cmd_flags;
1391    uint32_t align_offset;
1392    uint32_t val;
1393    int rc;
1394
1395    if ((offset + buf_size) > sc->devinfo.flash_size) {
1396        BLOGE(sc, "Invalid parameter, "
1397                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1398              offset, buf_size, sc->devinfo.flash_size);
1399        return (-1);
1400    }
1401
1402    /* request access to nvram interface */
1403    rc = bxe_acquire_nvram_lock(sc);
1404    if (rc) {
1405        return (rc);
1406    }
1407
1408    /* enable access to nvram interface */
1409    bxe_enable_nvram_access(sc);
1410
1411    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1412    align_offset = (offset & ~0x03);
1413    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1414
1415    if (rc == 0) {
1416        val &= ~(0xff << BYTE_OFFSET(offset));
1417        val |= (*data_buf << BYTE_OFFSET(offset));
1418
1419        /* nvram data is returned as an array of bytes;
1420         * convert it back to cpu order
1421         */
1422        val = be32toh(val);
1423
1424        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1425    }
1426
1427    /* disable access to nvram interface */
1428    bxe_disable_nvram_access(sc);
1429    bxe_release_nvram_lock(sc);
1430
1431    return (rc);
1432}
1433
1434static int
1435bxe_nvram_write(struct bxe_softc *sc,
1436                uint32_t         offset,
1437                uint8_t          *data_buf,
1438                int              buf_size)
1439{
1440    uint32_t cmd_flags;
1441    uint32_t val;
1442    uint32_t written_so_far;
1443    int rc;
1444
1445    if (buf_size == 1) {
1446        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1447    }
1448
1449    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1450        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1451              offset, buf_size);
1452        return (-1);
1453    }
1454
1455    if (buf_size == 0) {
1456        return (0); /* nothing to do */
1457    }
1458
1459    if ((offset + buf_size) > sc->devinfo.flash_size) {
1460        BLOGE(sc, "Invalid parameter, "
1461                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1462              offset, buf_size, sc->devinfo.flash_size);
1463        return (-1);
1464    }
1465
1466    /* request access to nvram interface */
1467    rc = bxe_acquire_nvram_lock(sc);
1468    if (rc) {
1469        return (rc);
1470    }
1471
1472    /* enable access to nvram interface */
1473    bxe_enable_nvram_access(sc);
1474
1475    written_so_far = 0;
1476    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1477    while ((written_so_far < buf_size) && (rc == 0)) {
1478        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1479            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1480        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1481            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1482        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1483            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1484        }
1485
1486        memcpy(&val, data_buf, 4);
1487
1488        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1489
1490        /* advance to the next dword */
1491        offset += sizeof(uint32_t);
1492        data_buf += sizeof(uint32_t);
1493        written_so_far += sizeof(uint32_t);
1494        cmd_flags = 0;
1495    }
1496
1497    /* disable access to nvram interface */
1498    bxe_disable_nvram_access(sc);
1499    bxe_release_nvram_lock(sc);
1500
1501    return (rc);
1502}
1503
1504/* copy command into DMAE command memory and set DMAE command Go */
1505void
1506bxe_post_dmae(struct bxe_softc    *sc,
1507              struct dmae_cmd *dmae,
1508              int                 idx)
1509{
1510    uint32_t cmd_offset;
1511    int i;
1512
1513    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1514    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1515        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1516    }
1517
1518    REG_WR(sc, dmae_reg_go_c[idx], 1);
1519}
1520
1521uint32_t
1522bxe_dmae_opcode_add_comp(uint32_t opcode,
1523                         uint8_t  comp_type)
1524{
1525    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1526                      DMAE_CMD_C_TYPE_ENABLE));
1527}
1528
1529uint32_t
1530bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1531{
1532    return (opcode & ~DMAE_CMD_SRC_RESET);
1533}
1534
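/*
 * Build a DMAE command opcode: encode the source and destination types (PCI
 * or GRC), the source/destination reset policy, the issuing port and VN,
 * the error policy and the endianness swap mode, and optionally the
 * completion destination via bxe_dmae_opcode_add_comp().
 */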
1535uint32_t
1536bxe_dmae_opcode(struct bxe_softc *sc,
1537                uint8_t          src_type,
1538                uint8_t          dst_type,
1539                uint8_t          with_comp,
1540                uint8_t          comp_type)
1541{
1542    uint32_t opcode = 0;
1543
1544    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1545               (dst_type << DMAE_CMD_DST_SHIFT));
1546
1547    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1548
1549    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1550
1551    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1552               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1553
1554    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1555
1556#ifdef __BIG_ENDIAN
1557    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1558#else
1559    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1560#endif
1561
1562    if (with_comp) {
1563        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1564    }
1565
1566    return (opcode);
1567}
1568
1569static void
1570bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1571                        struct dmae_cmd *dmae,
1572                        uint8_t             src_type,
1573                        uint8_t             dst_type)
1574{
1575    memset(dmae, 0, sizeof(struct dmae_cmd));
1576
1577    /* set the opcode */
1578    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1579                                   TRUE, DMAE_COMP_PCI);
1580
1581    /* fill in the completion parameters */
1582    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1583    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1584    dmae->comp_val     = DMAE_COMP_VAL;
1585}
1586
1587/* issue a DMAE command over the init channel and wait for completion */
1588static int
1589bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1590                         struct dmae_cmd *dmae)
1591{
1592    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1593    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1594
1595    BXE_DMAE_LOCK(sc);
1596
1597    /* reset completion */
1598    *wb_comp = 0;
1599
1600    /* post the command on the channel used for initializations */
1601    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1602
1603    /* wait for completion */
1604    DELAY(5);
1605
1606    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1607        if (!timeout ||
1608            (sc->recovery_state != BXE_RECOVERY_DONE &&
1609             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1610            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1611                *wb_comp, sc->recovery_state);
1612            BXE_DMAE_UNLOCK(sc);
1613            return (DMAE_TIMEOUT);
1614        }
1615
1616        timeout--;
1617        DELAY(50);
1618    }
1619
1620    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1621        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1622                *wb_comp, sc->recovery_state);
1623        BXE_DMAE_UNLOCK(sc);
1624        return (DMAE_PCI_ERROR);
1625    }
1626
1627    BXE_DMAE_UNLOCK(sc);
1628    return (0);
1629}
1630
1631void
1632bxe_read_dmae(struct bxe_softc *sc,
1633              uint32_t         src_addr,
1634              uint32_t         len32)
1635{
1636    struct dmae_cmd dmae;
1637    uint32_t *data;
1638    int i, rc;
1639
1640    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1641
1642    if (!sc->dmae_ready) {
1643        data = BXE_SP(sc, wb_data[0]);
1644
1645        for (i = 0; i < len32; i++) {
1646            data[i] = (CHIP_IS_E1(sc)) ?
1647                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1648                          REG_RD(sc, (src_addr + (i * 4)));
1649        }
1650
1651        return;
1652    }
1653
1654    /* set opcode and fixed command fields */
1655    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1656
1657    /* fill in addresses and len */
1658    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1659    dmae.src_addr_hi = 0;
1660    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1661    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1662    dmae.len         = len32;
1663
1664    /* issue the command and wait for completion */
1665    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1666        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1667    }
1668}
1669
1670void
1671bxe_write_dmae(struct bxe_softc *sc,
1672               bus_addr_t       dma_addr,
1673               uint32_t         dst_addr,
1674               uint32_t         len32)
1675{
1676    struct dmae_cmd dmae;
1677    int rc;
1678
1679    if (!sc->dmae_ready) {
1680        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1681
1682        if (CHIP_IS_E1(sc)) {
1683            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1684        } else {
1685            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1686        }
1687
1688        return;
1689    }
1690
1691    /* set opcode and fixed command fields */
1692    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1693
1694    /* fill in addresses and len */
1695    dmae.src_addr_lo = U64_LO(dma_addr);
1696    dmae.src_addr_hi = U64_HI(dma_addr);
1697    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1698    dmae.dst_addr_hi = 0;
1699    dmae.len         = len32;
1700
1701    /* issue the command and wait for completion */
1702    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1703        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1704    }
1705}
1706
1707void
1708bxe_write_dmae_phys_len(struct bxe_softc *sc,
1709                        bus_addr_t       phys_addr,
1710                        uint32_t         addr,
1711                        uint32_t         len)
1712{
1713    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1714    int offset = 0;
1715
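    /*
     * 'len' counts dwords while 'offset' advances in bytes (hence the '* 4'
     * below). Split the transfer into chunks no larger than the DMAE write
     * limit and issue whatever remains as the final command.
     */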
1716    while (len > dmae_wr_max) {
1717        bxe_write_dmae(sc,
1718                       (phys_addr + offset), /* src DMA address */
1719                       (addr + offset),      /* dst GRC address */
1720                       dmae_wr_max);
1721        offset += (dmae_wr_max * 4);
1722        len -= dmae_wr_max;
1723    }
1724
1725    bxe_write_dmae(sc,
1726                   (phys_addr + offset), /* src DMA address */
1727                   (addr + offset),      /* dst GRC address */
1728                   len);
1729}
1730
1731void
1732bxe_set_ctx_validation(struct bxe_softc   *sc,
1733                       struct eth_context *cxt,
1734                       uint32_t           cid)
1735{
1736    /* ustorm cxt validation */
1737    cxt->ustorm_ag_context.cdu_usage =
1738        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1739            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1740    /* xcontext validation */
1741    cxt->xstorm_ag_context.cdu_reserved =
1742        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1743            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1744}
1745
1746static void
1747bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1748                            uint8_t          port,
1749                            uint8_t          fw_sb_id,
1750                            uint8_t          sb_index,
1751                            uint8_t          ticks)
1752{
1753    uint32_t addr =
1754        (BAR_CSTRORM_INTMEM +
1755         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1756
1757    REG_WR8(sc, addr, ticks);
1758
1759    BLOGD(sc, DBG_LOAD,
1760          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1761          port, fw_sb_id, sb_index, ticks);
1762}
1763
1764static void
1765bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1766                            uint8_t          port,
1767                            uint16_t         fw_sb_id,
1768                            uint8_t          sb_index,
1769                            uint8_t          disable)
1770{
1771    uint32_t enable_flag =
1772        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1773    uint32_t addr =
1774        (BAR_CSTRORM_INTMEM +
1775         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1776    uint8_t flags;
1777
1778    /* clear and set */
1779    flags = REG_RD8(sc, addr);
1780    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1781    flags |= enable_flag;
1782    REG_WR8(sc, addr, flags);
1783
1784    BLOGD(sc, DBG_LOAD,
1785          "port %d fw_sb_id %d sb_index %d disable %d\n",
1786          port, fw_sb_id, sb_index, disable);
1787}
1788
1789void
1790bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1791                             uint8_t          fw_sb_id,
1792                             uint8_t          sb_index,
1793                             uint8_t          disable,
1794                             uint16_t         usec)
1795{
1796    int port = SC_PORT(sc);
1797    uint8_t ticks = (usec / 4); /* XXX ??? */
1798
1799    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1800
1801    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1802    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1803}
1804
1805void
1806elink_cb_udelay(struct bxe_softc *sc,
1807                uint32_t         usecs)
1808{
1809    DELAY(usecs);
1810}
1811
1812uint32_t
1813elink_cb_reg_read(struct bxe_softc *sc,
1814                  uint32_t         reg_addr)
1815{
1816    return (REG_RD(sc, reg_addr));
1817}
1818
1819void
1820elink_cb_reg_write(struct bxe_softc *sc,
1821                   uint32_t         reg_addr,
1822                   uint32_t         val)
1823{
1824    REG_WR(sc, reg_addr, val);
1825}
1826
1827void
1828elink_cb_reg_wb_write(struct bxe_softc *sc,
1829                      uint32_t         offset,
1830                      uint32_t         *wb_write,
1831                      uint16_t         len)
1832{
1833    REG_WR_DMAE(sc, offset, wb_write, len);
1834}
1835
1836void
1837elink_cb_reg_wb_read(struct bxe_softc *sc,
1838                     uint32_t         offset,
1839                     uint32_t         *wb_write,
1840                     uint16_t         len)
1841{
1842    REG_RD_DMAE(sc, offset, wb_write, len);
1843}
1844
1845uint8_t
1846elink_cb_path_id(struct bxe_softc *sc)
1847{
1848    return (SC_PATH(sc));
1849}
1850
1851void
1852elink_cb_event_log(struct bxe_softc     *sc,
1853                   const elink_log_id_t elink_log_id,
1854                   ...)
1855{
1856    /* XXX */
1857    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1858}
1859
1860static int
1861bxe_set_spio(struct bxe_softc *sc,
1862             int              spio,
1863             uint32_t         mode)
1864{
1865    uint32_t spio_reg;
1866
1867    /* Only 2 SPIOs are configurable */
1868    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1869        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1870        return (-1);
1871    }
1872
1873    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1874
1875    /* read SPIO and mask except the float bits */
1876    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1877
1878    switch (mode) {
1879    case MISC_SPIO_OUTPUT_LOW:
1880        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1881        /* clear FLOAT and set CLR */
1882        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1883        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1884        break;
1885
1886    case MISC_SPIO_OUTPUT_HIGH:
1887        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1888        /* clear FLOAT and set SET */
1889        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1890        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1891        break;
1892
1893    case MISC_SPIO_INPUT_HI_Z:
1894        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1895        /* set FLOAT */
1896        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1897        break;
1898
1899    default:
1900        break;
1901    }
1902
1903    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1904    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1905
1906    return (0);
1907}
1908
1909static int
1910bxe_gpio_read(struct bxe_softc *sc,
1911              int              gpio_num,
1912              uint8_t          port)
1913{
1914    /* The GPIO should be swapped if swap register is set and active */
1915    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1916                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1917    int gpio_shift = (gpio_num +
1918                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1919    uint32_t gpio_mask = (1 << gpio_shift);
1920    uint32_t gpio_reg;
1921
1922    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1923        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1924            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1925            gpio_mask);
1926        return (-1);
1927    }
1928
1929    /* read GPIO value */
1930    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1931
1932    /* get the requested pin value */
1933    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1934}
1935
1936static int
1937bxe_gpio_write(struct bxe_softc *sc,
1938               int              gpio_num,
1939               uint32_t         mode,
1940               uint8_t          port)
1941{
1942    /* The GPIO should be swapped if swap register is set and active */
1943    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1944                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1945    int gpio_shift = (gpio_num +
1946                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1947    uint32_t gpio_mask = (1 << gpio_shift);
1948    uint32_t gpio_reg;
1949
1950    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1951        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1952            " gpio_shift %d gpio_mask 0x%x\n",
1953            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1954        return (-1);
1955    }
1956
1957    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1958
1959    /* read GPIO and mask except the float bits */
1960    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1961
1962    switch (mode) {
1963    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1964        BLOGD(sc, DBG_PHY,
1965              "Set GPIO %d (shift %d) -> output low\n",
1966              gpio_num, gpio_shift);
1967        /* clear FLOAT and set CLR */
1968        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1969        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1970        break;
1971
1972    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1973        BLOGD(sc, DBG_PHY,
1974              "Set GPIO %d (shift %d) -> output high\n",
1975              gpio_num, gpio_shift);
1976        /* clear FLOAT and set SET */
1977        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1978        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1979        break;
1980
1981    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1982        BLOGD(sc, DBG_PHY,
1983              "Set GPIO %d (shift %d) -> input\n",
1984              gpio_num, gpio_shift);
1985        /* set FLOAT */
1986        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1987        break;
1988
1989    default:
1990        break;
1991    }
1992
1993    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1994    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1995
1996    return (0);
1997}
1998
1999static int
2000bxe_gpio_mult_write(struct bxe_softc *sc,
2001                    uint8_t          pins,
2002                    uint32_t         mode)
2003{
2004    uint32_t gpio_reg;
2005
2006    /* any port swapping should be handled by caller */
2007
2008    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2009
2010    /* read GPIO and mask except the float bits */
2011    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2012    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2013    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2014    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2015
2016    switch (mode) {
2017    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2018        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2019        /* set CLR */
2020        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2021        break;
2022
2023    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2024        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2025        /* set SET */
2026        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2027        break;
2028
2029    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2030        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2031        /* set FLOAT */
2032        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2033        break;
2034
2035    default:
2036        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2037            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2038        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2039        return (-1);
2040    }
2041
2042    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2043    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2044
2045    return (0);
2046}
2047
2048static int
2049bxe_gpio_int_write(struct bxe_softc *sc,
2050                   int              gpio_num,
2051                   uint32_t         mode,
2052                   uint8_t          port)
2053{
2054    /* The GPIO should be swapped if swap register is set and active */
2055    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2056                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2057    int gpio_shift = (gpio_num +
2058                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2059    uint32_t gpio_mask = (1 << gpio_shift);
2060    uint32_t gpio_reg;
2061
2062    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2063        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2064            " gpio_shift %d gpio_mask 0x%x\n",
2065            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2066        return (-1);
2067    }
2068
2069    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2070
2071    /* read GPIO int */
2072    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2073
2074    switch (mode) {
2075    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2076        BLOGD(sc, DBG_PHY,
2077              "Clear GPIO INT %d (shift %d) -> output low\n",
2078              gpio_num, gpio_shift);
2079        /* clear SET and set CLR */
2080        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2081        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2082        break;
2083
2084    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2085        BLOGD(sc, DBG_PHY,
2086              "Set GPIO INT %d (shift %d) -> output high\n",
2087              gpio_num, gpio_shift);
2088        /* clear CLR and set SET */
2089        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2090        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2091        break;
2092
2093    default:
2094        break;
2095    }
2096
2097    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2098    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2099
2100    return (0);
2101}
2102
2103uint32_t
2104elink_cb_gpio_read(struct bxe_softc *sc,
2105                   uint16_t         gpio_num,
2106                   uint8_t          port)
2107{
2108    return (bxe_gpio_read(sc, gpio_num, port));
2109}
2110
2111uint8_t
2112elink_cb_gpio_write(struct bxe_softc *sc,
2113                    uint16_t         gpio_num,
2114                    uint8_t          mode, /* 0=low 1=high */
2115                    uint8_t          port)
2116{
2117    return (bxe_gpio_write(sc, gpio_num, mode, port));
2118}
2119
2120uint8_t
2121elink_cb_gpio_mult_write(struct bxe_softc *sc,
2122                         uint8_t          pins,
2123                         uint8_t          mode) /* 0=low 1=high */
2124{
2125    return (bxe_gpio_mult_write(sc, pins, mode));
2126}
2127
2128uint8_t
2129elink_cb_gpio_int_write(struct bxe_softc *sc,
2130                        uint16_t         gpio_num,
2131                        uint8_t          mode, /* 0=low 1=high */
2132                        uint8_t          port)
2133{
2134    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2135}
2136
2137void
2138elink_cb_notify_link_changed(struct bxe_softc *sc)
2139{
2140    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2141                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2142}
2143
2144/* send the MCP a request, block until there is a reply */
2145uint32_t
2146elink_cb_fw_command(struct bxe_softc *sc,
2147                    uint32_t         command,
2148                    uint32_t         param)
2149{
2150    int mb_idx = SC_FW_MB_IDX(sc);
2151    uint32_t seq;
2152    uint32_t rc = 0;
2153    uint32_t cnt = 1;
2154    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2155
2156    BXE_FWMB_LOCK(sc);
2157
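    /*
     * Each request carries an incrementing sequence number in the low bits
     * of the mailbox header. The firmware echoes that sequence back in its
     * own mailbox header, which is what the polling loop below matches on.
     */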
2158    seq = ++sc->fw_seq;
2159    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2160    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2161
2162    BLOGD(sc, DBG_PHY,
2163          "wrote command 0x%08x to FW MB param 0x%08x\n",
2164          (command | seq), param);
2165
2166    /* Let the FW do its magic. Give it up to 5 seconds... */
2167    do {
2168        DELAY(delay * 1000);
2169        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2170    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2171
2172    BLOGD(sc, DBG_PHY,
2173          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2174          cnt*delay, rc, seq);
2175
2176    /* is this a reply to our command? */
2177    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2178        rc &= FW_MSG_CODE_MASK;
2179    } else {
2180        /* Ruh-roh! */
2181        BLOGE(sc, "FW failed to respond!\n");
2182        // XXX bxe_fw_dump(sc);
2183        rc = 0;
2184    }
2185
2186    BXE_FWMB_UNLOCK(sc);
2187    return (rc);
2188}
2189
2190static uint32_t
2191bxe_fw_command(struct bxe_softc *sc,
2192               uint32_t         command,
2193               uint32_t         param)
2194{
2195    return (elink_cb_fw_command(sc, command, param));
2196}
2197
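/* write a 64-bit bus address into STORM internal memory as two 32-bit words */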
2198static void
2199__storm_memset_dma_mapping(struct bxe_softc *sc,
2200                           uint32_t         addr,
2201                           bus_addr_t       mapping)
2202{
2203    REG_WR(sc, addr, U64_LO(mapping));
2204    REG_WR(sc, (addr + 4), U64_HI(mapping));
2205}
2206
2207static void
2208storm_memset_spq_addr(struct bxe_softc *sc,
2209                      bus_addr_t       mapping,
2210                      uint16_t         abs_fid)
2211{
2212    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2213                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2214    __storm_memset_dma_mapping(sc, addr, mapping);
2215}
2216
2217static void
2218storm_memset_vf_to_pf(struct bxe_softc *sc,
2219                      uint16_t         abs_fid,
2220                      uint16_t         pf_id)
2221{
2222    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2223    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2224    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2225    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2226}
2227
2228static void
2229storm_memset_func_en(struct bxe_softc *sc,
2230                     uint16_t         abs_fid,
2231                     uint8_t          enable)
2232{
2233    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2234    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2235    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2236    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2237}
2238
2239static void
2240storm_memset_eq_data(struct bxe_softc       *sc,
2241                     struct event_ring_data *eq_data,
2242                     uint16_t               pfid)
2243{
2244    uint32_t addr;
2245    size_t size;
2246
2247    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2248    size = sizeof(struct event_ring_data);
2249    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2250}
2251
2252static void
2253storm_memset_eq_prod(struct bxe_softc *sc,
2254                     uint16_t         eq_prod,
2255                     uint16_t         pfid)
2256{
2257    uint32_t addr = (BAR_CSTRORM_INTMEM +
2258                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2259    REG_WR16(sc, addr, eq_prod);
2260}
2261
2262/*
2263 * Post a slowpath command.
2264 *
2265 * A slowpath command is used to propagate a configuration change through
2266 * the controller in a controlled manner, allowing each STORM processor and
2267 * other H/W blocks to phase in the change.  The commands sent on the
2268 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2269 * completion of the ramrod will occur in different ways.  Here's a
2270 * breakdown of ramrods and how they complete:
2271 *
2272 * RAMROD_CMD_ID_ETH_PORT_SETUP
2273 *   Used to setup the leading connection on a port.  Completes on the
2274 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2275 *
2276 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2277 *   Used to setup an additional connection on a port.  Completes on the
2278 *   RCQ of the multi-queue/RSS connection being initialized.
2279 *
2280 * RAMROD_CMD_ID_ETH_STAT_QUERY
2281 *   Used to force the storm processors to update the statistics database
2282 *   in host memory.  This ramrod is sent on the leading connection CID and
2283 *   completes as an index increment of the CSTORM on the default status
2284 *   block.
2285 *
2286 * RAMROD_CMD_ID_ETH_UPDATE
2287 *   Used to update the state of the leading connection, usually to update
2288 *   the RSS indirection table.  Completes on the RCQ of the leading
2289 *   connection. (Not currently used under FreeBSD until OS support becomes
2290 *   available.)
2291 *
2292 * RAMROD_CMD_ID_ETH_HALT
2293 *   Used when tearing down a connection prior to driver unload.  Completes
2294 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2295 *   use this on the leading connection.
2296 *
2297 * RAMROD_CMD_ID_ETH_SET_MAC
2298 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2299 *   the RCQ of the leading connection.
2300 *
2301 * RAMROD_CMD_ID_ETH_CFC_DEL
2302 *   Used when tearing down a connection prior to driver unload.  Completes
2303 *   on the RCQ of the leading connection (since the current connection
2304 *   has been completely removed from controller memory).
2305 *
2306 * RAMROD_CMD_ID_ETH_PORT_DEL
2307 *   Used to tear down the leading connection prior to driver unload,
2308 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2309 *   default status block.
2310 *
2311 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2312 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2313 *   RSS connection that is being offloaded.  (Not currently used under
2314 *   FreeBSD.)
2315 *
2316 * There can only be one command pending per function.
2317 *
2318 * Returns:
2319 *   0 = Success, !0 = Failure.
2320 */
2321
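/*
 * Illustrative sketch only (not part of the driver flow): a contextless
 * ramrod such as a filter rules update would be posted roughly as follows,
 * where 'cid', 'rdata_hi' and 'rdata_lo' are placeholders for the SW CID and
 * the split bus address of ramrod data already prepared in DMA memory:
 *
 *     if (bxe_sp_post(sc, RAMROD_CMD_ID_ETH_FILTER_RULES, cid,
 *                     rdata_hi, rdata_lo, ETH_CONNECTION_TYPE) != 0) {
 *         BLOGE(sc, "failed to post filter rules ramrod\n");
 *     }
 *
 * Because filter rules are contextless, the completion for this example
 * arrives on the event queue (EQ) rather than on an RCQ.
 */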
2322/* must be called under the spq lock */
2323static inline
2324struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2325{
2326    struct eth_spe *next_spe = sc->spq_prod_bd;
2327
2328    if (sc->spq_prod_bd == sc->spq_last_bd) {
2329        /* wrap back to the first eth_spq */
2330        sc->spq_prod_bd = sc->spq;
2331        sc->spq_prod_idx = 0;
2332    } else {
2333        sc->spq_prod_bd++;
2334        sc->spq_prod_idx++;
2335    }
2336
2337    return (next_spe);
2338}
2339
2340/* must be called under the spq lock */
2341static inline
2342void bxe_sp_prod_update(struct bxe_softc *sc)
2343{
2344    int func = SC_FUNC(sc);
2345
2346    /*
2347     * Make sure that BD data is updated before writing the producer.
2348     * BD data is written to the memory, the producer is read from the
2349     * memory, thus we need a full memory barrier to ensure the ordering.
2350     */
2351    mb();
2352
2353    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2354             sc->spq_prod_idx);
2355
2356    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2357                      BUS_SPACE_BARRIER_WRITE);
2358}
2359
2360/**
2361 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2362 *
2363 * @cmd:      command to check
2364 * @cmd_type: command type
2365 */
2366static inline
2367int bxe_is_contextless_ramrod(int cmd,
2368                              int cmd_type)
2369{
2370    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2371        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2372        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2373        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2374        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2375        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2376        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2377        return (TRUE);
2378    } else {
2379        return (FALSE);
2380    }
2381}
2382
2383/**
2384 * bxe_sp_post - place a single command on an SP ring
2385 *
2386 * @sc:         driver handle
2387 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2388 * @cid:        SW CID the command is related to
2389 * @data_hi:    command private data address (high 32 bits)
2390 * @data_lo:    command private data address (low 32 bits)
2391 * @cmd_type:   command type (e.g. NONE, ETH)
2392 *
2393 * SP data is handled as if it's always an address pair, thus data fields are
2394 * not swapped to little endian in upper functions. Instead this function swaps
2395 * data as if it's two uint32 fields.
2396 */
2397int
2398bxe_sp_post(struct bxe_softc *sc,
2399            int              command,
2400            int              cid,
2401            uint32_t         data_hi,
2402            uint32_t         data_lo,
2403            int              cmd_type)
2404{
2405    struct eth_spe *spe;
2406    uint16_t type;
2407    int common;
2408
2409    common = bxe_is_contextless_ramrod(command, cmd_type);
2410
2411    BXE_SP_LOCK(sc);
2412
2413    if (common) {
2414        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2415            BLOGE(sc, "EQ ring is full!\n");
2416            BXE_SP_UNLOCK(sc);
2417            return (-1);
2418        }
2419    } else {
2420        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2421            BLOGE(sc, "SPQ ring is full!\n");
2422            BXE_SP_UNLOCK(sc);
2423            return (-1);
2424        }
2425    }
2426
2427    spe = bxe_sp_get_next(sc);
2428
2429    /* CID needs the port number to be encoded in it */
2430    spe->hdr.conn_and_cmd_data =
2431        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2432
2433    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2434
2435    /* TBD: Check if it works for VFs */
2436    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2437             SPE_HDR_T_FUNCTION_ID);
2438
2439    spe->hdr.type = htole16(type);
2440
2441    spe->data.update_data_addr.hi = htole32(data_hi);
2442    spe->data.update_data_addr.lo = htole32(data_lo);
2443
2444    /*
2445     * It's ok if the actual decrement is issued towards the memory
2446     * somewhere between the lock and unlock. Thus no more explicit
2447     * memory barrier is needed.
2448     */
2449    if (common) {
2450        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2451    } else {
2452        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2453    }
2454
2455    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2456    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2457          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2458    BLOGD(sc, DBG_SP,
2459          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2460          sc->spq_prod_idx,
2461          (uint32_t)U64_HI(sc->spq_dma.paddr),
2462          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2463          command,
2464          common,
2465          HW_CID(sc, cid),
2466          data_hi,
2467          data_lo,
2468          type,
2469          atomic_load_acq_long(&sc->cq_spq_left),
2470          atomic_load_acq_long(&sc->eq_spq_left));
2471
2472    bxe_sp_prod_update(sc);
2473
2474    BXE_SP_UNLOCK(sc);
2475    return (0);
2476}
2477
2478/**
2479 * bxe_debug_print_ind_table - prints the indirection table configuration.
2480 *
2481 * @sc: driver handle
2482 * @p:  pointer to rss configuration
2483 */
2484
2485/*
2486 * FreeBSD Device probe function.
2487 *
2488 * Compares the device found to the driver's list of supported devices and
2489 * reports back to the bsd loader whether this is the right driver for the device.
2490 * This is the driver entry function called from the "kldload" command.
2491 *
2492 * Returns:
2493 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2494 */
2495static int
2496bxe_probe(device_t dev)
2497{
2498    struct bxe_device_type *t;
2499    char *descbuf;
2500    uint16_t did, sdid, svid, vid;
2501
2502    /* Find our device structure */
2503    t = bxe_devs;
2504
2505    /* Get the data for the device to be probed. */
2506    vid  = pci_get_vendor(dev);
2507    did  = pci_get_device(dev);
2508    svid = pci_get_subvendor(dev);
2509    sdid = pci_get_subdevice(dev);
2510
2511    /* Look through the list of known devices for a match. */
2512    while (t->bxe_name != NULL) {
2513        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2514            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2515            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2516            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2517            if (descbuf == NULL)
2518                return (ENOMEM);
2519
2520            /* Print out the device identity. */
2521            snprintf(descbuf, BXE_DEVDESC_MAX,
2522                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2523                     (((pci_read_config(dev, PCIR_REVID, 4) &
2524                        0xf0) >> 4) + 'A'),
2525                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2526                     BXE_DRIVER_VERSION);
2527
2528            device_set_desc_copy(dev, descbuf);
2529            free(descbuf, M_TEMP);
2530            return (BUS_PROBE_DEFAULT);
2531        }
2532        t++;
2533    }
2534
2535    return (ENXIO);
2536}
2537
2538static void
2539bxe_init_mutexes(struct bxe_softc *sc)
2540{
2541#ifdef BXE_CORE_LOCK_SX
2542    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2543             "bxe%d_core_lock", sc->unit);
2544    sx_init(&sc->core_sx, sc->core_sx_name);
2545#else
2546    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2547             "bxe%d_core_lock", sc->unit);
2548    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2549#endif
2550
2551    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2552             "bxe%d_sp_lock", sc->unit);
2553    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2554
2555    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2556             "bxe%d_dmae_lock", sc->unit);
2557    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2558
2559    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2560             "bxe%d_phy_lock", sc->unit);
2561    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2562
2563    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2564             "bxe%d_fwmb_lock", sc->unit);
2565    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2566
2567    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2568             "bxe%d_print_lock", sc->unit);
2569    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2570
2571    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2572             "bxe%d_stats_lock", sc->unit);
2573    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2574
2575    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2576             "bxe%d_mcast_lock", sc->unit);
2577    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2578}
2579
2580static void
2581bxe_release_mutexes(struct bxe_softc *sc)
2582{
2583#ifdef BXE_CORE_LOCK_SX
2584    sx_destroy(&sc->core_sx);
2585#else
2586    if (mtx_initialized(&sc->core_mtx)) {
2587        mtx_destroy(&sc->core_mtx);
2588    }
2589#endif
2590
2591    if (mtx_initialized(&sc->sp_mtx)) {
2592        mtx_destroy(&sc->sp_mtx);
2593    }
2594
2595    if (mtx_initialized(&sc->dmae_mtx)) {
2596        mtx_destroy(&sc->dmae_mtx);
2597    }
2598
2599    if (mtx_initialized(&sc->port.phy_mtx)) {
2600        mtx_destroy(&sc->port.phy_mtx);
2601    }
2602
2603    if (mtx_initialized(&sc->fwmb_mtx)) {
2604        mtx_destroy(&sc->fwmb_mtx);
2605    }
2606
2607    if (mtx_initialized(&sc->print_mtx)) {
2608        mtx_destroy(&sc->print_mtx);
2609    }
2610
2611    if (mtx_initialized(&sc->stats_mtx)) {
2612        mtx_destroy(&sc->stats_mtx);
2613    }
2614
2615    if (mtx_initialized(&sc->mcast_mtx)) {
2616        mtx_destroy(&sc->mcast_mtx);
2617    }
2618}
2619
2620static void
2621bxe_tx_disable(struct bxe_softc* sc)
2622{
2623    if_t ifp = sc->ifp;
2624
2625    /* tell the stack the driver is stopped and TX queue is full */
2626    if (ifp != NULL) {
2627        if_setdrvflags(ifp, 0);
2628    }
2629}
2630
2631static void
2632bxe_drv_pulse(struct bxe_softc *sc)
2633{
2634    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2635             sc->fw_drv_pulse_wr_seq);
2636}
2637
2638static inline uint16_t
2639bxe_tx_avail(struct bxe_softc *sc,
2640             struct bxe_fastpath *fp)
2641{
2642    int16_t  used;
2643    uint16_t prod;
2644    uint16_t cons;
2645
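    /*
     * SUB_S16() gives the number of BDs currently in flight even when the
     * 16-bit producer index has wrapped past the consumer; the available
     * space is whatever is left of the ring.
     */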
2646    prod = fp->tx_bd_prod;
2647    cons = fp->tx_bd_cons;
2648
2649    used = SUB_S16(prod, cons);
2650
2651    return ((int16_t)(sc->tx_ring_size) - used);
2652}
2653
2654static inline int
2655bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2656{
2657    uint16_t hw_cons;
2658
2659    mb(); /* status block fields can change */
2660    hw_cons = le16toh(*fp->tx_cons_sb);
2661    return (hw_cons != fp->tx_pkt_cons);
2662}
2663
2664static inline uint8_t
2665bxe_has_tx_work(struct bxe_fastpath *fp)
2666{
2667    /* expand this for multi-cos if ever supported */
2668    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2669}
2670
2671static inline int
2672bxe_has_rx_work(struct bxe_fastpath *fp)
2673{
2674    uint16_t rx_cq_cons_sb;
2675
2676    mb(); /* status block fields can change */
2677    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2678    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2679        rx_cq_cons_sb++;
2680    return (fp->rx_cq_cons != rx_cq_cons_sb);
2681}
2682
2683static void
2684bxe_sp_event(struct bxe_softc    *sc,
2685             struct bxe_fastpath *fp,
2686             union eth_rx_cqe    *rr_cqe)
2687{
2688    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2689    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2690    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2691    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2692
2693    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2694          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2695
2696    switch (command) {
2697    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2698        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2699        drv_cmd = ECORE_Q_CMD_UPDATE;
2700        break;
2701
2702    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2703        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2704        drv_cmd = ECORE_Q_CMD_SETUP;
2705        break;
2706
2707    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2708        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2709        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2710        break;
2711
2712    case (RAMROD_CMD_ID_ETH_HALT):
2713        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2714        drv_cmd = ECORE_Q_CMD_HALT;
2715        break;
2716
2717    case (RAMROD_CMD_ID_ETH_TERMINATE):
2718        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2719        drv_cmd = ECORE_Q_CMD_TERMINATE;
2720        break;
2721
2722    case (RAMROD_CMD_ID_ETH_EMPTY):
2723        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2724        drv_cmd = ECORE_Q_CMD_EMPTY;
2725        break;
2726
2727    default:
2728        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2729              command, fp->index);
2730        return;
2731    }
2732
2733    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2734        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2735        /*
2736         * q_obj->complete_cmd() failure means that this was
2737         * an unexpected completion.
2738         *
2739         * In this case we don't want to increase the sc->spq_left
2740         * because apparently we haven't sent this command the first
2741         * place.
2742         */
2743        // bxe_panic(sc, ("Unexpected SP completion\n"));
2744        return;
2745    }
2746
2747    atomic_add_acq_long(&sc->cq_spq_left, 1);
2748
2749    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2750          atomic_load_acq_long(&sc->cq_spq_left));
2751}
2752
2753/*
2754 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2755 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2756 * the current aggregation queue as in-progress.
2757 */
2758static void
2759bxe_tpa_start(struct bxe_softc            *sc,
2760              struct bxe_fastpath         *fp,
2761              uint16_t                    queue,
2762              uint16_t                    cons,
2763              uint16_t                    prod,
2764              struct eth_fast_path_rx_cqe *cqe)
2765{
2766    struct bxe_sw_rx_bd tmp_bd;
2767    struct bxe_sw_rx_bd *rx_buf;
2768    struct eth_rx_bd *rx_bd;
2769    int max_agg_queues;
2770    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2771    uint16_t index;
2772
2773    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2774                       "cons=%d prod=%d\n",
2775          fp->index, queue, cons, prod);
2776
2777    max_agg_queues = MAX_AGG_QS(sc);
2778
2779    KASSERT((queue < max_agg_queues),
2780            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2781             fp->index, queue, max_agg_queues));
2782
2783    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2784            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2785             fp->index, queue));
2786
2787    /* copy the existing mbuf and mapping from the TPA pool */
2788    tmp_bd = tpa_info->bd;
2789
2790    if (tmp_bd.m == NULL) {
2791        uint32_t *tmp;
2792
2793        tmp = (uint32_t *)cqe;
2794
2795        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2796              fp->index, queue, cons, prod);
2797        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2798            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2799
2800        /* XXX Error handling? */
2801        return;
2802    }
2803
2804    /* change the TPA queue to the start state */
2805    tpa_info->state            = BXE_TPA_STATE_START;
2806    tpa_info->placement_offset = cqe->placement_offset;
2807    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2808    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2809    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2810
2811    fp->rx_tpa_queue_used |= (1 << queue);
2812
2813    /*
2814     * If all the buffer descriptors are filled with mbufs then fill in
2815     * the current consumer index with a new BD. Else if a maximum Rx
2816     * buffer limit is imposed then fill in the next producer index.
2817     */
2818    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2819                prod : cons;
2820
2821    /* move the received mbuf and mapping to TPA pool */
2822    tpa_info->bd = fp->rx_mbuf_chain[cons];
2823
2824    /* release any existing RX BD mbuf mappings */
2825    if (cons != index) {
2826        rx_buf = &fp->rx_mbuf_chain[cons];
2827
2828        if (rx_buf->m_map != NULL) {
2829            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2830                            BUS_DMASYNC_POSTREAD);
2831            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2832        }
2833
2834        /*
2835         * We get here when the maximum number of rx buffers is less than
2836         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2837         * it out here without concern of a memory leak.
2838         */
2839        fp->rx_mbuf_chain[cons].m = NULL;
2840    }
2841
2842    /* update the Rx SW BD with the mbuf info from the TPA pool */
2843    fp->rx_mbuf_chain[index] = tmp_bd;
2844
2845    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2846    rx_bd = &fp->rx_chain[index];
2847    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2848    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2849}
2850
2851/*
2852 * When a TPA aggregation is completed, loop through the individual mbufs
2853 * of the aggregation, combining them into a single mbuf which will be sent
2854 * up the stack. Refill all freed SGEs with mbufs as we go along.
2855 */
2856static int
2857bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2858                   struct bxe_fastpath       *fp,
2859                   struct bxe_sw_tpa_info    *tpa_info,
2860                   uint16_t                  queue,
2861                   uint16_t                  pages,
2862                   struct mbuf               *m,
2863                   struct eth_end_agg_rx_cqe *cqe,
2864                   uint16_t                  cqe_idx)
2865{
2866    struct mbuf *m_frag;
2867    uint32_t frag_len, frag_size, i;
2868    uint16_t sge_idx;
2869    int rc = 0;
2870    int j;
2871
2872    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2873
2874    BLOGD(sc, DBG_LRO,
2875          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2876          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2877
2878    /* make sure the aggregated frame is not too big to handle */
2879    if (pages > 8 * PAGES_PER_SGE) {
2880
2881        uint32_t *tmp = (uint32_t *)cqe;
2882
2883        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2884                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2885              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2886              tpa_info->len_on_bd, frag_size);
2887
2888        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2889            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2890
2891        bxe_panic(sc, ("sge page count error\n"));
2892        return (EINVAL);
2893    }
2894
2895    /*
2896     * Scan through the scatter gather list pulling individual mbufs into a
2897     * single mbuf for the host stack.
2898     */
2899    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2900        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2901
2902        /*
2903         * Firmware gives the indices of the SGE as if the ring is an array
2904         * (meaning that the "next" element will consume 2 indices).
2905         */
2906        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2907
2908        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2909                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2910              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2911
2912        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2913
2914        /* allocate a new mbuf for the SGE */
2915        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2916        if (rc) {
2917            /* Leave all remaining SGEs in the ring! */
2918            return (rc);
2919        }
2920
2921        /* update the fragment length */
2922        m_frag->m_len = frag_len;
2923
2924        /* concatenate the fragment to the head mbuf */
2925        m_cat(m, m_frag);
2926        fp->eth_q_stats.mbuf_alloc_sge--;
2927
2928        /* update the TPA mbuf size and remaining fragment size */
2929        m->m_pkthdr.len += frag_len;
2930        frag_size -= frag_len;
2931    }
2932
2933    BLOGD(sc, DBG_LRO,
2934          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2935          fp->index, queue, frag_size);
2936
2937    return (rc);
2938}
2939
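/*
 * Clear the mask bits for the last two indices of every SGE page. Those
 * slots hold the "next page" pointer rather than a real SGE, so they are
 * pre-marked as consumed and can never stall the producer advance in
 * bxe_update_sge_prod().
 */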
2940static inline void
2941bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2942{
2943    int i, j;
2944
2945    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2946        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2947
2948        for (j = 0; j < 2; j++) {
2949            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2950            idx--;
2951        }
2952    }
2953}
2954
2955static inline void
2956bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2957{
2958    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2959    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2960
2961    /*
2962     * Clear the last two indices in each page. These are the indices that
2963     * correspond to the "next" element, hence will never be indicated and
2964     * should be removed from the calculations.
2965     */
2966    bxe_clear_sge_mask_next_elems(fp);
2967}
2968
2969static inline void
2970bxe_update_last_max_sge(struct bxe_fastpath *fp,
2971                        uint16_t            idx)
2972{
2973    uint16_t last_max = fp->last_max_sge;
2974
2975    if (SUB_S16(idx, last_max) > 0) {
2976        fp->last_max_sge = idx;
2977    }
2978}
2979
2980static inline void
2981bxe_update_sge_prod(struct bxe_softc          *sc,
2982                    struct bxe_fastpath       *fp,
2983                    uint16_t                  sge_len,
2984                    union eth_sgl_or_raw_data *cqe)
2985{
2986    uint16_t last_max, last_elem, first_elem;
2987    uint16_t delta = 0;
2988    uint16_t i;
2989
2990    if (!sge_len) {
2991        return;
2992    }
2993
2994    /* first mark all used pages */
2995    for (i = 0; i < sge_len; i++) {
2996        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2997                            RX_SGE(le16toh(cqe->sgl[i])));
2998    }
2999
3000    BLOGD(sc, DBG_LRO,
3001          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3002          fp->index, sge_len - 1,
3003          le16toh(cqe->sgl[sge_len - 1]));
3004
3005    /* assume that the last SGE index is the biggest */
3006    bxe_update_last_max_sge(fp,
3007                            le16toh(cqe->sgl[sge_len - 1]));
3008
3009    last_max = RX_SGE(fp->last_max_sge);
3010    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3011    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3012
3013    /* if ring is not full */
3014    if (last_elem + 1 != first_elem) {
3015        last_elem++;
3016    }
3017
3018    /* now update the prod */
3019    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3020        if (__predict_true(fp->sge_mask[i])) {
3021            break;
3022        }
3023
3024        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3025        delta += BIT_VEC64_ELEM_SZ;
3026    }
3027
3028    if (delta > 0) {
3029        fp->rx_sge_prod += delta;
3030        /* clear page-end entries */
3031        bxe_clear_sge_mask_next_elems(fp);
3032    }
3033
3034    BLOGD(sc, DBG_LRO,
3035          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3036          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3037}
3038
3039/*
3040 * The aggregation on the current TPA queue has completed. Pull the individual
3041 * mbuf fragments together into a single mbuf, perform all necessary checksum
3042 * calculations, and send the resulting mbuf to the stack.
3043 */
3044static void
3045bxe_tpa_stop(struct bxe_softc          *sc,
3046             struct bxe_fastpath       *fp,
3047             struct bxe_sw_tpa_info    *tpa_info,
3048             uint16_t                  queue,
3049             uint16_t                  pages,
3050             struct eth_end_agg_rx_cqe *cqe,
3051             uint16_t                  cqe_idx)
3052{
3053    if_t ifp = sc->ifp;
3054    struct mbuf *m;
3055    int rc = 0;
3056
3057    BLOGD(sc, DBG_LRO,
3058          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3059          fp->index, queue, tpa_info->placement_offset,
3060          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3061
3062    m = tpa_info->bd.m;
3063
3064    /* allocate a replacement before modifying existing mbuf */
3065    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3066    if (rc) {
3067        /* drop the frame and log an error */
3068        fp->eth_q_stats.rx_soft_errors++;
3069        goto bxe_tpa_stop_exit;
3070    }
3071
3072    /* we have a replacement, fixup the current mbuf */
3073    m_adj(m, tpa_info->placement_offset);
3074    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3075
3076    /* mark the checksums valid (taken care of by the firmware) */
3077    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3078    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3079    m->m_pkthdr.csum_data = 0xffff;
3080    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3081                               CSUM_IP_VALID   |
3082                               CSUM_DATA_VALID |
3083                               CSUM_PSEUDO_HDR);
3084
3085    /* aggregate all of the SGEs into a single mbuf */
3086    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3087    if (rc) {
3088        /* drop the packet and log an error */
3089        fp->eth_q_stats.rx_soft_errors++;
3090        m_freem(m);
3091    } else {
3092        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3093            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3094            m->m_flags |= M_VLANTAG;
3095        }
3096
3097        /* assign the packet to this interface */
3098        if_setrcvif(m, ifp);
3099
3100#if __FreeBSD_version >= 800000
3101        /* specify what RSS queue was used for this flow */
3102        m->m_pkthdr.flowid = fp->index;
3103        BXE_SET_FLOWID(m);
3104#endif
3105
3106        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3107        fp->eth_q_stats.rx_tpa_pkts++;
3108
3109        /* pass the frame to the stack */
3110        if_input(ifp, m);
3111    }
3112
3113    /* we passed an mbuf up the stack or dropped the frame */
3114    fp->eth_q_stats.mbuf_alloc_tpa--;
3115
3116bxe_tpa_stop_exit:
3117
3118    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3119    fp->rx_tpa_queue_used &= ~(1 << queue);
3120}
3121
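/*
 * A fast path frame did not fit into a single RX BD: 'lenonbd' bytes arrived
 * on the BD and the remaining 'len - lenonbd' bytes were scattered across
 * SGE pages. Chain the SGE mbufs onto the head mbuf, refill each SGE as it
 * is consumed, and finally push the SGE producer forward.
 */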
3122static uint8_t
3123bxe_service_rxsgl(
3124                 struct bxe_fastpath *fp,
3125                 uint16_t len,
3126                 uint16_t lenonbd,
3127                 struct mbuf *m,
3128                 struct eth_fast_path_rx_cqe *cqe_fp)
3129{
3130    struct mbuf *m_frag;
3131    uint16_t frags, frag_len;
3132    uint16_t sge_idx = 0;
3133    uint16_t j;
3134    uint8_t i, rc = 0;
3135    uint32_t frag_size;
3136
3137    /* adjust the mbuf */
3138    m->m_len = lenonbd;
3139
3140    frag_size = len - lenonbd;
3141    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3142
3143    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3144        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3145
3146        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3147        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3148        m_frag->m_len = frag_len;
3149
3150        /* allocate a new mbuf for the SGE */
3151        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3152        if (rc) {
3153            /* Leave all remaining SGEs in the ring! */
3154            return (rc);
3155        }
3156        fp->eth_q_stats.mbuf_alloc_sge--;
3157
3158        /* concatenate the fragment to the head mbuf */
3159        m_cat(m, m_frag);
3160
3161        frag_size -= frag_len;
3162    }
3163
3164    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3165
3166    return rc;
3167}
3168
3169static uint8_t
3170bxe_rxeof(struct bxe_softc    *sc,
3171          struct bxe_fastpath *fp)
3172{
3173    if_t ifp = sc->ifp;
3174    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3175    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3176    int rx_pkts = 0;
3177    int rc = 0;
3178
3179    BXE_FP_RX_LOCK(fp);
3180
3181    /* CQ "next element" is of the size of the regular element */
3182    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3183    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3184        hw_cq_cons++;
3185    }
3186
3187    bd_cons = fp->rx_bd_cons;
3188    bd_prod = fp->rx_bd_prod;
3189    bd_prod_fw = bd_prod;
3190    sw_cq_cons = fp->rx_cq_cons;
3191    sw_cq_prod = fp->rx_cq_prod;
3192
3193    /*
3194     * Memory barrier necessary as speculative reads of the rx
3195     * buffer can be ahead of the index in the status block
3196     */
3197    rmb();
3198
3199    BLOGD(sc, DBG_RX,
3200          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3201          fp->index, hw_cq_cons, sw_cq_cons);
3202
3203    while (sw_cq_cons != hw_cq_cons) {
3204        struct bxe_sw_rx_bd *rx_buf = NULL;
3205        union eth_rx_cqe *cqe;
3206        struct eth_fast_path_rx_cqe *cqe_fp;
3207        uint8_t cqe_fp_flags;
3208        enum eth_rx_cqe_type cqe_fp_type;
3209        uint16_t len, lenonbd,  pad;
3210        struct mbuf *m = NULL;
3211
3212        comp_ring_cons = RCQ(sw_cq_cons);
3213        bd_prod = RX_BD(bd_prod);
3214        bd_cons = RX_BD(bd_cons);
3215
3216        cqe          = &fp->rcq_chain[comp_ring_cons];
3217        cqe_fp       = &cqe->fast_path_cqe;
3218        cqe_fp_flags = cqe_fp->type_error_flags;
3219        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3220
3221        BLOGD(sc, DBG_RX,
3222              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3223              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3224              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3225              fp->index,
3226              hw_cq_cons,
3227              sw_cq_cons,
3228              bd_prod,
3229              bd_cons,
3230              CQE_TYPE(cqe_fp_flags),
3231              cqe_fp_flags,
3232              cqe_fp->status_flags,
3233              le32toh(cqe_fp->rss_hash_result),
3234              le16toh(cqe_fp->vlan_tag),
3235              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3236              le16toh(cqe_fp->len_on_bd));
3237
3238        /* is this a slowpath msg? */
3239        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3240            bxe_sp_event(sc, fp, cqe);
3241            goto next_cqe;
3242        }
3243
3244        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3245
3246        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3247            struct bxe_sw_tpa_info *tpa_info;
3248            uint16_t frag_size, pages;
3249            uint8_t queue;
3250
3251            if (CQE_TYPE_START(cqe_fp_type)) {
3252                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3253                              bd_cons, bd_prod, cqe_fp);
3254                m = NULL; /* packet not ready yet */
3255                goto next_rx;
3256            }
3257
3258            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3259                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3260
3261            queue = cqe->end_agg_cqe.queue_index;
3262            tpa_info = &fp->rx_tpa_info[queue];
3263
3264            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3265                  fp->index, queue);
3266
3267            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3268                         tpa_info->len_on_bd);
3269            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3270
3271            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3272                         &cqe->end_agg_cqe, comp_ring_cons);
3273
3274            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3275
3276            goto next_cqe;
3277        }
3278
3279        /* non TPA */
3280
3281        /* is this an error packet? */
3282        if (__predict_false(cqe_fp_flags &
3283                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3284            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3285            fp->eth_q_stats.rx_soft_errors++;
3286            goto next_rx;
3287        }
3288
3289        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3290        lenonbd = le16toh(cqe_fp->len_on_bd);
3291        pad = cqe_fp->placement_offset;
3292
3293        m = rx_buf->m;
3294
3295        if (__predict_false(m == NULL)) {
3296            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3297                  bd_cons, fp->index);
3298            goto next_rx;
3299        }
3300
3301        /* XXX double copy if packet length under a threshold */
3302
3303        /*
3304         * If every buffer descriptor is kept filled with an mbuf, replenish at
3305         * the current consumer index. Otherwise, when a maximum Rx buffer limit
3306         * is imposed, replenish at the next producer index.
3307         */
3308        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3309                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3310                                      bd_prod : bd_cons);
3311        if (rc != 0) {
3312
3313            /* we simply reuse the received mbuf and don't post it to the stack */
3314            m = NULL;
3315
3316            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3317                  fp->index, rc);
3318            fp->eth_q_stats.rx_soft_errors++;
3319
3320            if (sc->max_rx_bufs != RX_BD_USABLE) {
3321                /* copy this consumer entry over to the producer slot */
3322                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3323                       sizeof(struct bxe_sw_rx_bd));
3324                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3325            }
3326
3327            goto next_rx;
3328        }
3329
3330        /* current mbuf was detached from the bd */
3331        fp->eth_q_stats.mbuf_alloc_rx--;
3332
3333        /* we allocated a replacement mbuf, fixup the current one */
3334        m_adj(m, pad);
3335        m->m_pkthdr.len = m->m_len = len;
3336
3337        if ((len > 60) && (len > lenonbd)) {
3338            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3339            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3340            if (rc)
3341                break;
3342            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3343        } else if (lenonbd < len) {
3344            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3345        }
3346
3347        /* assign packet to this interface */
3348        if_setrcvif(m, ifp);
3349
3350        /* assume no hardware checksum has completed */
3351        m->m_pkthdr.csum_flags = 0;
3352
3353        /* validate checksum if offload enabled */
3354        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3355            /* check for a valid IP frame */
3356            if (!(cqe->fast_path_cqe.status_flags &
3357                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3358                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3359                if (__predict_false(cqe_fp_flags &
3360                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3361                    fp->eth_q_stats.rx_hw_csum_errors++;
3362                } else {
3363                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3364                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3365                }
3366            }
3367
3368            /* check for a valid TCP/UDP frame */
3369            if (!(cqe->fast_path_cqe.status_flags &
3370                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3371                if (__predict_false(cqe_fp_flags &
3372                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3373                    fp->eth_q_stats.rx_hw_csum_errors++;
3374                } else {
3375                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3376                    m->m_pkthdr.csum_data = 0xFFFF;
3377                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3378                                               CSUM_PSEUDO_HDR);
3379                }
3380            }
3381        }
3382
3383        /* if there is a VLAN tag then flag that info */
3384        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3385            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3386            m->m_flags |= M_VLANTAG;
3387        }
3388
3389#if __FreeBSD_version >= 800000
3390        /* specify what RSS queue was used for this flow */
3391        m->m_pkthdr.flowid = fp->index;
3392        BXE_SET_FLOWID(m);
3393#endif
3394
3395next_rx:
3396
3397        bd_cons    = RX_BD_NEXT(bd_cons);
3398        bd_prod    = RX_BD_NEXT(bd_prod);
3399        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3400
3401        /* pass the frame to the stack */
3402        if (__predict_true(m != NULL)) {
3403            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3404            rx_pkts++;
3405            if_input(ifp, m);
3406        }
3407
3408next_cqe:
3409
3410        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3411        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3412
3413        /* limit spinning on the queue */
3414        if (rc != 0)
3415            break;
3416
3417        if (rx_pkts == sc->rx_budget) {
3418            fp->eth_q_stats.rx_budget_reached++;
3419            break;
3420        }
3421    } /* while work to do */
3422
3423    fp->rx_bd_cons = bd_cons;
3424    fp->rx_bd_prod = bd_prod_fw;
3425    fp->rx_cq_cons = sw_cq_cons;
3426    fp->rx_cq_prod = sw_cq_prod;
3427
3428    /* Update producers */
3429    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3430
3431    fp->eth_q_stats.rx_pkts += rx_pkts;
3432    fp->eth_q_stats.rx_calls++;
3433
3434    BXE_FP_RX_UNLOCK(fp);
3435
3436    return (sw_cq_cons != hw_cq_cons);
3437}
3438
3439static uint16_t
3440bxe_free_tx_pkt(struct bxe_softc    *sc,
3441                struct bxe_fastpath *fp,
3442                uint16_t            idx)
3443{
3444    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3445    struct eth_tx_start_bd *tx_start_bd;
3446    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3447    uint16_t new_cons;
3448    int nbd;
3449
3450    /* unmap the mbuf from non-paged memory */
3451    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3452
3453    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3454    nbd = le16toh(tx_start_bd->nbd) - 1;
3455
3456    new_cons = (tx_buf->first_bd + nbd);
3457
3458    /* free the mbuf */
3459    if (__predict_true(tx_buf->m != NULL)) {
3460        m_freem(tx_buf->m);
3461        fp->eth_q_stats.mbuf_alloc_tx--;
3462    } else {
3463        fp->eth_q_stats.tx_chain_lost_mbuf++;
3464    }
3465
3466    tx_buf->m = NULL;
3467    tx_buf->first_bd = 0;
3468
3469    return (new_cons);
3470}
3471
3472/* transmit timeout watchdog */
3473static int
3474bxe_watchdog(struct bxe_softc    *sc,
3475             struct bxe_fastpath *fp)
3476{
3477    BXE_FP_TX_LOCK(fp);
3478
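    /*
     * A watchdog_timer of zero means the watchdog is disarmed; otherwise it
     * is decremented on each call and only a transition to zero below is
     * treated as a TX timeout.
     */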
3479    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3480        BXE_FP_TX_UNLOCK(fp);
3481        return (0);
3482    }
3483
3484    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3485    if (sc->trigger_grcdump) {
3486        /* take a grcdump */
3487        bxe_grc_dump(sc);
3488    }
3489
3490    BXE_FP_TX_UNLOCK(fp);
3491
3492    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3493    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3494
3495    return (-1);
3496}
3497
3498/* processes transmit completions */
3499static uint8_t
3500bxe_txeof(struct bxe_softc    *sc,
3501          struct bxe_fastpath *fp)
3502{
3503    if_t ifp = sc->ifp;
3504    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3505    uint16_t tx_bd_avail;
3506
3507    BXE_FP_TX_LOCK_ASSERT(fp);
3508
3509    bd_cons = fp->tx_bd_cons;
3510    hw_cons = le16toh(*fp->tx_cons_sb);
3511    sw_cons = fp->tx_pkt_cons;
3512
3513    while (sw_cons != hw_cons) {
3514        pkt_cons = TX_BD(sw_cons);
3515
3516        BLOGD(sc, DBG_TX,
3517              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3518              fp->index, hw_cons, sw_cons, pkt_cons);
3519
3520        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3521
3522        sw_cons++;
3523    }
3524
3525    fp->tx_pkt_cons = sw_cons;
3526    fp->tx_bd_cons  = bd_cons;
3527
3528    BLOGD(sc, DBG_TX,
3529          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3530          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3531
3532    mb();
3533
3534    tx_bd_avail = bxe_tx_avail(sc, fp);
3535
3536    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3537        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3538    } else {
3539        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3540    }
3541
3542    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3543        /* reset the watchdog timer if there are pending transmits */
3544        fp->watchdog_timer = BXE_TX_TIMEOUT;
3545        return (TRUE);
3546    } else {
3547        /* clear watchdog when there are no pending transmits */
3548        fp->watchdog_timer = 0;
3549        return (FALSE);
3550    }
3551}
3552
3553static void
3554bxe_drain_tx_queues(struct bxe_softc *sc)
3555{
3556    struct bxe_fastpath *fp;
3557    int i, count;
3558
3559    /* wait until all TX fastpath tasks have completed */
3560    for (i = 0; i < sc->num_queues; i++) {
3561        fp = &sc->fp[i];
3562
3563        count = 1000;
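        /* poll for up to ~1 second (1000 iterations x 1ms) per queue */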
3564
3565        while (bxe_has_tx_work(fp)) {
3566
3567            BXE_FP_TX_LOCK(fp);
3568            bxe_txeof(sc, fp);
3569            BXE_FP_TX_UNLOCK(fp);
3570
3571            if (count == 0) {
3572                BLOGE(sc, "Timeout waiting for fp[%d] "
3573                          "transmits to complete!\n", i);
3574                bxe_panic(sc, ("tx drain failure\n"));
3575                return;
3576            }
3577
3578            count--;
3579            DELAY(1000);
3580            rmb();
3581        }
3582    }
3583
3584    return;
3585}
3586
3587static int
3588bxe_del_all_macs(struct bxe_softc          *sc,
3589                 struct ecore_vlan_mac_obj *mac_obj,
3590                 int                       mac_type,
3591                 uint8_t                   wait_for_comp)
3592{
3593    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3594    int rc;
3595
3596    /* wait for completion of the request, if asked to */
3597    if (wait_for_comp) {
3598        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3599    }
3600
3601    /* Set the mac type of addresses we want to clear */
3602    bxe_set_bit(mac_type, &vlan_mac_flags);
3603
3604    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3605    if (rc < 0) {
3606        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3607            rc, mac_type, wait_for_comp);
3608    }
3609
3610    return (rc);
3611}
3612
3613static int
3614bxe_fill_accept_flags(struct bxe_softc *sc,
3615                      uint32_t         rx_mode,
3616                      unsigned long    *rx_accept_flags,
3617                      unsigned long    *tx_accept_flags)
3618{
3619    /* Clear the flags first */
3620    *rx_accept_flags = 0;
3621    *tx_accept_flags = 0;
3622
3623    switch (rx_mode) {
3624    case BXE_RX_MODE_NONE:
3625        /*
3626         * 'drop all' supersedes any accept flags that may have been
3627         * passed to the function.
3628         */
3629        break;
3630
3631    case BXE_RX_MODE_NORMAL:
3632        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3633        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3634        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3635
3636        /* internal switching mode */
3637        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3638        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3640
3641        break;
3642
3643    case BXE_RX_MODE_ALLMULTI:
3644        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3646        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3647
3648        /* internal switching mode */
3649        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3650        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3651        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3652
3653        break;
3654
3655    case BXE_RX_MODE_PROMISC:
3656        /*
3657         * According to the definition of SI mode, an interface in promisc mode
3658         * should receive matched and unmatched (in resolution of port)
3659         * unicast packets.
3660         */
3661        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3662        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3663        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3664        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3665
3666        /* internal switching mode */
3667        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3668        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3669
3670        if (IS_MF_SI(sc)) {
3671            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3672        } else {
3673            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3674        }
3675
3676        break;
3677
3678    default:
3679        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3680        return (-1);
3681    }
3682
3683    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3684    if (rx_mode != BXE_RX_MODE_NONE) {
3685        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3686        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3687    }
3688
3689    return (0);
3690}
3691
3692static int
3693bxe_set_q_rx_mode(struct bxe_softc *sc,
3694                  uint8_t          cl_id,
3695                  unsigned long    rx_mode_flags,
3696                  unsigned long    rx_accept_flags,
3697                  unsigned long    tx_accept_flags,
3698                  unsigned long    ramrod_flags)
3699{
3700    struct ecore_rx_mode_ramrod_params ramrod_param;
3701    int rc;
3702
3703    memset(&ramrod_param, 0, sizeof(ramrod_param));
3704
3705    /* Prepare ramrod parameters */
3706    ramrod_param.cid = 0;
3707    ramrod_param.cl_id = cl_id;
3708    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3709    ramrod_param.func_id = SC_FUNC(sc);
3710
3711    ramrod_param.pstate = &sc->sp_state;
3712    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3713
3714    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3715    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3716
3717    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3718
3719    ramrod_param.ramrod_flags = ramrod_flags;
3720    ramrod_param.rx_mode_flags = rx_mode_flags;
3721
3722    ramrod_param.rx_accept_flags = rx_accept_flags;
3723    ramrod_param.tx_accept_flags = tx_accept_flags;
3724
3725    rc = ecore_config_rx_mode(sc, &ramrod_param);
3726    if (rc < 0) {
3727        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3728            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3729            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3730            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3731            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3732        return (rc);
3733    }
3734
3735    return (0);
3736}
3737
3738static int
3739bxe_set_storm_rx_mode(struct bxe_softc *sc)
3740{
3741    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3742    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3743    int rc;
3744
3745    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3746                               &tx_accept_flags);
3747    if (rc) {
3748        return (rc);
3749    }
3750
3751    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3752    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3753
3754    /* XXX ensure all fastpaths have the same cl_id and/or move it to bxe_softc */
3755    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3756                              rx_accept_flags, tx_accept_flags,
3757                              ramrod_flags));
3758}
3759
3760/* returns the "mcp load_code" according to global load_count array */
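/*
 * load_count[path][0] counts all loaded functions on the path and
 * load_count[path][1 + port] counts those on the given port. The first
 * function to load on the path reports DRV_LOAD_COMMON, the first on a
 * port reports DRV_LOAD_PORT, and every other function reports
 * DRV_LOAD_FUNCTION.
 */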
3761static int
3762bxe_nic_load_no_mcp(struct bxe_softc *sc)
3763{
3764    int path = SC_PATH(sc);
3765    int port = SC_PORT(sc);
3766
3767    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3768          path, load_count[path][0], load_count[path][1],
3769          load_count[path][2]);
3770    load_count[path][0]++;
3771    load_count[path][1 + port]++;
3772    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3773          path, load_count[path][0], load_count[path][1],
3774          load_count[path][2]);
3775    if (load_count[path][0] == 1) {
3776        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3777    } else if (load_count[path][1 + port] == 1) {
3778        return (FW_MSG_CODE_DRV_LOAD_PORT);
3779    } else {
3780        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3781    }
3782}
3783
3784/* returns the "mcp load_code" according to global load_count array */
3785static int
3786bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3787{
3788    int port = SC_PORT(sc);
3789    int path = SC_PATH(sc);
3790
3791    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3792          path, load_count[path][0], load_count[path][1],
3793          load_count[path][2]);
3794    load_count[path][0]--;
3795    load_count[path][1 + port]--;
3796    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3797          path, load_count[path][0], load_count[path][1],
3798          load_count[path][2]);
3799    if (load_count[path][0] == 0) {
3800        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3801    } else if (load_count[path][1 + port] == 0) {
3802        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3803    } else {
3804        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3805    }
3806}
3807
3808/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3809static uint32_t
3810bxe_send_unload_req(struct bxe_softc *sc,
3811                    int              unload_mode)
3812{
3813    uint32_t reset_code = 0;
3814
3815    /* Select the UNLOAD request mode */
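    /* (note: both branches below currently request the unload with WOL disabled) */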
3816    if (unload_mode == UNLOAD_NORMAL) {
3817        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3818    } else {
3819        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3820    }
3821
3822    /* Send the request to the MCP */
3823    if (!BXE_NOMCP(sc)) {
3824        reset_code = bxe_fw_command(sc, reset_code, 0);
3825    } else {
3826        reset_code = bxe_nic_unload_no_mcp(sc);
3827    }
3828
3829    return (reset_code);
3830}
3831
3832/* send UNLOAD_DONE command to the MCP */
3833static void
3834bxe_send_unload_done(struct bxe_softc *sc,
3835                     uint8_t          keep_link)
3836{
3837    uint32_t reset_param =
3838        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3839
3840    /* Report UNLOAD_DONE to MCP */
3841    if (!BXE_NOMCP(sc)) {
3842        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3843    }
3844}
3845
3846static int
3847bxe_func_wait_started(struct bxe_softc *sc)
3848{
3849    int tout = 50;
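    /* poll for up to ~1 second (50 iterations x 20ms) below */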
3850
3851    if (!sc->port.pmf) {
3852        return (0);
3853    }
3854
3855    /*
3856     * (assumption: No Attention from MCP at this stage)
3857     * PMF probably in the middle of TX disable/enable transaction
3858     * 1. Sync IRS for default SB
3859     * 1. Sync ISR for default SB
3860     * 3. Wait, that TX disable/enable transaction completes
3861     *
3862     * 1+2 guarantee that if DCBX attention was scheduled it already changed
3863     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3864     * received completion for the transaction the state is TX_STOPPED.
3865     * State will return to STARTED after completion of TX_STOPPED-->STARTED
3866     * transaction.
3867     */
3868
3869    /* XXX make sure default SB ISR is done */
3870    /* need a way to synchronize an irq (intr_mtx?) */
3871
3872    /* XXX flush any work queues */
3873
3874    while (ecore_func_get_state(sc, &sc->func_obj) !=
3875           ECORE_F_STATE_STARTED && tout--) {
3876        DELAY(20000);
3877    }
3878
3879    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3880        /*
3881         * Failed to complete the transaction in a "good way"
3882         * Force both transactions with CLR bit.
3883         */
3884        struct ecore_func_state_params func_params = { NULL };
3885
3886        BLOGE(sc, "Unexpected function state! "
3887                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3888
3889        func_params.f_obj = &sc->func_obj;
3890        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3891
3892        /* STARTED-->TX_STOPPED */
3893        func_params.cmd = ECORE_F_CMD_TX_STOP;
3894        ecore_func_state_change(sc, &func_params);
3895
3896        /* TX_STOPPED-->STARTED */
3897        func_params.cmd = ECORE_F_CMD_TX_START;
3898        return (ecore_func_state_change(sc, &func_params));
3899    }
3900
3901    return (0);
3902}
3903
3904static int
3905bxe_stop_queue(struct bxe_softc *sc,
3906               int              index)
3907{
3908    struct bxe_fastpath *fp = &sc->fp[index];
3909    struct ecore_queue_state_params q_params = { NULL };
3910    int rc;
3911
3912    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3913
3914    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3915    /* We want to wait for completion in this context */
3916    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3917
3918    /* Stop the primary connection: */
3919
3920    /* ...halt the connection */
3921    q_params.cmd = ECORE_Q_CMD_HALT;
3922    rc = ecore_queue_state_change(sc, &q_params);
3923    if (rc) {
3924        return (rc);
3925    }
3926
3927    /* ...terminate the connection */
3928    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3929    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3930    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3931    rc = ecore_queue_state_change(sc, &q_params);
3932    if (rc) {
3933        return (rc);
3934    }
3935
3936    /* ...delete cfc entry */
3937    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3938    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3939    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3940    return (ecore_queue_state_change(sc, &q_params));
3941}
3942
3943/* wait for the outstanding SP commands */
3944static inline uint8_t
3945bxe_wait_sp_comp(struct bxe_softc *sc,
3946                 unsigned long    mask)
3947{
3948    unsigned long tmp;
3949    int tout = 5000; /* wait for 5 secs tops */
3950
3951    while (tout--) {
3952        mb();
3953        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3954            return (TRUE);
3955        }
3956
3957        DELAY(1000);
3958    }
3959
3960    mb();
3961
3962    tmp = atomic_load_acq_long(&sc->sp_state);
3963    if (tmp & mask) {
3964        BLOGE(sc, "Filtering completion timed out: "
3965                  "sp_state 0x%lx, mask 0x%lx\n",
3966              tmp, mask);
3967        return (FALSE);
3968    }
3969
3970    return (FALSE);
3971}
3972
3973static int
3974bxe_func_stop(struct bxe_softc *sc)
3975{
3976    struct ecore_func_state_params func_params = { NULL };
3977    int rc;
3978
3979    /* prepare parameters for function state transitions */
3980    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3981    func_params.f_obj = &sc->func_obj;
3982    func_params.cmd = ECORE_F_CMD_STOP;
3983
3984    /*
3985     * Try to stop the function the 'good way'. If it fails (in case
3986     * of a parity error during bxe_chip_cleanup()) and we are
3987     * not in a debug mode, perform a state transaction in order to
3988     * enable further HW_RESET transaction.
3989     */
3990    rc = ecore_func_state_change(sc, &func_params);
3991    if (rc) {
3992        BLOGE(sc, "FUNC_STOP ramrod failed. "
3993                  "Running a dry transaction (%d)\n", rc);
3994        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3995        return (ecore_func_state_change(sc, &func_params));
3996    }
3997
3998    return (0);
3999}
4000
4001static int
4002bxe_reset_hw(struct bxe_softc *sc,
4003             uint32_t         load_code)
4004{
4005    struct ecore_func_state_params func_params = { NULL };
4006
4007    /* Prepare parameters for function state transitions */
4008    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4009
4010    func_params.f_obj = &sc->func_obj;
4011    func_params.cmd = ECORE_F_CMD_HW_RESET;
4012
4013    func_params.params.hw_init.load_phase = load_code;
4014
4015    return (ecore_func_state_change(sc, &func_params));
4016}
4017
4018static void
4019bxe_int_disable_sync(struct bxe_softc *sc,
4020                     int              disable_hw)
4021{
4022    if (disable_hw) {
4023        /* prevent the HW from sending interrupts */
4024        bxe_int_disable(sc);
4025    }
4026
4027    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4028    /* make sure all ISRs are done */
4029
4030    /* XXX make sure sp_task is not running */
4031    /* cancel and flush work queues */
4032}
4033
4034static void
4035bxe_chip_cleanup(struct bxe_softc *sc,
4036                 uint32_t         unload_mode,
4037                 uint8_t          keep_link)
4038{
4039    int port = SC_PORT(sc);
4040    struct ecore_mcast_ramrod_params rparam = { NULL };
4041    uint32_t reset_code;
4042    int i, rc = 0;
4043
4044    bxe_drain_tx_queues(sc);
4045
4046    /* give HW time to discard old tx messages */
4047    DELAY(1000);
4048
4049    /* Clean all ETH MACs */
4050    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4051    if (rc < 0) {
4052        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4053    }
4054
4055    /* Clean up UC list  */
4056    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4057    if (rc < 0) {
4058        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4059    }
4060
4061    /* Disable LLH */
4062    if (!CHIP_IS_E1(sc)) {
4063        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4064    }
4065
4066    /* Set "drop all" to stop Rx */
4067
4068    /*
4069     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4070     * a race between the completion code and this code.
4071     */
4072    BXE_MCAST_LOCK(sc);
4073
4074    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4075        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4076    } else {
4077        bxe_set_storm_rx_mode(sc);
4078    }
4079
4080    /* Clean up multicast configuration */
4081    rparam.mcast_obj = &sc->mcast_obj;
4082    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4083    if (rc < 0) {
4084        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4085    }
4086
4087    BXE_MCAST_UNLOCK(sc);
4088
4089    // XXX bxe_iov_chip_cleanup(sc);
4090
4091    /*
4092     * Send the UNLOAD_REQUEST to the MCP. This will return whether
4093     * this function should perform FUNCTION, PORT, or COMMON HW
4094     * reset.
4095     */
4096    reset_code = bxe_send_unload_req(sc, unload_mode);
4097
4098    /*
4099     * (assumption: No Attention from MCP at this stage)
4100     * PMF probably in the middle of TX disable/enable transaction
4101     */
4102    rc = bxe_func_wait_started(sc);
4103    if (rc) {
4104        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4105    }
4106
4107    /*
4108     * Close multi and leading connections
4109     * Completions for ramrods are collected in a synchronous way
4110     */
4111    for (i = 0; i < sc->num_queues; i++) {
4112        if (bxe_stop_queue(sc, i)) {
4113            goto unload_error;
4114        }
4115    }
4116
4117    /*
4118     * If the SP settings didn't get completed by now then something
4119     * has gone very wrong.
4120     */
4121    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4122        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4123    }
4124
4125unload_error:
4126
4127    rc = bxe_func_stop(sc);
4128    if (rc) {
4129        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4130    }
4131
4132    /* disable HW interrupts */
4133    bxe_int_disable_sync(sc, TRUE);
4134
4135    /* detach interrupts */
4136    bxe_interrupt_detach(sc);
4137
4138    /* Reset the chip */
4139    rc = bxe_reset_hw(sc, reset_code);
4140    if (rc) {
4141        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4142    }
4143
4144    /* Report UNLOAD_DONE to MCP */
4145    bxe_send_unload_done(sc, keep_link);
4146}
4147
4148static void
4149bxe_disable_close_the_gate(struct bxe_softc *sc)
4150{
4151    uint32_t val;
4152    int port = SC_PORT(sc);
4153
4154    BLOGD(sc, DBG_LOAD,
4155          "Disabling 'close the gates'\n");
4156
4157    if (CHIP_IS_E1(sc)) {
4158        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4159                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4160        val = REG_RD(sc, addr);
4161        val &= ~(0x300);
4162        REG_WR(sc, addr, val);
4163    } else {
4164        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4165        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4166                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4167        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4168    }
4169}
4170
4171/*
4172 * Cleans the objects that have internal lists, without sending
4173 * ramrods. Should be run when interrupts are disabled.
4174 */
4175static void
4176bxe_squeeze_objects(struct bxe_softc *sc)
4177{
4178    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4179    struct ecore_mcast_ramrod_params rparam = { NULL };
4180    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4181    int rc;
4182
4183    /* Cleanup MACs' object first... */
4184
4185    /* Wait for completion of the requested commands */
4186    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4187    /* Perform a dry cleanup */
4188    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4189
4190    /* Clean ETH primary MAC */
4191    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4192    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4193                             &ramrod_flags);
4194    if (rc != 0) {
4195        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4196    }
4197
4198    /* Cleanup UC list */
4199    vlan_mac_flags = 0;
4200    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4201    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4202                             &ramrod_flags);
4203    if (rc != 0) {
4204        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4205    }
4206
4207    /* Now clean mcast object... */
4208
4209    rparam.mcast_obj = &sc->mcast_obj;
4210    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4211
4212    /* Add a DEL command... */
4213    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4214    if (rc < 0) {
4215        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4216    }
4217
4218    /* now wait until all pending commands are cleared */
4219
4220    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221    while (rc != 0) {
4222        if (rc < 0) {
4223            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4224            return;
4225        }
4226
4227        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4228    }
4229}
4230
4231/* stop the controller */
4232static __noinline int
4233bxe_nic_unload(struct bxe_softc *sc,
4234               uint32_t         unload_mode,
4235               uint8_t          keep_link)
4236{
4237    uint8_t global = FALSE;
4238    uint32_t val;
4239    int i;
4240
4241    BXE_CORE_LOCK_ASSERT(sc);
4242
4243    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4244
4245    for (i = 0; i < sc->num_queues; i++) {
4246        struct bxe_fastpath *fp;
4247
4248        fp = &sc->fp[i];
4249        BXE_FP_TX_LOCK(fp);
4250        BXE_FP_TX_UNLOCK(fp);
4251    }
4252
4253    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4254
4255    /* mark driver as unloaded in shmem2 */
4256    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4257        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4258        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4259                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4260    }
4261
4262    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4263        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4264        /*
4265         * We can get here if the driver has been unloaded
4266         * during parity error recovery and is either waiting for a
4267         * leader to complete or for other functions to unload and
4268         * then ifconfig down has been issued. In this case we want to
4269         * unload and let the other functions complete the recovery
4270         * process.
4271         */
4272        sc->recovery_state = BXE_RECOVERY_DONE;
4273        sc->is_leader = 0;
4274        bxe_release_leader_lock(sc);
4275        mb();
4276
4277        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4278        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4279            " state = 0x%x\n", sc->recovery_state, sc->state);
4280        return (-1);
4281    }
4282
4283    /*
4284     * Nothing to do during unload if previous bxe_nic_load()
4285     * did not complete successfully - all resources are released.
4286     */
4287    if ((sc->state == BXE_STATE_CLOSED) ||
4288        (sc->state == BXE_STATE_ERROR)) {
4289        return (0);
4290    }
4291
4292    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4293    mb();
4294
4295    /* stop tx */
4296    bxe_tx_disable(sc);
4297
4298    sc->rx_mode = BXE_RX_MODE_NONE;
4299    /* XXX set rx mode ??? */
4300
4301    if (IS_PF(sc) && !sc->grcdump_done) {
4302        /* set ALWAYS_ALIVE bit in shmem */
4303        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4304
4305        bxe_drv_pulse(sc);
4306
4307        bxe_stats_handle(sc, STATS_EVENT_STOP);
4308        bxe_save_statistics(sc);
4309    }
4310
4311    /* wait till consumers catch up with producers in all queues */
4312    bxe_drain_tx_queues(sc);
4313
4314    /* if VF, indicate to the PF that this function is going down (the PF will
4315     * delete the sp elements and clear the initializations)
4316     */
4317    if (IS_VF(sc)) {
4318        ; /* bxe_vfpf_close_vf(sc); */
4319    } else if (unload_mode != UNLOAD_RECOVERY) {
4320        /* if this is a normal/close unload need to clean up chip */
4321        if (!sc->grcdump_done)
4322            bxe_chip_cleanup(sc, unload_mode, keep_link);
4323    } else {
4324        /* Send the UNLOAD_REQUEST to the MCP */
4325        bxe_send_unload_req(sc, unload_mode);
4326
4327        /*
4328         * Prevent transactions to the host from the functions on the
4329         * engine that doesn't reset global blocks in case of a global
4330         * attention once the global blocks are reset and the gates are opened
4331         * (the engine whose leader will perform the recovery
4332         * last).
4333         */
4334        if (!CHIP_IS_E1x(sc)) {
4335            bxe_pf_disable(sc);
4336        }
4337
4338        /* disable HW interrupts */
4339        bxe_int_disable_sync(sc, TRUE);
4340
4341        /* detach interrupts */
4342        bxe_interrupt_detach(sc);
4343
4344        /* Report UNLOAD_DONE to MCP */
4345        bxe_send_unload_done(sc, FALSE);
4346    }
4347
4348    /*
4349     * At this stage no more interrupts will arrive so we may safely clean
4350     * the queue'able objects here in case they failed to get cleaned so far.
4351     */
4352    if (IS_PF(sc)) {
4353        bxe_squeeze_objects(sc);
4354    }
4355
4356    /* There should be no more pending SP commands at this stage */
4357    sc->sp_state = 0;
4358
4359    sc->port.pmf = 0;
4360
4361    bxe_free_fp_buffers(sc);
4362
4363    if (IS_PF(sc)) {
4364        bxe_free_mem(sc);
4365    }
4366
4367    bxe_free_fw_stats_mem(sc);
4368
4369    sc->state = BXE_STATE_CLOSED;
4370
4371    /*
4372     * Check if there are pending parity attentions. If there are - set
4373     * RECOVERY_IN_PROGRESS.
4374     */
4375    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4376        bxe_set_reset_in_progress(sc);
4377
4378        /* Set RESET_IS_GLOBAL if needed */
4379        if (global) {
4380            bxe_set_reset_global(sc);
4381        }
4382    }
4383
4384    /*
4385     * The last driver must disable a "close the gate" if there is no
4386     * parity attention or "process kill" pending.
4387     */
4388    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4389        bxe_reset_is_done(sc, SC_PATH(sc))) {
4390        bxe_disable_close_the_gate(sc);
4391    }
4392
4393    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4394
4395    return (0);
4396}
4397
4398/*
4399 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4400 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4401 */
4402static int
4403bxe_ifmedia_update(struct ifnet  *ifp)
4404{
4405    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4406    struct ifmedia *ifm;
4407
4408    ifm = &sc->ifmedia;
4409
4410    /* We only support Ethernet media type. */
4411    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4412        return (EINVAL);
4413    }
4414
4415    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4416    case IFM_AUTO:
4417         break;
4418    case IFM_10G_CX4:
4419    case IFM_10G_SR:
4420    case IFM_10G_T:
4421    case IFM_10G_TWINAX:
4422    default:
4423        /* We don't support changing the media type. */
4424        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4425              IFM_SUBTYPE(ifm->ifm_media));
4426        return (EINVAL);
4427    }
4428
4429    return (0);
4430}
4431
4432/*
4433 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4434 */
4435static void
4436bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4437{
4438    struct bxe_softc *sc = if_getsoftc(ifp);
4439
4440    /* Report link down if the driver isn't running. */
4441    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4442        ifmr->ifm_active |= IFM_NONE;
4443        return;
4444    }
4445
4446    /* Setup the default interface info. */
4447    ifmr->ifm_status = IFM_AVALID;
4448    ifmr->ifm_active = IFM_ETHER;
4449
4450    if (sc->link_vars.link_up) {
4451        ifmr->ifm_status |= IFM_ACTIVE;
4452    } else {
4453        ifmr->ifm_active |= IFM_NONE;
4454        return;
4455    }
4456
4457    ifmr->ifm_active |= sc->media;
4458
4459    if (sc->link_vars.duplex == DUPLEX_FULL) {
4460        ifmr->ifm_active |= IFM_FDX;
4461    } else {
4462        ifmr->ifm_active |= IFM_HDX;
4463    }
4464}
4465
4466static void
4467bxe_handle_chip_tq(void *context,
4468                   int  pending)
4469{
4470    struct bxe_softc *sc = (struct bxe_softc *)context;
4471    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4472
4473    switch (work)
4474    {
4475
4476    case CHIP_TQ_REINIT:
4477        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4478            /* restart the interface */
4479            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4480            bxe_periodic_stop(sc);
4481            BXE_CORE_LOCK(sc);
4482            bxe_stop_locked(sc);
4483            bxe_init_locked(sc);
4484            BXE_CORE_UNLOCK(sc);
4485        }
4486        break;
4487
4488    default:
4489        break;
4490    }
4491}
4492
4493/*
4494 * Handles any IOCTL calls from the operating system.
4495 *
4496 * Returns:
4497 *   0 = Success, >0 Failure
4498 */
4499static int
4500bxe_ioctl(if_t ifp,
4501          u_long       command,
4502          caddr_t      data)
4503{
4504    struct bxe_softc *sc = if_getsoftc(ifp);
4505    struct ifreq *ifr = (struct ifreq *)data;
4506    int mask = 0;
4507    int reinit = 0;
4508    int error = 0;
4509
4510    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4511    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
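    /*
     * The MTU is bounded below by the minimum Ethernet payload and above by
     * what still fits in a 9KB jumbo cluster (MJUM9BYTES) once the Ethernet
     * overhead and alignment padding are accounted for.
     */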
4512
4513    switch (command)
4514    {
4515    case SIOCSIFMTU:
4516        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4517              ifr->ifr_mtu);
4518
4519        if (sc->mtu == ifr->ifr_mtu) {
4520            /* nothing to change */
4521            break;
4522        }
4523
4524        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4525            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4526                  ifr->ifr_mtu, mtu_min, mtu_max);
4527            error = EINVAL;
4528            break;
4529        }
4530
4531        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4532                             (unsigned long)ifr->ifr_mtu);
4533	/*
4534        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4535                              (unsigned long)ifr->ifr_mtu);
4536	XXX - Not sure why it needs to be atomic
4537	*/
4538	if_setmtu(ifp, ifr->ifr_mtu);
4539        reinit = 1;
4540        break;
4541
4542    case SIOCSIFFLAGS:
4543        /* toggle the interface state up or down */
4544        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4545
4546	BXE_CORE_LOCK(sc);
4547        /* check if the interface is up */
4548        if (if_getflags(ifp) & IFF_UP) {
4549            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4550                /* set the receive mode flags */
4551                bxe_set_rx_mode(sc);
4552            } else if (sc->state != BXE_STATE_DISABLED) {
4553		bxe_init_locked(sc);
4554            }
4555        } else {
4556            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4557		bxe_periodic_stop(sc);
4558		bxe_stop_locked(sc);
4559            }
4560        }
4561	BXE_CORE_UNLOCK(sc);
4562
4563        break;
4564
4565    case SIOCADDMULTI:
4566    case SIOCDELMULTI:
4567        /* add/delete multicast addresses */
4568        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4569
4570        /* check if the interface is up */
4571        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4572            /* set the receive mode flags */
4573	    BXE_CORE_LOCK(sc);
4574            bxe_set_rx_mode(sc);
4575	    BXE_CORE_UNLOCK(sc);
4576        }
4577
4578        break;
4579
4580    case SIOCSIFCAP:
4581        /* find out which capabilities have changed */
4582        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4583
4584        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4585              mask);
4586
4587        /* toggle the LRO capabilities enable flag */
4588        if (mask & IFCAP_LRO) {
4589	    if_togglecapenable(ifp, IFCAP_LRO);
4590            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4591                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4592            reinit = 1;
4593        }
4594
4595        /* toggle the TXCSUM checksum capabilites enable flag */
4596        if (mask & IFCAP_TXCSUM) {
4597	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4598            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4599                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4600            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4601                if_sethwassistbits(ifp, (CSUM_IP      |
4602                                    CSUM_TCP      |
4603                                    CSUM_UDP      |
4604                                    CSUM_TSO      |
4605                                    CSUM_TCP_IPV6 |
4606                                    CSUM_UDP_IPV6), 0);
4607            } else {
4608		if_clearhwassist(ifp); /* XXX */
4609            }
4610        }
4611
4612        /* toggle the RXCSUM checksum capabilities enable flag */
4613        if (mask & IFCAP_RXCSUM) {
4614	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4615            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4616                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4617            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4618                if_sethwassistbits(ifp, (CSUM_IP      |
4619                                    CSUM_TCP      |
4620                                    CSUM_UDP      |
4621                                    CSUM_TSO      |
4622                                    CSUM_TCP_IPV6 |
4623                                    CSUM_UDP_IPV6), 0);
4624            } else {
4625		if_clearhwassist(ifp); /* XXX */
4626            }
4627        }
4628
4629        /* toggle TSO4 capabilities enabled flag */
4630        if (mask & IFCAP_TSO4) {
4631            if_togglecapenable(ifp, IFCAP_TSO4);
4632            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4633                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4634        }
4635
4636        /* toggle TSO6 capabilities enabled flag */
4637        if (mask & IFCAP_TSO6) {
4638	    if_togglecapenable(ifp, IFCAP_TSO6);
4639            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4640                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4641        }
4642
4643        /* toggle VLAN_HWTSO capabilities enabled flag */
4644        if (mask & IFCAP_VLAN_HWTSO) {
4645
4646	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4647            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4648                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4649        }
4650
4651        /* toggle VLAN_HWCSUM capabilities enabled flag */
4652        if (mask & IFCAP_VLAN_HWCSUM) {
4653            /* XXX investigate this... */
4654            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4655            error = EINVAL;
4656        }
4657
4658        /* toggle VLAN_MTU capabilities enable flag */
4659        if (mask & IFCAP_VLAN_MTU) {
4660            /* XXX investigate this... */
4661            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4662            error = EINVAL;
4663        }
4664
4665        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4666        if (mask & IFCAP_VLAN_HWTAGGING) {
4667            /* XXX investigate this... */
4668            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4669            error = EINVAL;
4670        }
4671
4672        /* toggle VLAN_HWFILTER capabilities enabled flag */
4673        if (mask & IFCAP_VLAN_HWFILTER) {
4674            /* XXX investigate this... */
4675            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4676            error = EINVAL;
4677        }
4678
4679        /* XXX not yet...
4680         * IFCAP_WOL_MAGIC
4681         */
4682
4683        break;
4684
4685    case SIOCSIFMEDIA:
4686    case SIOCGIFMEDIA:
4687        /* set/get interface media */
4688        BLOGD(sc, DBG_IOCTL,
4689              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4690              (command & 0xff));
4691        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4692        break;
4693
4694    default:
4695        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4696              (command & 0xff));
4697        error = ether_ioctl(ifp, command, data);
4698        break;
4699    }
4700
4701    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4702        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4703              "Re-initializing hardware from IOCTL change\n");
4704	bxe_periodic_stop(sc);
4705	BXE_CORE_LOCK(sc);
4706	bxe_stop_locked(sc);
4707	bxe_init_locked(sc);
4708	BXE_CORE_UNLOCK(sc);
4709    }
4710
4711    return (error);
4712}
4713
4714static __noinline void
4715bxe_dump_mbuf(struct bxe_softc *sc,
4716              struct mbuf      *m,
4717              uint8_t          contents)
4718{
4719    char * type;
4720    int i = 0;
4721
4722    if (!(sc->debug & DBG_MBUF)) {
4723        return;
4724    }
4725
4726    if (m == NULL) {
4727        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4728        return;
4729    }
4730
4731    while (m) {
4732
4733#if __FreeBSD_version >= 1000000
4734        BLOGD(sc, DBG_MBUF,
4735              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4736              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4737
4738        if (m->m_flags & M_PKTHDR) {
4739             BLOGD(sc, DBG_MBUF,
4740                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4741                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4742                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4743        }
4744#else
4745        BLOGD(sc, DBG_MBUF,
4746              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4747              i, m, m->m_len, m->m_flags,
4748              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4749
4750        if (m->m_flags & M_PKTHDR) {
4751             BLOGD(sc, DBG_MBUF,
4752                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4753                   i, m->m_pkthdr.len, m->m_flags,
4754                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4755                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4756                   "\22M_PROMISC\23M_NOFREE",
4757                   (int)m->m_pkthdr.csum_flags,
4758                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4759                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4760                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4761                   "\14CSUM_PSEUDO_HDR");
4762        }
4763#endif /* #if __FreeBSD_version >= 1000000 */
4764
4765        if (m->m_flags & M_EXT) {
4766            switch (m->m_ext.ext_type) {
4767            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4768            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4769            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4770            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4771            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4772            case EXT_PACKET:     type = "EXT_PACKET";     break;
4773            case EXT_MBUF:       type = "EXT_MBUF";       break;
4774            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4775            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4776            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4777            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4778            default:             type = "UNKNOWN";        break;
4779            }
4780
4781            BLOGD(sc, DBG_MBUF,
4782                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4783                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4784        }
4785
4786        if (contents) {
4787            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4788        }
4789
4790        m = m->m_next;
4791        i++;
4792    }
4793}
4794
4795/*
4796 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4797 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4798 * The window: the 3 bds are 1 for the headers BD + 2 for the parse BD and last BD.
4799 * The headers come in a separate bd in FreeBSD so 13 - 3 = 10.
4800 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4801 */
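/*
 * Illustration with hypothetical numbers: for nsegs = 13 and an MSS of 1460,
 * every group of 10 consecutive data segments (the first segment, assumed to
 * hold the headers, is skipped) must sum to at least 1460 bytes; otherwise
 * the caller is told to defragment the mbuf chain first.
 */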
4802static int
4803bxe_chktso_window(struct bxe_softc  *sc,
4804                  int               nsegs,
4805                  bus_dma_segment_t *segs,
4806                  struct mbuf       *m)
4807{
4808    uint32_t num_wnds, wnd_size, wnd_sum;
4809    int32_t frag_idx, wnd_idx;
4810    unsigned short lso_mss;
4811    int defrag;
4812
4813    defrag = 0;
4814    wnd_sum = 0;
4815    wnd_size = 10;
4816    num_wnds = nsegs - wnd_size;
4817    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4818
4819    /*
4820     * The total header lengths (Eth+IP+TCP) are in the first FreeBSD mbuf, so
4821     * calculate the first window sum of data while skipping the first segment,
4822     * assuming it holds the headers.
4823     */
4824    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4825        wnd_sum += htole16(segs[frag_idx].ds_len);
4826    }
4827
4828    /* check the first 10 bd window size */
4829    if (wnd_sum < lso_mss) {
4830        return (1);
4831    }
4832
4833    /* run through the windows */
4834    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4835        /* subtract the first mbuf->m_len of the last wndw(-header) */
4836        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4837        /* add the next mbuf len to the len of our new window */
4838        wnd_sum += htole16(segs[frag_idx].ds_len);
4839        if (wnd_sum < lso_mss) {
4840            return (1);
4841        }
4842    }
4843
4844    return (0);
4845}
4846
4847static uint8_t
4848bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4849                    struct mbuf         *m,
4850                    uint32_t            *parsing_data)
4851{
4852    struct ether_vlan_header *eh = NULL;
4853    struct ip *ip4 = NULL;
4854    struct ip6_hdr *ip6 = NULL;
4855    caddr_t ip = NULL;
4856    struct tcphdr *th = NULL;
4857    int e_hlen, ip_hlen, l4_off;
4858    uint16_t proto;
4859
4860    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4861        /* no L4 checksum offload needed */
4862        return (0);
4863    }
4864
4865    /* get the Ethernet header */
4866    eh = mtod(m, struct ether_vlan_header *);
4867
4868    /* handle VLAN encapsulation if present */
4869    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4870        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4871        proto  = ntohs(eh->evl_proto);
4872    } else {
4873        e_hlen = ETHER_HDR_LEN;
4874        proto  = ntohs(eh->evl_encap_proto);
4875    }
4876
4877    switch (proto) {
4878    case ETHERTYPE_IP:
4879        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4880        ip4 = (m->m_len < sizeof(struct ip)) ?
4881                  (struct ip *)m->m_next->m_data :
4882                  (struct ip *)(m->m_data + e_hlen);
4883        /* ip_hl is number of 32-bit words */
4884        ip_hlen = (ip4->ip_hl << 2);
4885        ip = (caddr_t)ip4;
4886        break;
4887    case ETHERTYPE_IPV6:
4888        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4889        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4890                  (struct ip6_hdr *)m->m_next->m_data :
4891                  (struct ip6_hdr *)(m->m_data + e_hlen);
4892        /* XXX cannot support offload with IPv6 extensions */
4893        ip_hlen = sizeof(struct ip6_hdr);
4894        ip = (caddr_t)ip6;
4895        break;
4896    default:
4897        /* We can't offload in this case... */
4898        /* XXX error stat ??? */
4899        return (0);
4900    }
4901
4902    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4903    l4_off = (e_hlen + ip_hlen);
4904
4905    *parsing_data |=
4906        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4907         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4908
4909    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4910                                  CSUM_TSO |
4911                                  CSUM_TCP_IPV6)) {
4912        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4913        th = (struct tcphdr *)(ip + ip_hlen);
4914        /* th_off is number of 32-bit words */
4915        *parsing_data |= ((th->th_off <<
4916                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4917                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4918        return (l4_off + (th->th_off << 2)); /* entire header length */
4919    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4920                                         CSUM_UDP_IPV6)) {
4921        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4922        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4923    } else {
4924        /* XXX error stat ??? */
4925        return (0);
4926    }
4927}
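
/*
 * Illustrative sketch only (not compiled): how bxe_set_pbd_csum_e2() packs
 * the E2 parsing data for a plain IPv4/TCP frame with no options (14-byte
 * Ethernet header, 20-byte IP header, 20-byte TCP header). The function
 * name and the header sizes below are hypothetical example values.
 */
#if 0
static uint32_t
bxe_pbd_e2_parsing_data_sketch(void)
{
    uint32_t parsing_data = 0;
    int e_hlen  = 14;               /* Ethernet header bytes */
    int ip_hlen = 20;               /* IPv4 header bytes (ip_hl = 5) */
    int l4_off  = e_hlen + ip_hlen; /* 34 bytes to the start of TCP */
    int th_off  = 5;                /* TCP header length in 32-bit words */

    /* the L4 start offset field is expressed in 16-bit words: 34 / 2 = 17 */
    parsing_data |=
        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);

    /* the TCP header length field is expressed in 32-bit words: 5 */
    parsing_data |=
        ((th_off << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);

    return (parsing_data);
}
#endif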
4928
4929static uint8_t
4930bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4931                 struct mbuf                *m,
4932                 struct eth_tx_parse_bd_e1x *pbd)
4933{
4934    struct ether_vlan_header *eh = NULL;
4935    struct ip *ip4 = NULL;
4936    struct ip6_hdr *ip6 = NULL;
4937    caddr_t ip = NULL;
4938    struct tcphdr *th = NULL;
4939    struct udphdr *uh = NULL;
4940    int e_hlen, ip_hlen;
4941    uint16_t proto;
4942    uint8_t hlen;
4943    uint16_t tmp_csum;
4944    uint32_t *tmp_uh;
4945
4946    /* get the Ethernet header */
4947    eh = mtod(m, struct ether_vlan_header *);
4948
4949    /* handle VLAN encapsulation if present */
4950    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4951        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4952        proto  = ntohs(eh->evl_proto);
4953    } else {
4954        e_hlen = ETHER_HDR_LEN;
4955        proto  = ntohs(eh->evl_encap_proto);
4956    }
4957
4958    switch (proto) {
4959    case ETHERTYPE_IP:
4960        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4961        ip4 = (m->m_len < sizeof(struct ip)) ?
4962                  (struct ip *)m->m_next->m_data :
4963                  (struct ip *)(m->m_data + e_hlen);
4964        /* ip_hl is number of 32-bit words */
4965        ip_hlen = (ip4->ip_hl << 1);
4966        ip = (caddr_t)ip4;
4967        break;
4968    case ETHERTYPE_IPV6:
4969        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4970        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4971                  (struct ip6_hdr *)m->m_next->m_data :
4972                  (struct ip6_hdr *)(m->m_data + e_hlen);
4973        /* XXX cannot support offload with IPv6 extensions */
4974        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4975        ip = (caddr_t)ip6;
4976        break;
4977    default:
4978        /* We can't offload in this case... */
4979        /* XXX error stat ??? */
4980        return (0);
4981    }
4982
4983    hlen = (e_hlen >> 1);
4984
4985    /* note that rest of global_data is indirectly zeroed here */
4986    if (m->m_flags & M_VLANTAG) {
4987        pbd->global_data =
4988            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4989    } else {
4990        pbd->global_data = htole16(hlen);
4991    }
4992
4993    pbd->ip_hlen_w = ip_hlen;
4994
4995    hlen += pbd->ip_hlen_w;
4996
4997    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4998
4999    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5000                                  CSUM_TSO |
5001                                  CSUM_TCP_IPV6)) {
5002        th = (struct tcphdr *)(ip + (ip_hlen << 1));
5003        /* th_off is number of 32-bit words */
5004        hlen += (uint16_t)(th->th_off << 1);
5005    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5006                                         CSUM_UDP_IPV6)) {
5007        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5008        hlen += (sizeof(struct udphdr) / 2);
5009    } else {
5010        /* valid case as only CSUM_IP was set */
5011        return (0);
5012    }
5013
5014    pbd->total_hlen_w = htole16(hlen);
5015
5016    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5017                                  CSUM_TSO |
5018                                  CSUM_TCP_IPV6)) {
5019        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5020        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5021    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5022                                         CSUM_UDP_IPV6)) {
5023        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5024
5025        /*
5026         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5027         * checksums and does not know anything about the UDP header and where
5028         * the checksum field is located. It only knows about TCP. Therefore
5029         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5030         * offload. Since the checksum field offset for TCP is 16 bytes and
5031         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5032         * bytes less than the start of the UDP header. This allows the
5033         * hardware to write the checksum in the correct spot. But the
5034         * hardware will compute a checksum which includes the last 10 bytes
5035         * of the IP header. To correct this we tweak the stack computed
5036         * pseudo checksum by folding in the calculation of the inverse
5037         * checksum for those final 10 bytes of the IP header. This allows
5038         * the correct checksum to be computed by the hardware.
5039         */
5040
5041        /* set pointer 10 bytes before UDP header */
5042        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5043
5044        /* calculate a pseudo header checksum over the first 10 bytes */
5045        tmp_csum = in_pseudo(*tmp_uh,
5046                             *(tmp_uh + 1),
5047                             *(uint16_t *)(tmp_uh + 2));
5048
5049        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5050    }
5051
5052    return (hlen * 2); /* entire header length, number of bytes */
5053}
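
/*
 * Illustrative sketch only (not compiled): the UDP checksum "lie" described
 * above, in isolation. The hardware is pointed 10 bytes before the UDP
 * header so that its TCP-style checksum offset (16 bytes) lands on the UDP
 * checksum field (6 bytes into the UDP header), and the stack's pseudo
 * checksum is pre-adjusted to cancel the last 10 bytes of the IP header
 * that the hardware will now wrongly include. The function name below is
 * hypothetical.
 */
#if 0
static uint16_t
bxe_udp_csum_seed_sketch(struct udphdr *uh)
{
    /* the 10 IP header bytes immediately preceding the UDP header */
    uint32_t *ip_tail = (uint32_t *)((uint8_t *)uh - 10);
    uint16_t extra;

    /* one's-complement sum of those 10 bytes (two words + one half-word) */
    extra = in_pseudo(*ip_tail, *(ip_tail + 1), *(uint16_t *)(ip_tail + 2));

    /* fold the inverse of that sum into the stack-computed pseudo csum */
    return (ntohs(in_addword(uh->uh_sum, ~extra)));
}
#endif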
5054
5055static void
5056bxe_set_pbd_lso_e2(struct mbuf *m,
5057                   uint32_t    *parsing_data)
5058{
5059    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5060                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5061                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5062
5063    /* XXX test for IPv6 with extension header... */
5064}
5065
5066static void
5067bxe_set_pbd_lso(struct mbuf                *m,
5068                struct eth_tx_parse_bd_e1x *pbd)
5069{
5070    struct ether_vlan_header *eh = NULL;
5071    struct ip *ip = NULL;
5072    struct tcphdr *th = NULL;
5073    int e_hlen;
5074
5075    /* get the Ethernet header */
5076    eh = mtod(m, struct ether_vlan_header *);
5077
5078    /* handle VLAN encapsulation if present */
5079    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5080                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5081
5082    /* get the IP and TCP header, with LSO entire header in first mbuf */
5083    /* XXX assuming IPv4 */
5084    ip = (struct ip *)(m->m_data + e_hlen);
5085    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5086
5087    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5088    pbd->tcp_send_seq = ntohl(th->th_seq);
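    /*
     * The 4th 32-bit word of the TCP header holds data offset, flags and
     * window; after ntohl() the flags byte sits in bits 16..23, hence the
     * shift and mask below.
     */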
5089    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5090
5091#if 1
5092        /* XXX IPv4 */
5093        pbd->ip_id = ntohs(ip->ip_id);
5094        pbd->tcp_pseudo_csum =
5095            ntohs(in_pseudo(ip->ip_src.s_addr,
5096                            ip->ip_dst.s_addr,
5097                            htons(IPPROTO_TCP)));
5098#else
5099        /* XXX IPv6 */
5100        pbd->tcp_pseudo_csum =
5101            ntohs(in_pseudo(&ip6->ip6_src,
5102                            &ip6->ip6_dst,
5103                            htons(IPPROTO_TCP)));
5104#endif
5105
5106    pbd->global_data |=
5107        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5108}
5109
5110/*
5111 * Encapsulate an mbuf cluster into the tx bd chain and make the memory
5112 * visible to the controller.
5113 *
5114 * If an mbuf is submitted to this routine and cannot be given to the
5115 * controller (e.g. it has too many fragments) then the function may free
5116 * the mbuf and return to the caller.
5117 *
5118 * Returns:
5119 *   0 = Success, !0 = Failure
5120 *   Note the side effect that an mbuf may be freed if it causes a problem.
5121 */
5122static int
5123bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5124{
5125    bus_dma_segment_t segs[32];
5126    struct mbuf *m0;
5127    struct bxe_sw_tx_bd *tx_buf;
5128    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5129    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5130    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5131    struct eth_tx_bd *tx_data_bd;
5132    struct eth_tx_bd *tx_total_pkt_size_bd;
5133    struct eth_tx_start_bd *tx_start_bd;
5134    uint16_t bd_prod, pkt_prod, total_pkt_size;
5135    uint8_t mac_type;
5136    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5137    struct bxe_softc *sc;
5138    uint16_t tx_bd_avail;
5139    struct ether_vlan_header *eh;
5140    uint32_t pbd_e2_parsing_data = 0;
5141    uint8_t hlen = 0;
5142    int tmp_bd;
5143    int i;
5144
5145    sc = fp->sc;
5146
5147#if __FreeBSD_version >= 800000
5148    M_ASSERTPKTHDR(*m_head);
5149#endif /* #if __FreeBSD_version >= 800000 */
5150
5151    m0 = *m_head;
5152    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5153    tx_start_bd = NULL;
5154    tx_data_bd = NULL;
5155    tx_total_pkt_size_bd = NULL;
5156
5157    /* get the H/W pointer for packets and BDs */
5158    pkt_prod = fp->tx_pkt_prod;
5159    bd_prod = fp->tx_bd_prod;
5160
5161    mac_type = UNICAST_ADDRESS;
5162
5163    /* map the mbuf into the next open DMAable memory */
5164    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5165    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5166                                    tx_buf->m_map, m0,
5167                                    segs, &nsegs, BUS_DMA_NOWAIT);
5168
5169    /* mapping errors */
5170    if (__predict_false(error != 0)) {
5171        fp->eth_q_stats.tx_dma_mapping_failure++;
5172        if (error == ENOMEM) {
5173            /* resource issue, try again later */
5174            rc = ENOMEM;
5175        } else if (error == EFBIG) {
5176            /* possibly recoverable with defragmentation */
5177            fp->eth_q_stats.mbuf_defrag_attempts++;
5178            m0 = m_defrag(*m_head, M_NOWAIT);
5179            if (m0 == NULL) {
5180                fp->eth_q_stats.mbuf_defrag_failures++;
5181                rc = ENOBUFS;
5182            } else {
5183                /* defrag successful, try mapping again */
5184                *m_head = m0;
5185                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5186                                                tx_buf->m_map, m0,
5187                                                segs, &nsegs, BUS_DMA_NOWAIT);
5188                if (error) {
5189                    fp->eth_q_stats.tx_dma_mapping_failure++;
5190                    rc = error;
5191                }
5192            }
5193        } else {
5194            /* unknown, unrecoverable mapping error */
5195            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5196            bxe_dump_mbuf(sc, m0, FALSE);
5197            rc = error;
5198        }
5199
5200        goto bxe_tx_encap_continue;
5201    }
5202
5203    tx_bd_avail = bxe_tx_avail(sc, fp);
5204
5205    /* make sure there is enough room in the send queue */
5206    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5207        /* Recoverable, try again later. */
5208        fp->eth_q_stats.tx_hw_queue_full++;
5209        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5210        rc = ENOMEM;
5211        goto bxe_tx_encap_continue;
5212    }
5213
5214    /* capture the current H/W TX chain high watermark */
5215    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5216                        (TX_BD_USABLE - tx_bd_avail))) {
5217        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5218    }
5219
5220    /* make sure it fits in the packet window */
5221    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5222        /*
5223         * The mbuf may be too big for the controller to handle. If the frame
5224         * is a TSO frame we'll need to do an additional check.
5225         */
5226        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5227            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5228                goto bxe_tx_encap_continue; /* OK to send */
5229            } else {
5230                fp->eth_q_stats.tx_window_violation_tso++;
5231            }
5232        } else {
5233            fp->eth_q_stats.tx_window_violation_std++;
5234        }
5235
5236        /* lets try to defragment this mbuf and remap it */
5237        fp->eth_q_stats.mbuf_defrag_attempts++;
5238        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5239
5240        m0 = m_defrag(*m_head, M_NOWAIT);
5241        if (m0 == NULL) {
5242            fp->eth_q_stats.mbuf_defrag_failures++;
5243            /* Ugh, just drop the frame... :( */
5244            rc = ENOBUFS;
5245        } else {
5246            /* defrag successful, try mapping again */
5247            *m_head = m0;
5248            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5249                                            tx_buf->m_map, m0,
5250                                            segs, &nsegs, BUS_DMA_NOWAIT);
5251            if (error) {
5252                fp->eth_q_stats.tx_dma_mapping_failure++;
5253                /* No sense in trying to defrag/copy chain, drop it. :( */
5254                rc = error;
5255            } else {
5256               /* if the chain is still too long then drop it */
5257                if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5258                    /*
5259                     * in case TSO is enabled nsegs should be checked against
5260                     * BXE_TSO_MAX_SEGMENTS
5261                     */
5262                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5263                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5264                        fp->eth_q_stats.nsegs_path1_errors++;
5265                        rc = ENODEV;
5266                    }
5267                } else {
5268                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5269                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5270                        fp->eth_q_stats.nsegs_path2_errors++;
5271                        rc = ENODEV;
5272                    }
5273                }
5274            }
5275        }
5276    }
5277
5278bxe_tx_encap_continue:
5279
5280    /* Check for errors */
5281    if (rc) {
5282        if (rc == ENOMEM) {
5283            /* recoverable, try again later */
5284        } else {
5285            fp->eth_q_stats.tx_soft_errors++;
5286            fp->eth_q_stats.mbuf_alloc_tx--;
5287            m_freem(*m_head);
5288            *m_head = NULL;
5289        }
5290
5291        return (rc);
5292    }
5293
5294    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5295    if (m0->m_flags & M_BCAST) {
5296        mac_type = BROADCAST_ADDRESS;
5297    } else if (m0->m_flags & M_MCAST) {
5298        mac_type = MULTICAST_ADDRESS;
5299    }
5300
5301    /* store the mbuf into the mbuf ring */
5302    tx_buf->m        = m0;
5303    tx_buf->first_bd = fp->tx_bd_prod;
5304    tx_buf->flags    = 0;
5305
5306    /* prepare the first transmit (start) BD for the mbuf */
5307    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5308
5309    BLOGD(sc, DBG_TX,
5310          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5311          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5312
5313    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5314    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5315    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5316    total_pkt_size += tx_start_bd->nbytes;
5317    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5318
5319    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5320
5321    /* all frames have at least Start BD + Parsing BD */
5322    nbds = nsegs + 1;
5323    tx_start_bd->nbd = htole16(nbds);
5324
5325    if (m0->m_flags & M_VLANTAG) {
5326        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5327        tx_start_bd->bd_flags.as_bitfield |=
5328            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5329    } else {
5330        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5331        if (IS_VF(sc)) {
5332            /* map ethernet header to find type and header length */
5333            eh = mtod(m0, struct ether_vlan_header *);
5334            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5335        } else {
5336            /* used by FW for packet accounting */
5337            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5338        }
5339    }
5340
5341    /*
5342     * add a parsing BD from the chain. The parsing BD is always added
5343     * though it is only used for TSO and chksum
5344     */
5345    bd_prod = TX_BD_NEXT(bd_prod);
5346
5347    if (m0->m_pkthdr.csum_flags) {
5348        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5349            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5350            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5351        }
5352
5353        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5354            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5355                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5356        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5357            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5358                                                  ETH_TX_BD_FLAGS_IS_UDP |
5359                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5360        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5361                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5362            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5363        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5364            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5365                                                  ETH_TX_BD_FLAGS_IS_UDP);
5366        }
5367    }
5368
5369    if (!CHIP_IS_E1x(sc)) {
5370        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5371        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5372
5373        if (m0->m_pkthdr.csum_flags) {
5374            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5375        }
5376
5377        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5378                 mac_type);
5379    } else {
5380        uint16_t global_data = 0;
5381
5382        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5383        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5384
5385        if (m0->m_pkthdr.csum_flags) {
5386            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5387        }
5388
5389        SET_FLAG(global_data,
5390                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5391        pbd_e1x->global_data |= htole16(global_data);
5392    }
5393
5394    /* setup the parsing BD with TSO specific info */
5395    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5396        fp->eth_q_stats.tx_ofld_frames_lso++;
5397        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5398
5399        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5400            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5401
5402            /* split the first BD into header/data making the fw job easy */
5403            nbds++;
5404            tx_start_bd->nbd = htole16(nbds);
5405            tx_start_bd->nbytes = htole16(hlen);
5406
5407            bd_prod = TX_BD_NEXT(bd_prod);
5408
5409            /* new transmit BD after the tx_parse_bd */
5410            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5411            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5412            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5413            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5414            if (tx_total_pkt_size_bd == NULL) {
5415                tx_total_pkt_size_bd = tx_data_bd;
5416            }
5417
5418            BLOGD(sc, DBG_TX,
5419                  "TSO split header size is %d (%x:%x) nbds %d\n",
5420                  le16toh(tx_start_bd->nbytes),
5421                  le32toh(tx_start_bd->addr_hi),
5422                  le32toh(tx_start_bd->addr_lo),
5423                  nbds);
5424        }
5425
5426        if (!CHIP_IS_E1x(sc)) {
5427            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5428        } else {
5429            bxe_set_pbd_lso(m0, pbd_e1x);
5430        }
5431    }
5432
5433    if (pbd_e2_parsing_data) {
5434        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5435    }
5436
5437    /* prepare remaining BDs, start tx bd contains first seg/frag */
5438    for (i = 1; i < nsegs ; i++) {
5439        bd_prod = TX_BD_NEXT(bd_prod);
5440        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5441        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5442        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5443        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5444        if (tx_total_pkt_size_bd == NULL) {
5445            tx_total_pkt_size_bd = tx_data_bd;
5446        }
5447        total_pkt_size += tx_data_bd->nbytes;
5448    }
5449
5450    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5451
5452    if (tx_total_pkt_size_bd != NULL) {
5453        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5454    }
5455
5456    if (__predict_false(sc->debug & DBG_TX)) {
5457        tmp_bd = tx_buf->first_bd;
5458        for (i = 0; i < nbds; i++)
5459        {
5460            if (i == 0) {
5461                BLOGD(sc, DBG_TX,
5462                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5463                      "bd_flags=0x%x hdr_nbds=%d\n",
5464                      tx_start_bd,
5465                      tmp_bd,
5466                      le16toh(tx_start_bd->nbd),
5467                      le16toh(tx_start_bd->vlan_or_ethertype),
5468                      tx_start_bd->bd_flags.as_bitfield,
5469                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5470            } else if (i == 1) {
5471                if (pbd_e1x) {
5472                    BLOGD(sc, DBG_TX,
5473                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5474                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5475                          "tcp_seq=%u total_hlen_w=%u\n",
5476                          pbd_e1x,
5477                          tmp_bd,
5478                          pbd_e1x->global_data,
5479                          pbd_e1x->ip_hlen_w,
5480                          pbd_e1x->ip_id,
5481                          pbd_e1x->lso_mss,
5482                          pbd_e1x->tcp_flags,
5483                          pbd_e1x->tcp_pseudo_csum,
5484                          pbd_e1x->tcp_send_seq,
5485                          le16toh(pbd_e1x->total_hlen_w));
5486                } else { /* if (pbd_e2) */
5487                    BLOGD(sc, DBG_TX,
5488                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5489                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5490                          pbd_e2,
5491                          tmp_bd,
5492                          pbd_e2->data.mac_addr.dst_hi,
5493                          pbd_e2->data.mac_addr.dst_mid,
5494                          pbd_e2->data.mac_addr.dst_lo,
5495                          pbd_e2->data.mac_addr.src_hi,
5496                          pbd_e2->data.mac_addr.src_mid,
5497                          pbd_e2->data.mac_addr.src_lo,
5498                          pbd_e2->parsing_data);
5499                }
5500            }
5501
5502            if (i != 1) { /* skip the parse bd as it doesn't hold data */
5503                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5504                BLOGD(sc, DBG_TX,
5505                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5506                      tx_data_bd,
5507                      tmp_bd,
5508                      le16toh(tx_data_bd->nbytes),
5509                      le32toh(tx_data_bd->addr_hi),
5510                      le32toh(tx_data_bd->addr_lo));
5511            }
5512
5513            tmp_bd = TX_BD_NEXT(tmp_bd);
5514        }
5515    }
5516
5517    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5518
5519    /* update TX BD producer index value for next TX */
5520    bd_prod = TX_BD_NEXT(bd_prod);
5521
5522    /*
5523     * If the chain of tx_bd's describing this frame is adjacent to or spans
5524     * an eth_tx_next_bd element then we need to increment the nbds value.
5525     */
5526    if (TX_BD_IDX(bd_prod) < nbds) {
5527        nbds++;
5528    }
5529
5530    /* don't allow reordering of writes for nbd and packets */
5531    mb();
5532
5533    fp->tx_db.data.prod += nbds;
5534
5535    /* producer points to the next free tx_bd at this point */
5536    fp->tx_pkt_prod++;
5537    fp->tx_bd_prod = bd_prod;
5538
5539    DOORBELL(sc, fp->index, fp->tx_db.raw);
5540
5541    fp->eth_q_stats.tx_pkts++;
5542
5543    /* Prevent speculative reads from getting ahead of the status block. */
5544    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5545                      0, 0, BUS_SPACE_BARRIER_READ);
5546
5547    /* Prevent speculative reads from getting ahead of the doorbell. */
5548    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5549                      0, 0, BUS_SPACE_BARRIER_READ);
5550
5551    return (0);
5552}
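
/*
 * Illustrative note on the BD layout produced by bxe_tx_encap() for a
 * typical offloaded frame (one possible arrangement, TSO header split
 * shown):
 *
 *   start BD  - first DMA segment (or just the headers when TSO splits it)
 *   parse BD  - e1x or e2 parsing info for checksum/TSO offload
 *   data BD   - remainder of segment 0 after a TSO header split
 *   data BD   - segment 1 ... data BD - segment nsegs-1
 *
 * nbds counts all of the above; if the frame's BDs reach or cross the
 * eth_tx_next_bd element at a chain page boundary, nbds is incremented one
 * more time before the doorbell is rung.
 */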
5553
5554static void
5555bxe_tx_start_locked(struct bxe_softc *sc,
5556                    if_t ifp,
5557                    struct bxe_fastpath *fp)
5558{
5559    struct mbuf *m = NULL;
5560    int tx_count = 0;
5561    uint16_t tx_bd_avail;
5562
5563    BXE_FP_TX_LOCK_ASSERT(fp);
5564
5565    /* keep adding entries while there are frames to send */
5566    while (!if_sendq_empty(ifp)) {
5567
5568        /*
5569         * check for any frames to send
5570         * dequeue can still be NULL even if queue is not empty
5571         */
5572        m = if_dequeue(ifp);
5573        if (__predict_false(m == NULL)) {
5574            break;
5575        }
5576
5577        /* the mbuf now belongs to us */
5578        fp->eth_q_stats.mbuf_alloc_tx++;
5579
5580        /*
5581         * Put the frame into the transmit ring. If we don't have room,
5582         * place the mbuf back at the head of the TX queue, set the
5583         * OACTIVE flag, and wait for the NIC to drain the chain.
5584         */
5585        if (__predict_false(bxe_tx_encap(fp, &m))) {
5586            fp->eth_q_stats.tx_encap_failures++;
5587            if (m != NULL) {
5588                /* mark the TX queue as full and return the frame */
5589                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5590                if_sendq_prepend(ifp, m);
5591                fp->eth_q_stats.mbuf_alloc_tx--;
5592                fp->eth_q_stats.tx_queue_xoff++;
5593            }
5594
5595            /* stop looking for more work */
5596            break;
5597        }
5598
5599        /* the frame was enqueued successfully */
5600        tx_count++;
5601
5602        /* send a copy of the frame to any BPF listeners. */
5603        if_etherbpfmtap(ifp, m);
5604
5605        tx_bd_avail = bxe_tx_avail(sc, fp);
5606
5607        /* handle any completions if we're running low */
5608        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5609            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5610            bxe_txeof(sc, fp);
5611            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5612                break;
5613            }
5614        }
5615    }
5616
5617    /* all TX packets were dequeued and/or the tx ring is full */
5618    if (tx_count > 0) {
5619        /* reset the TX watchdog timeout timer */
5620        fp->watchdog_timer = BXE_TX_TIMEOUT;
5621    }
5622}
5623
5624/* Legacy (non-RSS) dispatch routine */
5625static void
5626bxe_tx_start(if_t ifp)
5627{
5628    struct bxe_softc *sc;
5629    struct bxe_fastpath *fp;
5630
5631    sc = if_getsoftc(ifp);
5632
5633    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5634        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5635        return;
5636    }
5637
5638    if (!sc->link_vars.link_up) {
5639        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5640        return;
5641    }
5642
5643    fp = &sc->fp[0];
5644
5645    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5646        fp->eth_q_stats.tx_queue_full_return++;
5647        return;
5648    }
5649
5650    BXE_FP_TX_LOCK(fp);
5651    bxe_tx_start_locked(sc, ifp, fp);
5652    BXE_FP_TX_UNLOCK(fp);
5653}
5654
5655#if __FreeBSD_version >= 901504
5656
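/*
 * Drain the per-queue buf_ring: optionally enqueue the passed mbuf, then
 * peek at the ring head, attempt bxe_tx_encap(), and either advance past
 * the frame on success or put it back (marking the queue OACTIVE) on
 * failure. Any frames left in the ring are retried later via the timeout
 * task scheduled at the exit label.
 */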
5657static int
5658bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5659                       if_t                ifp,
5660                       struct bxe_fastpath *fp,
5661                       struct mbuf         *m)
5662{
5663    struct buf_ring *tx_br = fp->tx_br;
5664    struct mbuf *next;
5665    int depth, rc, tx_count;
5666    uint16_t tx_bd_avail;
5667
5668    rc = tx_count = 0;
5669
5670    BXE_FP_TX_LOCK_ASSERT(fp);
5671
5672    if (sc->state != BXE_STATE_OPEN)  {
5673        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5674        return ENETDOWN;
5675    }
5676
5677    if (!tx_br) {
5678        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5679        return (EINVAL);
5680    }
5681
5682    if (m != NULL) {
5683        rc = drbr_enqueue(ifp, tx_br, m);
5684        if (rc != 0) {
5685            fp->eth_q_stats.tx_soft_errors++;
5686            goto bxe_tx_mq_start_locked_exit;
5687        }
5688    }
5689
5690    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5691        fp->eth_q_stats.tx_request_link_down_failures++;
5692        goto bxe_tx_mq_start_locked_exit;
5693    }
5694
5695    /* fetch the depth of the driver queue */
5696    depth = drbr_inuse_drv(ifp, tx_br);
5697    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5698        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5699    }
5700
5701    /* keep adding entries while there are frames to send */
5702    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5703        /* handle any completions if we're running low */
5704        tx_bd_avail = bxe_tx_avail(sc, fp);
5705        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5706            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5707            bxe_txeof(sc, fp);
5708            tx_bd_avail = bxe_tx_avail(sc, fp);
5709            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5710                fp->eth_q_stats.bd_avail_too_less_failures++;
5711                m_freem(next);
5712                drbr_advance(ifp, tx_br);
5713                rc = ENOBUFS;
5714                break;
5715            }
5716        }
5717
5718        /* the mbuf now belongs to us */
5719        fp->eth_q_stats.mbuf_alloc_tx++;
5720
5721        /*
5722         * Put the frame into the transmit ring. If we don't have room,
5723         * place the mbuf back at the head of the TX queue, set the
5724         * OACTIVE flag, and wait for the NIC to drain the chain.
5725         */
5726        rc = bxe_tx_encap(fp, &next);
5727        if (__predict_false(rc != 0)) {
5728            fp->eth_q_stats.tx_encap_failures++;
5729            if (next != NULL) {
5730                /* mark the TX queue as full and save the frame */
5731                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5732                drbr_putback(ifp, tx_br, next);
5733                fp->eth_q_stats.mbuf_alloc_tx--;
5734                fp->eth_q_stats.tx_frames_deferred++;
5735            } else
5736                drbr_advance(ifp, tx_br);
5737
5738            /* stop looking for more work */
5739            break;
5740        }
5741
5742        /* the transmit frame was enqueued successfully */
5743        tx_count++;
5744
5745        /* send a copy of the frame to any BPF listeners */
5746        if_etherbpfmtap(ifp, next);
5747
5748        drbr_advance(ifp, tx_br);
5749    }
5750
5751    /* all TX packets were dequeued and/or the tx ring is full */
5752    if (tx_count > 0) {
5753        /* reset the TX watchdog timeout timer */
5754        fp->watchdog_timer = BXE_TX_TIMEOUT;
5755    }
5756
5757bxe_tx_mq_start_locked_exit:
5758    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5759    if (!drbr_empty(ifp, tx_br)) {
5760        fp->eth_q_stats.tx_mq_not_empty++;
5761        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5762    }
5763
5764    return (rc);
5765}
5766
5767static void
5768bxe_tx_mq_start_deferred(void *arg,
5769                         int pending)
5770{
5771    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5772    struct bxe_softc *sc = fp->sc;
5773    if_t ifp = sc->ifp;
5774
5775    BXE_FP_TX_LOCK(fp);
5776    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5777    BXE_FP_TX_UNLOCK(fp);
5778}
5779
5780/* Multiqueue (TSS) dispatch routine. */
5781static int
5782bxe_tx_mq_start(struct ifnet *ifp,
5783                struct mbuf  *m)
5784{
5785    struct bxe_softc *sc = if_getsoftc(ifp);
5786    struct bxe_fastpath *fp;
5787    int fp_index, rc;
5788
5789    fp_index = 0; /* default is the first queue */
5790
5791    /* check if flowid is set */
5792
5793    if (BXE_VALID_FLOWID(m))
5794        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5795
5796    fp = &sc->fp[fp_index];
5797
5798    if (sc->state != BXE_STATE_OPEN)  {
5799        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5800        return ENETDOWN;
5801    }
5802
5803    if (BXE_FP_TX_TRYLOCK(fp)) {
5804        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5805        BXE_FP_TX_UNLOCK(fp);
5806    } else {
5807        rc = drbr_enqueue(ifp, fp->tx_br, m);
5808        taskqueue_enqueue(fp->tq, &fp->tx_task);
5809    }
5810
5811    return (rc);
5812}
5813
5814static void
5815bxe_mq_flush(struct ifnet *ifp)
5816{
5817    struct bxe_softc *sc = if_getsoftc(ifp);
5818    struct bxe_fastpath *fp;
5819    struct mbuf *m;
5820    int i;
5821
5822    for (i = 0; i < sc->num_queues; i++) {
5823        fp = &sc->fp[i];
5824
5825        if (fp->state != BXE_FP_STATE_IRQ) {
5826            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5827                  fp->index, fp->state);
5828            continue;
5829        }
5830
5831        if (fp->tx_br != NULL) {
5832            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5833            BXE_FP_TX_LOCK(fp);
5834            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5835                m_freem(m);
5836            }
5837            BXE_FP_TX_UNLOCK(fp);
5838        }
5839    }
5840
5841    if_qflush(ifp);
5842}
5843
5844#endif /* FreeBSD_version >= 901504 */
5845
5846static uint16_t
5847bxe_cid_ilt_lines(struct bxe_softc *sc)
5848{
5849    if (IS_SRIOV(sc)) {
5850        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5851    }
5852    return (L2_ILT_LINES(sc));
5853}
5854
5855static void
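/*
 * Lay out the ILT line ranges for this PCI function: consecutive lines are
 * assigned to the CDU client first, then to the QM, SRC and TM clients when
 * enabled, starting from the function's base line. Only the bookkeeping is
 * done here; the backing pages are allocated separately (see
 * ecore_ilt_mem_op() in bxe_alloc_mem()).
 */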
5856bxe_ilt_set_info(struct bxe_softc *sc)
5857{
5858    struct ilt_client_info *ilt_client;
5859    struct ecore_ilt *ilt = sc->ilt;
5860    uint16_t line = 0;
5861
5862    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5863    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5864
5865    /* CDU */
5866    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5867    ilt_client->client_num = ILT_CLIENT_CDU;
5868    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5869    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5870    ilt_client->start = line;
5871    line += bxe_cid_ilt_lines(sc);
5872
5873    if (CNIC_SUPPORT(sc)) {
5874        line += CNIC_ILT_LINES;
5875    }
5876
5877    ilt_client->end = (line - 1);
5878
5879    BLOGD(sc, DBG_LOAD,
5880          "ilt client[CDU]: start %d, end %d, "
5881          "psz 0x%x, flags 0x%x, hw psz %d\n",
5882          ilt_client->start, ilt_client->end,
5883          ilt_client->page_size,
5884          ilt_client->flags,
5885          ilog2(ilt_client->page_size >> 12));
5886
5887    /* QM */
5888    if (QM_INIT(sc->qm_cid_count)) {
5889        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5890        ilt_client->client_num = ILT_CLIENT_QM;
5891        ilt_client->page_size = QM_ILT_PAGE_SZ;
5892        ilt_client->flags = 0;
5893        ilt_client->start = line;
5894
5895        /* 4 bytes for each cid */
5896        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5897                             QM_ILT_PAGE_SZ);
5898
5899        ilt_client->end = (line - 1);
5900
5901        BLOGD(sc, DBG_LOAD,
5902              "ilt client[QM]: start %d, end %d, "
5903              "psz 0x%x, flags 0x%x, hw psz %d\n",
5904              ilt_client->start, ilt_client->end,
5905              ilt_client->page_size, ilt_client->flags,
5906              ilog2(ilt_client->page_size >> 12));
5907    }
5908
5909    if (CNIC_SUPPORT(sc)) {
5910        /* SRC */
5911        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5912        ilt_client->client_num = ILT_CLIENT_SRC;
5913        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5914        ilt_client->flags = 0;
5915        ilt_client->start = line;
5916        line += SRC_ILT_LINES;
5917        ilt_client->end = (line - 1);
5918
5919        BLOGD(sc, DBG_LOAD,
5920              "ilt client[SRC]: start %d, end %d, "
5921              "psz 0x%x, flags 0x%x, hw psz %d\n",
5922              ilt_client->start, ilt_client->end,
5923              ilt_client->page_size, ilt_client->flags,
5924              ilog2(ilt_client->page_size >> 12));
5925
5926        /* TM */
5927        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5928        ilt_client->client_num = ILT_CLIENT_TM;
5929        ilt_client->page_size = TM_ILT_PAGE_SZ;
5930        ilt_client->flags = 0;
5931        ilt_client->start = line;
5932        line += TM_ILT_LINES;
5933        ilt_client->end = (line - 1);
5934
5935        BLOGD(sc, DBG_LOAD,
5936              "ilt client[TM]: start %d, end %d, "
5937              "psz 0x%x, flags 0x%x, hw psz %d\n",
5938              ilt_client->start, ilt_client->end,
5939              ilt_client->page_size, ilt_client->flags,
5940              ilog2(ilt_client->page_size >> 12));
5941    }
5942
5943    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5944}
5945
5946static void
5947bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5948{
5949    int i;
5950    uint32_t rx_buf_size;
5951
5952    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5953
5954    for (i = 0; i < sc->num_queues; i++) {
5955        if (rx_buf_size <= MCLBYTES) {
5956            sc->fp[i].rx_buf_size = rx_buf_size;
5957            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5958        } else if (rx_buf_size <= MJUMPAGESIZE) {
5959            sc->fp[i].rx_buf_size = rx_buf_size;
5960            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5961        } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5962            sc->fp[i].rx_buf_size = MCLBYTES;
5963            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5964        } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5965            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5966            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5967        } else {
5968            sc->fp[i].rx_buf_size = MCLBYTES;
5969            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5970        }
5971    }
5972}
5973
5974static int
5975bxe_alloc_ilt_mem(struct bxe_softc *sc)
5976{
5977    int rc = 0;
5978
5979    if ((sc->ilt =
5980         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5981                                    M_BXE_ILT,
5982                                    (M_NOWAIT | M_ZERO))) == NULL) {
5983        rc = 1;
5984    }
5985
5986    return (rc);
5987}
5988
5989static int
5990bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5991{
5992    int rc = 0;
5993
5994    if ((sc->ilt->lines =
5995         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5996                                    M_BXE_ILT,
5997                                    (M_NOWAIT | M_ZERO))) == NULL) {
5998        rc = 1;
5999    }
6000
6001    return (rc);
6002}
6003
6004static void
6005bxe_free_ilt_mem(struct bxe_softc *sc)
6006{
6007    if (sc->ilt != NULL) {
6008        free(sc->ilt, M_BXE_ILT);
6009        sc->ilt = NULL;
6010    }
6011}
6012
6013static void
6014bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6015{
6016    if (sc->ilt->lines != NULL) {
6017        free(sc->ilt->lines, M_BXE_ILT);
6018        sc->ilt->lines = NULL;
6019    }
6020}
6021
6022static void
6023bxe_free_mem(struct bxe_softc *sc)
6024{
6025    int i;
6026
6027    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6028        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6029        sc->context[i].vcxt = NULL;
6030        sc->context[i].size = 0;
6031    }
6032
6033    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6034
6035    bxe_free_ilt_lines_mem(sc);
6036
6037}
6038
6039static int
6040bxe_alloc_mem(struct bxe_softc *sc)
6041{
6042
6043    int context_size;
6044    int allocated;
6045    int i;
6046
6047    /*
6048     * Allocate memory for CDU context:
6049     * This memory is allocated separately and not in the generic ILT
6050     * functions because CDU differs in a few aspects:
6051     * 1. There can be multiple entities allocating memory for context -
6052     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6053     * its own ILT lines.
6054     * 2. Since CDU page-size is not a single 4KB page (which is the case
6055     * for the other ILT clients), to be efficient we want to support
6056     * allocation of sub-page-size in the last entry.
6057     * 3. Context pointers are used by the driver to pass to FW / update
6058     * the context (for the other ILT clients the pointers are used just to
6059     * free the memory during unload).
6060     */
6061    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6062    for (i = 0, allocated = 0; allocated < context_size; i++) {
6063        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6064                                  (context_size - allocated));
6065
6066        if (bxe_dma_alloc(sc, sc->context[i].size,
6067                          &sc->context[i].vcxt_dma,
6068                          "cdu context") != 0) {
6069            bxe_free_mem(sc);
6070            return (-1);
6071        }
6072
6073        sc->context[i].vcxt =
6074            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6075
6076        allocated += sc->context[i].size;
6077    }
6078
6079    bxe_alloc_ilt_lines_mem(sc);
6080
6081    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6082          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6083    {
6084        for (i = 0; i < 4; i++) {
6085            BLOGD(sc, DBG_LOAD,
6086                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6087                  i,
6088                  sc->ilt->clients[i].page_size,
6089                  sc->ilt->clients[i].start,
6090                  sc->ilt->clients[i].end,
6091                  sc->ilt->clients[i].client_num,
6092                  sc->ilt->clients[i].flags);
6093        }
6094    }
6095    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6096        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6097        bxe_free_mem(sc);
6098        return (-1);
6099    }
6100
6101    return (0);
6102}
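
/*
 * Illustrative sketch only (not compiled): how the loop above splits the
 * CDU context area into CDU_ILT_PAGE_SZ chunks with a smaller, sub-page
 * final chunk. The sizes below are hypothetical stand-ins.
 */
#if 0
static void
bxe_cdu_chunk_sketch(void)
{
    int context_size = 10000; /* hypothetical total context bytes */
    int page_size    = 4096;  /* hypothetical stand-in for CDU_ILT_PAGE_SZ */
    int allocated, chunk, i;

    for (i = 0, allocated = 0; allocated < context_size; i++) {
        chunk = min(page_size, (context_size - allocated));
        /* chunk sequence here: 4096, 4096, 1808 (the last one is sub-page) */
        allocated += chunk;
    }
}
#endif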
6103
6104static void
6105bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6106{
6107    struct bxe_softc *sc;
6108    int i;
6109
6110    sc = fp->sc;
6111
6112    if (fp->rx_mbuf_tag == NULL) {
6113        return;
6114    }
6115
6116    /* free all mbufs and unload all maps */
6117    for (i = 0; i < RX_BD_TOTAL; i++) {
6118        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6119            bus_dmamap_sync(fp->rx_mbuf_tag,
6120                            fp->rx_mbuf_chain[i].m_map,
6121                            BUS_DMASYNC_POSTREAD);
6122            bus_dmamap_unload(fp->rx_mbuf_tag,
6123                              fp->rx_mbuf_chain[i].m_map);
6124        }
6125
6126        if (fp->rx_mbuf_chain[i].m != NULL) {
6127            m_freem(fp->rx_mbuf_chain[i].m);
6128            fp->rx_mbuf_chain[i].m = NULL;
6129            fp->eth_q_stats.mbuf_alloc_rx--;
6130        }
6131    }
6132}
6133
6134static void
6135bxe_free_tpa_pool(struct bxe_fastpath *fp)
6136{
6137    struct bxe_softc *sc;
6138    int i, max_agg_queues;
6139
6140    sc = fp->sc;
6141
6142    if (fp->rx_mbuf_tag == NULL) {
6143        return;
6144    }
6145
6146    max_agg_queues = MAX_AGG_QS(sc);
6147
6148    /* release all mbufs and unload all DMA maps in the TPA pool */
6149    for (i = 0; i < max_agg_queues; i++) {
6150        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6151            bus_dmamap_sync(fp->rx_mbuf_tag,
6152                            fp->rx_tpa_info[i].bd.m_map,
6153                            BUS_DMASYNC_POSTREAD);
6154            bus_dmamap_unload(fp->rx_mbuf_tag,
6155                              fp->rx_tpa_info[i].bd.m_map);
6156        }
6157
6158        if (fp->rx_tpa_info[i].bd.m != NULL) {
6159            m_freem(fp->rx_tpa_info[i].bd.m);
6160            fp->rx_tpa_info[i].bd.m = NULL;
6161            fp->eth_q_stats.mbuf_alloc_tpa--;
6162        }
6163    }
6164}
6165
6166static void
6167bxe_free_sge_chain(struct bxe_fastpath *fp)
6168{
6169    struct bxe_softc *sc;
6170    int i;
6171
6172    sc = fp->sc;
6173
6174    if (fp->rx_sge_mbuf_tag == NULL) {
6175        return;
6176    }
6177
6178    /* free all mbufs and unload all maps */
6179    for (i = 0; i < RX_SGE_TOTAL; i++) {
6180        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6181            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6182                            fp->rx_sge_mbuf_chain[i].m_map,
6183                            BUS_DMASYNC_POSTREAD);
6184            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6185                              fp->rx_sge_mbuf_chain[i].m_map);
6186        }
6187
6188        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6189            m_freem(fp->rx_sge_mbuf_chain[i].m);
6190            fp->rx_sge_mbuf_chain[i].m = NULL;
6191            fp->eth_q_stats.mbuf_alloc_sge--;
6192        }
6193    }
6194}
6195
6196static void
6197bxe_free_fp_buffers(struct bxe_softc *sc)
6198{
6199    struct bxe_fastpath *fp;
6200    int i;
6201
6202    for (i = 0; i < sc->num_queues; i++) {
6203        fp = &sc->fp[i];
6204
6205#if __FreeBSD_version >= 901504
6206        if (fp->tx_br != NULL) {
6207            /* just in case bxe_mq_flush() wasn't called */
6208            if (mtx_initialized(&fp->tx_mtx)) {
6209                struct mbuf *m;
6210
6211                BXE_FP_TX_LOCK(fp);
6212                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6213                    m_freem(m);
6214                BXE_FP_TX_UNLOCK(fp);
6215            }
6216        }
6217#endif
6218
6219        /* free all RX buffers */
6220        bxe_free_rx_bd_chain(fp);
6221        bxe_free_tpa_pool(fp);
6222        bxe_free_sge_chain(fp);
6223
6224        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6225            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6226                  fp->eth_q_stats.mbuf_alloc_rx);
6227        }
6228
6229        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6230            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6231                  fp->eth_q_stats.mbuf_alloc_sge);
6232        }
6233
6234        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6235            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6236                  fp->eth_q_stats.mbuf_alloc_tpa);
6237        }
6238
6239        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6240            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6241                  fp->eth_q_stats.mbuf_alloc_tx);
6242        }
6243
6244        /* XXX verify all mbufs were reclaimed */
6245    }
6246}
6247
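/*
 * Refill one RX BD with a fresh mbuf using the spare-map idiom: the new
 * mbuf is first loaded into the fastpath's spare DMA map, so a mapping
 * failure leaves the ring entry untouched; on success the spare map and the
 * ring entry's map are swapped and the BD address is rewritten. prev_index
 * differs from index only in the partial-ring case handled by bxe_rxeof()
 * (see the comment in the body below).
 */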
6248static int
6249bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6250                     uint16_t            prev_index,
6251                     uint16_t            index)
6252{
6253    struct bxe_sw_rx_bd *rx_buf;
6254    struct eth_rx_bd *rx_bd;
6255    bus_dma_segment_t segs[1];
6256    bus_dmamap_t map;
6257    struct mbuf *m;
6258    int nsegs, rc;
6259
6260    rc = 0;
6261
6262    /* allocate the new RX BD mbuf */
6263    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6264    if (__predict_false(m == NULL)) {
6265        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6266        return (ENOBUFS);
6267    }
6268
6269    fp->eth_q_stats.mbuf_alloc_rx++;
6270
6271    /* initialize the mbuf buffer length */
6272    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6273
6274    /* map the mbuf into non-paged pool */
6275    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6276                                 fp->rx_mbuf_spare_map,
6277                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6278    if (__predict_false(rc != 0)) {
6279        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6280        m_freem(m);
6281        fp->eth_q_stats.mbuf_alloc_rx--;
6282        return (rc);
6283    }
6284
6285    /* all mbufs must map to a single segment */
6286    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6287
6288    /* release any existing RX BD mbuf mappings */
6289
6290    if (prev_index != index) {
6291        rx_buf = &fp->rx_mbuf_chain[prev_index];
6292
6293        if (rx_buf->m_map != NULL) {
6294            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6295                            BUS_DMASYNC_POSTREAD);
6296            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6297        }
6298
6299        /*
6300         * We only get here from bxe_rxeof() when the maximum number
6301         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6302         * holds the mbuf in the prev_index so it's OK to NULL it out
6303         * here without concern of a memory leak.
6304         */
6305        fp->rx_mbuf_chain[prev_index].m = NULL;
6306    }
6307
6308    rx_buf = &fp->rx_mbuf_chain[index];
6309
6310    if (rx_buf->m_map != NULL) {
6311        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6312                        BUS_DMASYNC_POSTREAD);
6313        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6314    }
6315
6316    /* save the mbuf and mapping info for a future packet */
6317    map = (prev_index != index) ?
6318              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6319    rx_buf->m_map = fp->rx_mbuf_spare_map;
6320    fp->rx_mbuf_spare_map = map;
6321    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6322                    BUS_DMASYNC_PREREAD);
6323    rx_buf->m = m;
6324
6325    rx_bd = &fp->rx_chain[index];
6326    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6327    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6328
6329    return (rc);
6330}
6331
6332static int
6333bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6334                      int                 queue)
6335{
6336    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6337    bus_dma_segment_t segs[1];
6338    bus_dmamap_t map;
6339    struct mbuf *m;
6340    int nsegs;
6341    int rc = 0;
6342
6343    /* allocate the new TPA mbuf */
6344    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6345    if (__predict_false(m == NULL)) {
6346        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6347        return (ENOBUFS);
6348    }
6349
6350    fp->eth_q_stats.mbuf_alloc_tpa++;
6351
6352    /* initialize the mbuf buffer length */
6353    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6354
6355    /* map the mbuf into non-paged pool */
6356    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6357                                 fp->rx_tpa_info_mbuf_spare_map,
6358                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6359    if (__predict_false(rc != 0)) {
6360        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6361        m_free(m);
6362        fp->eth_q_stats.mbuf_alloc_tpa--;
6363        return (rc);
6364    }
6365
6366    /* all mbufs must map to a single segment */
6367    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6368
6369    /* release any existing TPA mbuf mapping */
6370    if (tpa_info->bd.m_map != NULL) {
6371        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6372                        BUS_DMASYNC_POSTREAD);
6373        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6374    }
6375
6376    /* save the mbuf and mapping info for the TPA mbuf */
6377    map = tpa_info->bd.m_map;
6378    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6379    fp->rx_tpa_info_mbuf_spare_map = map;
6380    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6381                    BUS_DMASYNC_PREREAD);
6382    tpa_info->bd.m = m;
6383    tpa_info->seg = segs[0];
6384
6385    return (rc);
6386}
6387
6388/*
6389 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6390 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6391 * chain.
6392 */
6393static int
6394bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6395                      uint16_t            index)
6396{
6397    struct bxe_sw_rx_bd *sge_buf;
6398    struct eth_rx_sge *sge;
6399    bus_dma_segment_t segs[1];
6400    bus_dmamap_t map;
6401    struct mbuf *m;
6402    int nsegs;
6403    int rc = 0;
6404
6405    /* allocate a new SGE mbuf */
6406    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6407    if (__predict_false(m == NULL)) {
6408        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6409        return (ENOMEM);
6410    }
6411
6412    fp->eth_q_stats.mbuf_alloc_sge++;
6413
6414    /* initialize the mbuf buffer length */
6415    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6416
6417    /* map the SGE mbuf into non-paged pool */
6418    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6419                                 fp->rx_sge_mbuf_spare_map,
6420                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6421    if (__predict_false(rc != 0)) {
6422        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6423        m_freem(m);
6424        fp->eth_q_stats.mbuf_alloc_sge--;
6425        return (rc);
6426    }
6427
6428    /* all mbufs must map to a single segment */
6429    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6430
6431    sge_buf = &fp->rx_sge_mbuf_chain[index];
6432
6433    /* release any existing SGE mbuf mapping */
6434    if (sge_buf->m_map != NULL) {
6435        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6436                        BUS_DMASYNC_POSTREAD);
6437        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6438    }
6439
6440    /* save the mbuf and mapping info for a future packet */
6441    map = sge_buf->m_map;
6442    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6443    fp->rx_sge_mbuf_spare_map = map;
6444    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6445                    BUS_DMASYNC_PREREAD);
6446    sge_buf->m = m;
6447
6448    sge = &fp->rx_sge_chain[index];
6449    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6450    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6451
6452    return (rc);
6453}
6454
6455static __noinline int
6456bxe_alloc_fp_buffers(struct bxe_softc *sc)
6457{
6458    struct bxe_fastpath *fp;
6459    int i, j, rc = 0;
6460    int ring_prod, cqe_ring_prod;
6461    int max_agg_queues;
6462
6463    for (i = 0; i < sc->num_queues; i++) {
6464        fp = &sc->fp[i];
6465
6466        ring_prod = cqe_ring_prod = 0;
6467        fp->rx_bd_cons = 0;
6468        fp->rx_cq_cons = 0;
6469
6470        /* allocate buffers for the RX BDs in RX BD chain */
6471        for (j = 0; j < sc->max_rx_bufs; j++) {
6472            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6473            if (rc != 0) {
6474                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6475                      i, rc);
6476                goto bxe_alloc_fp_buffers_error;
6477            }
6478
6479            ring_prod     = RX_BD_NEXT(ring_prod);
6480            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6481        }
6482
6483        fp->rx_bd_prod = ring_prod;
6484        fp->rx_cq_prod = cqe_ring_prod;
6485        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6486
6487        max_agg_queues = MAX_AGG_QS(sc);
6488
6489        fp->tpa_enable = TRUE;
6490
6491        /* fill the TPA pool */
6492        for (j = 0; j < max_agg_queues; j++) {
6493            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6494            if (rc != 0) {
6495                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6496                          i, j);
6497                fp->tpa_enable = FALSE;
6498                goto bxe_alloc_fp_buffers_error;
6499            }
6500
6501            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6502        }
6503
6504        if (fp->tpa_enable) {
6505            /* fill the RX SGE chain */
6506            ring_prod = 0;
6507            for (j = 0; j < RX_SGE_USABLE; j++) {
6508                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6509                if (rc != 0) {
6510                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6511                              i, ring_prod);
6512                    fp->tpa_enable = FALSE;
6513                    ring_prod = 0;
6514                    goto bxe_alloc_fp_buffers_error;
6515                }
6516
6517                ring_prod = RX_SGE_NEXT(ring_prod);
6518            }
6519
6520            fp->rx_sge_prod = ring_prod;
6521        }
6522    }
6523
6524    return (0);
6525
6526bxe_alloc_fp_buffers_error:
6527
6528    /* unwind what was already allocated */
6529    bxe_free_rx_bd_chain(fp);
6530    bxe_free_tpa_pool(fp);
6531    bxe_free_sge_chain(fp);
6532
6533    return (ENOBUFS);
6534}
6535
6536static void
6537bxe_free_fw_stats_mem(struct bxe_softc *sc)
6538{
6539    bxe_dma_free(sc, &sc->fw_stats_dma);
6540
6541    sc->fw_stats_num = 0;
6542
6543    sc->fw_stats_req_size = 0;
6544    sc->fw_stats_req = NULL;
6545    sc->fw_stats_req_mapping = 0;
6546
6547    sc->fw_stats_data_size = 0;
6548    sc->fw_stats_data = NULL;
6549    sc->fw_stats_data_mapping = 0;
6550}
6551
6552static int
6553bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6554{
6555    uint8_t num_queue_stats;
6556    int num_groups;
6557
6558    /* number of queues for statistics is number of eth queues */
6559    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6560
6561    /*
6562     * Total number of FW statistics requests =
6563     *   1 for port stats + 1 for PF stats + num of queues
6564     */
6565    sc->fw_stats_num = (2 + num_queue_stats);
6566
6567    /*
6568     * Request is built from stats_query_header and an array of
6569     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6570     * rules. The real number of requests is configured in the
6571     * stats_query_header.
6572     */
6573    num_groups =
6574        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6575         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
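    /*
     * Illustrative example (values are hypothetical): with 4 ETH queues,
     * fw_stats_num = 2 + 4 = 6, and if STATS_QUERY_CMD_COUNT were 16 this
     * would yield num_groups = (6 / 16) + 1 = 1.
     */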
6576
6577    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6578          sc->fw_stats_num, num_groups);
6579
6580    sc->fw_stats_req_size =
6581        (sizeof(struct stats_query_header) +
6582         (num_groups * sizeof(struct stats_query_cmd_group)));
6583
6584    /*
6585     * Data for statistics requests + stats_counter.
6586     * stats_counter holds per-STORM counters that are incremented when
6587     * STORM has finished with the current request. Memory for FCoE
6588     * offloaded statistics is counted anyway, even if they will not be sent.
6589     * VF stats are not accounted for here as the data of VF stats is stored
6590     * in memory allocated by the VF, not here.
6591     */
6592    sc->fw_stats_data_size =
6593        (sizeof(struct stats_counter) +
6594         sizeof(struct per_port_stats) +
6595         sizeof(struct per_pf_stats) +
6596         /* sizeof(struct fcoe_statistics_params) + */
6597         (sizeof(struct per_queue_stats) * num_queue_stats));
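    /*
     * The request and data blocks share a single DMA allocation laid out as
     * [ request | data ]; the data block starts fw_stats_req_size bytes into
     * the buffer (see the shortcut pointers set up below).
     */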
6598
6599    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6600                      &sc->fw_stats_dma, "fw stats") != 0) {
6601        bxe_free_fw_stats_mem(sc);
6602        return (-1);
6603    }
6604
6605    /* set up the shortcuts */
6606
6607    sc->fw_stats_req =
6608        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6609    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6610
6611    sc->fw_stats_data =
6612        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6613                                     sc->fw_stats_req_size);
6614    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6615                                 sc->fw_stats_req_size);
6616
6617    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6618          (uintmax_t)sc->fw_stats_req_mapping);
6619
6620    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6621          (uintmax_t)sc->fw_stats_data_mapping);
6622
6623    return (0);
6624}
6625
6626/*
6627 * Bits map:
6628 * 0-7  - Engine0 load counter.
6629 * 8-15 - Engine1 load counter.
6630 * 16   - Engine0 RESET_IN_PROGRESS bit.
6631 * 17   - Engine1 RESET_IN_PROGRESS bit.
6632 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6633 *        function on the engine
6634 * 19   - Engine1 ONE_IS_LOADED.
6635 * 20   - Chip reset flow bit. When set, a non-leader must wait for the leaders
6636 *        of both engines to complete (check both RESET_IN_PROGRESS bits, not
6637 *        just the one belonging to its engine).
6638 */
6639#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6640#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6641#define BXE_PATH0_LOAD_CNT_SHIFT  0
6642#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6643#define BXE_PATH1_LOAD_CNT_SHIFT  8
6644#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6645#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6646#define BXE_GLOBAL_RESET_BIT      0x00040000
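/*
 * Illustrative decoding (hypothetical register value): 0x00050003 means
 * engine0 load counter 0x03 (two functions loaded), engine1 load counter 0x00,
 * engine0 RESET_IN_PROGRESS set (0x00010000) and GLOBAL_RESET set (0x00040000).
 */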
6647
6648/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6649static void
6650bxe_set_reset_global(struct bxe_softc *sc)
6651{
6652    uint32_t val;
6653    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6654    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6655    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6656    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6657}
6658
6659/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6660static void
6661bxe_clear_reset_global(struct bxe_softc *sc)
6662{
6663    uint32_t val;
6664    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6665    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6666    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6667    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6668}
6669
6670/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6671static uint8_t
6672bxe_reset_is_global(struct bxe_softc *sc)
6673{
6674    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6675    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6676    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6677}
6678
6679/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6680static void
6681bxe_set_reset_done(struct bxe_softc *sc)
6682{
6683    uint32_t val;
6684    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6685                                 BXE_PATH0_RST_IN_PROG_BIT;
6686
6687    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6688
6689    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6690    /* Clear the bit */
6691    val &= ~bit;
6692    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6693
6694    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6695}
6696
6697/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6698static void
6699bxe_set_reset_in_progress(struct bxe_softc *sc)
6700{
6701    uint32_t val;
6702    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6703                                 BXE_PATH0_RST_IN_PROG_BIT;
6704
6705    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6706
6707    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6708    /* Set the bit */
6709    val |= bit;
6710    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6711
6712    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6713}
6714
6715/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6716static uint8_t
6717bxe_reset_is_done(struct bxe_softc *sc,
6718                  int              engine)
6719{
6720    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6721    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6722                            BXE_PATH0_RST_IN_PROG_BIT;
6723
6724    /* return false if bit is set */
6725    return (val & bit) ? FALSE : TRUE;
6726}
6727
6728/* get the load status for an engine, should be run under rtnl lock */
6729static uint8_t
6730bxe_get_load_status(struct bxe_softc *sc,
6731                    int              engine)
6732{
6733    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6734                             BXE_PATH0_LOAD_CNT_MASK;
6735    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6736                              BXE_PATH0_LOAD_CNT_SHIFT;
6737    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6738
6739    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6740
6741    val = ((val & mask) >> shift);
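    /*
     * Illustrative example (hypothetical value): GLOB_REG = 0x00000302 and
     * engine 1 gives (0x0302 & 0xff00) >> 8 = 0x03, i.e. two functions loaded.
     */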
6742
6743    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6744
6745    return (val != 0);
6746}
6747
6748/* set pf load mark */
6749/* XXX needs to be under rtnl lock */
6750static void
6751bxe_set_pf_load(struct bxe_softc *sc)
6752{
6753    uint32_t val;
6754    uint32_t val1;
6755    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6756                                  BXE_PATH0_LOAD_CNT_MASK;
6757    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6758                                   BXE_PATH0_LOAD_CNT_SHIFT;
6759
6760    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6761
6762    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6763    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6764
6765    /* get the current counter value */
6766    val1 = ((val & mask) >> shift);
6767
6768    /* set bit of this PF */
6769    val1 |= (1 << SC_ABS_FUNC(sc));
6770
6771    /* clear the old value */
6772    val &= ~mask;
6773
6774    /* set the new one */
6775    val |= ((val1 << shift) & mask);
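    /*
     * Illustrative example (hypothetical values): path 0, absolute function 2,
     * old register value 0x00000001 -> counter 0x01, set bit 2 -> 0x05,
     * new register value 0x00000005.
     */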
6776
6777    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6778
6779    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6780}
6781
6782/* clear pf load mark */
6783/* XXX needs to be under rtnl lock */
6784static uint8_t
6785bxe_clear_pf_load(struct bxe_softc *sc)
6786{
6787    uint32_t val1, val;
6788    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6789                                  BXE_PATH0_LOAD_CNT_MASK;
6790    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6791                                   BXE_PATH0_LOAD_CNT_SHIFT;
6792
6793    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6794    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6795    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6796
6797    /* get the current counter value */
6798    val1 = (val & mask) >> shift;
6799
6800    /* clear bit of that PF */
6801    val1 &= ~(1 << SC_ABS_FUNC(sc));
6802
6803    /* clear the old value */
6804    val &= ~mask;
6805
6806    /* set the new one */
6807    val |= ((val1 << shift) & mask);
6808
6809    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6810    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6811    return (val1 != 0);
6812}
6813
6814/* send a load request to the MCP and analyze the response */
6815static int
6816bxe_nic_load_request(struct bxe_softc *sc,
6817                     uint32_t         *load_code)
6818{
6819    /* init fw_seq */
6820    sc->fw_seq =
6821        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6822         DRV_MSG_SEQ_NUMBER_MASK);
6823
6824    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6825
6826    /* get the current FW pulse sequence */
6827    sc->fw_drv_pulse_wr_seq =
6828        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6829         DRV_PULSE_SEQ_MASK);
6830
6831    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6832          sc->fw_drv_pulse_wr_seq);
6833
6834    /* load request */
6835    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6836                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6837
6838    /* if the MCP fails to respond we must abort */
6839    if (!(*load_code)) {
6840        BLOGE(sc, "MCP response failure!\n");
6841        return (-1);
6842    }
6843
6844    /* if MCP refused then must abort */
6845    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6846        BLOGE(sc, "MCP refused load request\n");
6847        return (-1);
6848    }
6849
6850    return (0);
6851}
6852
6853/*
6854 * Check whether another PF has already loaded FW to chip. In virtualized
6855 * environments a PF from another VM may have already initialized the device
6856 * including loading FW.
6857 */
6858static int
6859bxe_nic_load_analyze_req(struct bxe_softc *sc,
6860                         uint32_t         load_code)
6861{
6862    uint32_t my_fw, loaded_fw;
6863
6864    /* is another pf loaded on this engine? */
6865    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6866        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6867        /* build my FW version dword */
6868        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6869                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6870                 (BCM_5710_FW_REVISION_VERSION << 16) +
6871                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
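        /*
         * Illustrative example (hypothetical version): FW 7.13.1.0 would pack
         * as 0x00010d07 (major 0x07, minor 0x0d << 8, revision 0x01 << 16,
         * engineering 0x00 << 24).
         */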
6872
6873        /* read loaded FW from chip */
6874        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6875        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6876              loaded_fw, my_fw);
6877
6878        /* abort nic load if version mismatch */
6879        if (my_fw != loaded_fw) {
6880            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
6881                  loaded_fw, my_fw);
6882            return (-1);
6883        }
6884    }
6885
6886    return (0);
6887}
6888
6889/* mark PMF if applicable */
6890static void
6891bxe_nic_load_pmf(struct bxe_softc *sc,
6892                 uint32_t         load_code)
6893{
6894    uint32_t ncsi_oem_data_addr;
6895
6896    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6897        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6898        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6899        /*
6900         * Barrier for ordering between the write to sc->port.pmf here and
6901         * the read from the periodic task.
6902         */
6903        sc->port.pmf = 1;
6904        mb();
6905    } else {
6906        sc->port.pmf = 0;
6907    }
6908
6909    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6910
6911    /* XXX needed? */
6912    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6913        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6914            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6915            if (ncsi_oem_data_addr) {
6916                REG_WR(sc,
6917                       (ncsi_oem_data_addr +
6918                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6919                       0);
6920            }
6921        }
6922    }
6923}
6924
6925static void
6926bxe_read_mf_cfg(struct bxe_softc *sc)
6927{
6928    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6929    int abs_func;
6930    int vn;
6931
6932    if (BXE_NOMCP(sc)) {
6933        return; /* what should be the default value in this case? */
6934    }
6935
6936    /*
6937     * The formula for computing the absolute function number is...
6938     * For 2 port configuration (4 functions per port):
6939     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6940     * For 4 port configuration (2 functions per port):
6941     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6942     */
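    /*
     * For example, in 4 port configuration with vn = 1, SC_PORT = 1 and
     * SC_PATH = 0: abs_func = 4*1 + 2*1 + 0 = 6.
     */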
6943    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6944        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6945        if (abs_func >= E1H_FUNC_MAX) {
6946            break;
6947        }
6948        sc->devinfo.mf_info.mf_config[vn] =
6949            MFCFG_RD(sc, func_mf_config[abs_func].config);
6950    }
6951
6952    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6953        FUNC_MF_CFG_FUNC_DISABLED) {
6954        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6955        sc->flags |= BXE_MF_FUNC_DIS;
6956    } else {
6957        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6958        sc->flags &= ~BXE_MF_FUNC_DIS;
6959    }
6960}
6961
6962/* acquire split MCP access lock register */
6963static int bxe_acquire_alr(struct bxe_softc *sc)
6964{
6965    uint32_t j, val;
6966
6967    for (j = 0; j < 1000; j++) {
6968        val = (1UL << 31);
6969        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6970        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6971        if (val & (1UL << 31))
6972            break;
6973
6974        DELAY(5000);
6975    }
6976
6977    if (!(val & (1UL << 31))) {
6978        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6979        return (-1);
6980    }
6981
6982    return (0);
6983}
6984
6985/* release split MCP access lock register */
6986static void bxe_release_alr(struct bxe_softc *sc)
6987{
6988    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6989}
6990
6991static void
6992bxe_fan_failure(struct bxe_softc *sc)
6993{
6994    int port = SC_PORT(sc);
6995    uint32_t ext_phy_config;
6996
6997    /* mark the failure */
6998    ext_phy_config =
6999        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7000
7001    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7002    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7003    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7004             ext_phy_config);
7005
7006    /* log the failure */
7007    BLOGW(sc, "Fan Failure has caused the driver to shut down "
7008              "the card to prevent permanent damage. "
7009              "Please contact OEM Support for assistance\n");
7010
7011    /* XXX */
7012#if 1
7013    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7014#else
7015    /*
7016     * Schedule device reset (unload)
7017     * This is due to some boards consuming sufficient power when driver is
7018     * up to overheat if fan fails.
7019     */
7020    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7021    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7022#endif
7023}
7024
7025/* this function is called upon a link interrupt */
7026static void
7027bxe_link_attn(struct bxe_softc *sc)
7028{
7029    uint32_t pause_enabled = 0;
7030    struct host_port_stats *pstats;
7031    int cmng_fns;
7032    struct bxe_fastpath *fp;
7033    int i;
7034
7035    /* Make sure that we are synced with the current statistics */
7036    bxe_stats_handle(sc, STATS_EVENT_STOP);
7037    BLOGI(sc, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7038    elink_link_update(&sc->link_params, &sc->link_vars);
7039
7040    if (sc->link_vars.link_up) {
7041
7042        /* dropless flow control */
7043        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7044            pause_enabled = 0;
7045
7046            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7047                pause_enabled = 1;
7048            }
7049
7050            REG_WR(sc,
7051                   (BAR_USTRORM_INTMEM +
7052                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7053                   pause_enabled);
7054        }
7055
7056        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7057            pstats = BXE_SP(sc, port_stats);
7058            /* reset old mac stats */
7059            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7060        }
7061
7062        if (sc->state == BXE_STATE_OPEN) {
7063            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7064        }
7065
7066        /* Restart tx when the link comes back. */
7067        FOR_EACH_ETH_QUEUE(sc, i) {
7068            fp = &sc->fp[i];
7069            taskqueue_enqueue(fp->tq, &fp->tx_task);
7070        }
7071    }
7072
7073    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7074        cmng_fns = bxe_get_cmng_fns_mode(sc);
7075
7076        if (cmng_fns != CMNG_FNS_NONE) {
7077            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7078            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7079        } else {
7080            /* rate shaping and fairness are disabled */
7081            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7082        }
7083    }
7084
7085    bxe_link_report_locked(sc);
7086
7087    if (IS_MF(sc)) {
7088        ; // XXX bxe_link_sync_notify(sc);
7089    }
7090}
7091
7092static void
7093bxe_attn_int_asserted(struct bxe_softc *sc,
7094                      uint32_t         asserted)
7095{
7096    int port = SC_PORT(sc);
7097    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7098                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7099    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7100                                        NIG_REG_MASK_INTERRUPT_PORT0;
7101    uint32_t aeu_mask;
7102    uint32_t nig_mask = 0;
7103    uint32_t reg_addr;
7104    uint32_t igu_acked;
7105    uint32_t cnt;
7106
7107    if (sc->attn_state & asserted) {
7108        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7109    }
7110
7111    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7112
7113    aeu_mask = REG_RD(sc, aeu_addr);
7114
7115    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7116          aeu_mask, asserted);
7117
7118    aeu_mask &= ~(asserted & 0x3ff);
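    /*
     * Illustrative example (hypothetical values): aeu_mask = 0x000003ff and
     * asserted = 0x00000100 yield a new mask of 0x000002ff.
     */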
7119
7120    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7121
7122    REG_WR(sc, aeu_addr, aeu_mask);
7123
7124    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7125
7126    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7127    sc->attn_state |= asserted;
7128    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7129
7130    if (asserted & ATTN_HARD_WIRED_MASK) {
7131        if (asserted & ATTN_NIG_FOR_FUNC) {
7132
7133            bxe_acquire_phy_lock(sc);
7134            /* save nig interrupt mask */
7135            nig_mask = REG_RD(sc, nig_int_mask_addr);
7136
7137            /* If nig_mask is not set, no need to call the update function */
7138            if (nig_mask) {
7139                REG_WR(sc, nig_int_mask_addr, 0);
7140
7141                bxe_link_attn(sc);
7142            }
7143
7144            /* handle unicore attn? */
7145        }
7146
7147        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7148            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7149        }
7150
7151        if (asserted & GPIO_2_FUNC) {
7152            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7153        }
7154
7155        if (asserted & GPIO_3_FUNC) {
7156            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7157        }
7158
7159        if (asserted & GPIO_4_FUNC) {
7160            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7161        }
7162
7163        if (port == 0) {
7164            if (asserted & ATTN_GENERAL_ATTN_1) {
7165                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7166                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7167            }
7168            if (asserted & ATTN_GENERAL_ATTN_2) {
7169                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7170                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7171            }
7172            if (asserted & ATTN_GENERAL_ATTN_3) {
7173                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7174                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7175            }
7176        } else {
7177            if (asserted & ATTN_GENERAL_ATTN_4) {
7178                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7179                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7180            }
7181            if (asserted & ATTN_GENERAL_ATTN_5) {
7182                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7183                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7184            }
7185            if (asserted & ATTN_GENERAL_ATTN_6) {
7186                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7187                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7188            }
7189        }
7190    } /* hardwired */
7191
7192    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7193        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7194    } else {
7195        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7196    }
7197
7198    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7199          asserted,
7200          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7201    REG_WR(sc, reg_addr, asserted);
7202
7203    /* now set back the mask */
7204    if (asserted & ATTN_NIG_FOR_FUNC) {
7205        /*
7206         * Verify that IGU ack through BAR was written before restoring
7207         * NIG mask. This loop should exit after 2-3 iterations max.
7208         */
7209        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7210            cnt = 0;
7211
7212            do {
7213                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7214            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7215                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7216
7217            if (!igu_acked) {
7218                BLOGE(sc, "Failed to verify IGU ack on time\n");
7219            }
7220
7221            mb();
7222        }
7223
7224        REG_WR(sc, nig_int_mask_addr, nig_mask);
7225
7226        bxe_release_phy_lock(sc);
7227    }
7228}
7229
7230static void
7231bxe_print_next_block(struct bxe_softc *sc,
7232                     int              idx,
7233                     const char       *blk)
7234{
7235    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7236}
7237
7238static int
7239bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7240                              uint32_t         sig,
7241                              int              par_num,
7242                              uint8_t          print)
7243{
7244    uint32_t cur_bit = 0;
7245    int i = 0;
7246
7247    for (i = 0; sig; i++) {
7248        cur_bit = ((uint32_t)0x1 << i);
7249        if (sig & cur_bit) {
7250            switch (cur_bit) {
7251            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7252                if (print)
7253                    bxe_print_next_block(sc, par_num++, "BRB");
7254                break;
7255            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7256                if (print)
7257                    bxe_print_next_block(sc, par_num++, "PARSER");
7258                break;
7259            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7260                if (print)
7261                    bxe_print_next_block(sc, par_num++, "TSDM");
7262                break;
7263            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7264                if (print)
7265                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7266                break;
7267            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7268                if (print)
7269                    bxe_print_next_block(sc, par_num++, "TCM");
7270                break;
7271            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7272                if (print)
7273                    bxe_print_next_block(sc, par_num++, "TSEMI");
7274                break;
7275            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7276                if (print)
7277                    bxe_print_next_block(sc, par_num++, "XPB");
7278                break;
7279            }
7280
7281            /* Clear the bit */
7282            sig &= ~cur_bit;
7283        }
7284    }
7285
7286    return (par_num);
7287}
7288
7289static int
7290bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7291                              uint32_t         sig,
7292                              int              par_num,
7293                              uint8_t          *global,
7294                              uint8_t          print)
7295{
7296    int i = 0;
7297    uint32_t cur_bit = 0;
7298    for (i = 0; sig; i++) {
7299        cur_bit = ((uint32_t)0x1 << i);
7300        if (sig & cur_bit) {
7301            switch (cur_bit) {
7302            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7303                if (print)
7304                    bxe_print_next_block(sc, par_num++, "PBF");
7305                break;
7306            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7307                if (print)
7308                    bxe_print_next_block(sc, par_num++, "QM");
7309                break;
7310            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7311                if (print)
7312                    bxe_print_next_block(sc, par_num++, "TM");
7313                break;
7314            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7315                if (print)
7316                    bxe_print_next_block(sc, par_num++, "XSDM");
7317                break;
7318            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7319                if (print)
7320                    bxe_print_next_block(sc, par_num++, "XCM");
7321                break;
7322            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7323                if (print)
7324                    bxe_print_next_block(sc, par_num++, "XSEMI");
7325                break;
7326            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7327                if (print)
7328                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7329                break;
7330            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7331                if (print)
7332                    bxe_print_next_block(sc, par_num++, "NIG");
7333                break;
7334            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7335                if (print)
7336                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7337                *global = TRUE;
7338                break;
7339            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7340                if (print)
7341                    bxe_print_next_block(sc, par_num++, "DEBUG");
7342                break;
7343            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7344                if (print)
7345                    bxe_print_next_block(sc, par_num++, "USDM");
7346                break;
7347            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7348                if (print)
7349                    bxe_print_next_block(sc, par_num++, "UCM");
7350                break;
7351            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7352                if (print)
7353                    bxe_print_next_block(sc, par_num++, "USEMI");
7354                break;
7355            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7356                if (print)
7357                    bxe_print_next_block(sc, par_num++, "UPB");
7358                break;
7359            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7360                if (print)
7361                    bxe_print_next_block(sc, par_num++, "CSDM");
7362                break;
7363            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7364                if (print)
7365                    bxe_print_next_block(sc, par_num++, "CCM");
7366                break;
7367            }
7368
7369            /* Clear the bit */
7370            sig &= ~cur_bit;
7371        }
7372    }
7373
7374    return (par_num);
7375}
7376
7377static int
7378bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7379                              uint32_t         sig,
7380                              int              par_num,
7381                              uint8_t          print)
7382{
7383    uint32_t cur_bit = 0;
7384    int i = 0;
7385
7386    for (i = 0; sig; i++) {
7387        cur_bit = ((uint32_t)0x1 << i);
7388        if (sig & cur_bit) {
7389            switch (cur_bit) {
7390            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7391                if (print)
7392                    bxe_print_next_block(sc, par_num++, "CSEMI");
7393                break;
7394            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7395                if (print)
7396                    bxe_print_next_block(sc, par_num++, "PXP");
7397                break;
7398            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7399                if (print)
7400                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7401                break;
7402            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7403                if (print)
7404                    bxe_print_next_block(sc, par_num++, "CFC");
7405                break;
7406            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7407                if (print)
7408                    bxe_print_next_block(sc, par_num++, "CDU");
7409                break;
7410            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7411                if (print)
7412                    bxe_print_next_block(sc, par_num++, "DMAE");
7413                break;
7414            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7415                if (print)
7416                    bxe_print_next_block(sc, par_num++, "IGU");
7417                break;
7418            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7419                if (print)
7420                    bxe_print_next_block(sc, par_num++, "MISC");
7421                break;
7422            }
7423
7424            /* Clear the bit */
7425            sig &= ~cur_bit;
7426        }
7427    }
7428
7429    return (par_num);
7430}
7431
7432static int
7433bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7434                              uint32_t         sig,
7435                              int              par_num,
7436                              uint8_t          *global,
7437                              uint8_t          print)
7438{
7439    uint32_t cur_bit = 0;
7440    int i = 0;
7441
7442    for (i = 0; sig; i++) {
7443        cur_bit = ((uint32_t)0x1 << i);
7444        if (sig & cur_bit) {
7445            switch (cur_bit) {
7446            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7447                if (print)
7448                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7449                *global = TRUE;
7450                break;
7451            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7452                if (print)
7453                    bxe_print_next_block(sc, par_num++,
7454                              "MCP UMP RX");
7455                *global = TRUE;
7456                break;
7457            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7458                if (print)
7459                    bxe_print_next_block(sc, par_num++,
7460                              "MCP UMP TX");
7461                *global = TRUE;
7462                break;
7463            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7464                if (print)
7465                    bxe_print_next_block(sc, par_num++,
7466                              "MCP SCPAD");
7467                *global = TRUE;
7468                break;
7469            }
7470
7471            /* Clear the bit */
7472            sig &= ~cur_bit;
7473        }
7474    }
7475
7476    return (par_num);
7477}
7478
7479static int
7480bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7481                              uint32_t         sig,
7482                              int              par_num,
7483                              uint8_t          print)
7484{
7485    uint32_t cur_bit = 0;
7486    int i = 0;
7487
7488    for (i = 0; sig; i++) {
7489        cur_bit = ((uint32_t)0x1 << i);
7490        if (sig & cur_bit) {
7491            switch (cur_bit) {
7492            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7493                if (print)
7494                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7495                break;
7496            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7497                if (print)
7498                    bxe_print_next_block(sc, par_num++, "ATC");
7499                break;
7500            }
7501
7502            /* Clear the bit */
7503            sig &= ~cur_bit;
7504        }
7505    }
7506
7507    return (par_num);
7508}
7509
7510static uint8_t
7511bxe_parity_attn(struct bxe_softc *sc,
7512                uint8_t          *global,
7513                uint8_t          print,
7514                uint32_t         *sig)
7515{
7516    int par_num = 0;
7517
7518    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7519        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7520        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7521        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7522        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7523        BLOGE(sc, "Parity error: HW block parity attention:\n"
7524                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7525              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7526              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7527              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7528              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7529              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7530
7531        if (print)
7532            BLOGI(sc, "Parity errors detected in blocks: ");
7533
7534        par_num =
7535            bxe_check_blocks_with_parity0(sc, sig[0] &
7536                                          HW_PRTY_ASSERT_SET_0,
7537                                          par_num, print);
7538        par_num =
7539            bxe_check_blocks_with_parity1(sc, sig[1] &
7540                                          HW_PRTY_ASSERT_SET_1,
7541                                          par_num, global, print);
7542        par_num =
7543            bxe_check_blocks_with_parity2(sc, sig[2] &
7544                                          HW_PRTY_ASSERT_SET_2,
7545                                          par_num, print);
7546        par_num =
7547            bxe_check_blocks_with_parity3(sc, sig[3] &
7548                                          HW_PRTY_ASSERT_SET_3,
7549                                          par_num, global, print);
7550        par_num =
7551            bxe_check_blocks_with_parity4(sc, sig[4] &
7552                                          HW_PRTY_ASSERT_SET_4,
7553                                          par_num, print);
7554
7555        if (print)
7556            BLOGI(sc, "\n");
7557
7558        return (TRUE);
7559    }
7560
7561    return (FALSE);
7562}
7563
7564static uint8_t
7565bxe_chk_parity_attn(struct bxe_softc *sc,
7566                    uint8_t          *global,
7567                    uint8_t          print)
7568{
7569    struct attn_route attn = { {0} };
7570    int port = SC_PORT(sc);
7571
7572    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7573    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7574    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7575    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7576
7577    /*
7578     * Since MCP attentions can't be disabled inside the block, we need to
7579     * read AEU registers to see whether they're currently disabled
7580     */
7581    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7582                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7583                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7584                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
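    /*
     * That is, every non-MCP bit of sig[3] is preserved (the ~MASK term),
     * while the MCP parity bits survive only if their enable bits are set
     * in the AEU enable register read above.
     */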
7585
7586
7587    if (!CHIP_IS_E1x(sc))
7588        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7589
7590    return (bxe_parity_attn(sc, global, print, attn.sig));
7591}
7592
7593static void
7594bxe_attn_int_deasserted4(struct bxe_softc *sc,
7595                         uint32_t         attn)
7596{
7597    uint32_t val;
7598
7599    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7600        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7601        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7602        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7603            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7604        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7605            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7606        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7607            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7608        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7609            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7610        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7611            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7612        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7613            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7614        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7615            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7616        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7617            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7618        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7619            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7620    }
7621
7622    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7623        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7624        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7625        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7626            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7627        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7628            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7629        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7630            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7631        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7632            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7633        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7634            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7635        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7636            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7637    }
7638
7639    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7640                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7641        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7642              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7643                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7644    }
7645}
7646
7647static void
7648bxe_e1h_disable(struct bxe_softc *sc)
7649{
7650    int port = SC_PORT(sc);
7651
7652    bxe_tx_disable(sc);
7653
7654    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7655}
7656
7657static void
7658bxe_e1h_enable(struct bxe_softc *sc)
7659{
7660    int port = SC_PORT(sc);
7661
7662    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7663
7664    // XXX bxe_tx_enable(sc);
7665}
7666
7667/*
7668 * called due to MCP event (on pmf):
7669 *   reread new bandwidth configuration
7670 *   configure FW
7671 *   notify other functions about the change
7672 */
7673static void
7674bxe_config_mf_bw(struct bxe_softc *sc)
7675{
7676    if (sc->link_vars.link_up) {
7677        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7678        // XXX bxe_link_sync_notify(sc);
7679    }
7680
7681    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7682}
7683
7684static void
7685bxe_set_mf_bw(struct bxe_softc *sc)
7686{
7687    bxe_config_mf_bw(sc);
7688    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7689}
7690
7691static void
7692bxe_handle_eee_event(struct bxe_softc *sc)
7693{
7694    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7695    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7696}
7697
7698#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7699
7700static void
7701bxe_drv_info_ether_stat(struct bxe_softc *sc)
7702{
7703    struct eth_stats_info *ether_stat =
7704        &sc->sp->drv_info_to_mcp.ether_stat;
7705
7706    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7707            ETH_STAT_INFO_VERSION_LEN);
7708
7709    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7710    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7711                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7712                                          ether_stat->mac_local + MAC_PAD,
7713                                          MAC_PAD, ETH_ALEN);
7714
7715    ether_stat->mtu_size = sc->mtu;
7716
7717    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7718    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7719        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7720    }
7721
7722    // XXX ether_stat->feature_flags |= ???;
7723
7724    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7725
7726    ether_stat->txq_size = sc->tx_ring_size;
7727    ether_stat->rxq_size = sc->rx_ring_size;
7728}
7729
7730static void
7731bxe_handle_drv_info_req(struct bxe_softc *sc)
7732{
7733    enum drv_info_opcode op_code;
7734    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7735
7736    /* if drv_info version supported by MFW doesn't match - send NACK */
7737    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7738        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7739        return;
7740    }
7741
7742    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7743               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7744
7745    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7746
7747    switch (op_code) {
7748    case ETH_STATS_OPCODE:
7749        bxe_drv_info_ether_stat(sc);
7750        break;
7751    case FCOE_STATS_OPCODE:
7752    case ISCSI_STATS_OPCODE:
7753    default:
7754        /* if op code isn't supported - send NACK */
7755        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7756        return;
7757    }
7758
7759    /*
7760     * If we got drv_info attn from MFW then these fields are defined in
7761     * shmem2 for sure
7762     */
7763    SHMEM2_WR(sc, drv_info_host_addr_lo,
7764              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7765    SHMEM2_WR(sc, drv_info_host_addr_hi,
7766              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7767
7768    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7769}
7770
7771static void
7772bxe_dcc_event(struct bxe_softc *sc,
7773              uint32_t         dcc_event)
7774{
7775    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7776
7777    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7778        /*
7779         * This is the only place besides the function initialization
7780         * where the sc->flags can change so it is done without any
7781         * locks
7782         */
7783        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7784            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7785            sc->flags |= BXE_MF_FUNC_DIS;
7786            bxe_e1h_disable(sc);
7787        } else {
7788            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7789            sc->flags &= ~BXE_MF_FUNC_DIS;
7790            bxe_e1h_enable(sc);
7791        }
7792        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7793    }
7794
7795    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7796        bxe_config_mf_bw(sc);
7797        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7798    }
7799
7800    /* Report results to MCP */
7801    if (dcc_event)
7802        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7803    else
7804        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7805}
7806
7807static void
7808bxe_pmf_update(struct bxe_softc *sc)
7809{
7810    int port = SC_PORT(sc);
7811    uint32_t val;
7812
7813    sc->port.pmf = 1;
7814    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7815
7816    /*
7817     * We need the mb() to ensure the ordering between the writing to
7818     * sc->port.pmf here and reading it from the bxe_periodic_task().
7819     */
7820    mb();
7821
7822    /* queue a periodic task */
7823    // XXX schedule task...
7824
7825    // XXX bxe_dcbx_pmf_update(sc);
7826
7827    /* enable nig attention */
7828    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
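    /*
     * Illustrative example: for VN 2 this is 0xff0f | (1 << 6) = 0xff4f.
     */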
7829    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7830        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7831        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7832    } else if (!CHIP_IS_E1x(sc)) {
7833        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7834        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7835    }
7836
7837    bxe_stats_handle(sc, STATS_EVENT_PMF);
7838}
7839
7840static int
7841bxe_mc_assert(struct bxe_softc *sc)
7842{
7843    char last_idx;
7844    int i, rc = 0;
7845    uint32_t row0, row1, row2, row3;
7846
7847    /* XSTORM */
7848    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7849    if (last_idx)
7850        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7851
7852    /* print the asserts */
7853    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7854
7855        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7856        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7857        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7858        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7859
7860        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7861            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7862                  i, row3, row2, row1, row0);
7863            rc++;
7864        } else {
7865            break;
7866        }
7867    }
7868
7869    /* TSTORM */
7870    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7871    if (last_idx) {
7872        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7873    }
7874
7875    /* print the asserts */
7876    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7877
7878        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7879        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7880        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7881        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7882
7883        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7884            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7885                  i, row3, row2, row1, row0);
7886            rc++;
7887        } else {
7888            break;
7889        }
7890    }
7891
7892    /* CSTORM */
7893    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7894    if (last_idx) {
7895        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7896    }
7897
7898    /* print the asserts */
7899    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7900
7901        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7902        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7903        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7904        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7905
7906        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7907            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7908                  i, row3, row2, row1, row0);
7909            rc++;
7910        } else {
7911            break;
7912        }
7913    }
7914
7915    /* USTORM */
7916    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7917    if (last_idx) {
7918        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7919    }
7920
7921    /* print the asserts */
7922    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7923
7924        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7925        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7926        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7927        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7928
7929        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7930            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7931                  i, row3, row2, row1, row0);
7932            rc++;
7933        } else {
7934            break;
7935        }
7936    }
7937
7938    return (rc);
7939}
7940
7941static void
7942bxe_attn_int_deasserted3(struct bxe_softc *sc,
7943                         uint32_t         attn)
7944{
7945    int func = SC_FUNC(sc);
7946    uint32_t val;
7947
7948    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7949
7950        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7951
7952            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7953            bxe_read_mf_cfg(sc);
7954            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7955                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7956            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7957
7958            if (val & DRV_STATUS_DCC_EVENT_MASK)
7959                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7960
7961            if (val & DRV_STATUS_SET_MF_BW)
7962                bxe_set_mf_bw(sc);
7963
7964            if (val & DRV_STATUS_DRV_INFO_REQ)
7965                bxe_handle_drv_info_req(sc);
7966
7967            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7968                bxe_pmf_update(sc);
7969
7970            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7971                bxe_handle_eee_event(sc);
7972
7973            if (sc->link_vars.periodic_flags &
7974                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7975                /* sync with link */
7976                bxe_acquire_phy_lock(sc);
7977                sc->link_vars.periodic_flags &=
7978                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7979                bxe_release_phy_lock(sc);
7980                if (IS_MF(sc))
7981                    ; // XXX bxe_link_sync_notify(sc);
7982                bxe_link_report(sc);
7983            }
7984
7985            /*
7986             * Always call it here: bxe_link_report() will
7987             * prevent the link indication duplication.
7988             */
7989            bxe_link_status_update(sc);
7990
7991        } else if (attn & BXE_MC_ASSERT_BITS) {
7992
7993            BLOGE(sc, "MC assert!\n");
7994            bxe_mc_assert(sc);
7995            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7996            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7997            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7998            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7999            bxe_panic(sc, ("MC assert!\n"));
8000
8001        } else if (attn & BXE_MCP_ASSERT) {
8002
8003            BLOGE(sc, "MCP assert!\n");
8004            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8005            // XXX bxe_fw_dump(sc);
8006
8007        } else {
8008            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8009        }
8010    }
8011
8012    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8013        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8014        if (attn & BXE_GRC_TIMEOUT) {
8015            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8016            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8017        }
8018        if (attn & BXE_GRC_RSV) {
8019            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8020            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8021        }
8022        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8023    }
8024}
8025
8026static void
8027bxe_attn_int_deasserted2(struct bxe_softc *sc,
8028                         uint32_t         attn)
8029{
8030    int port = SC_PORT(sc);
8031    int reg_offset;
8032    uint32_t val0, mask0, val1, mask1;
8033    uint32_t val;
8034
8035    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8036        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8037        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8038        /* CFC error attention */
8039        if (val & 0x2) {
8040            BLOGE(sc, "FATAL error from CFC\n");
8041        }
8042    }
8043
8044    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8045        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8046        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8047        /* RQ_USDMDP_FIFO_OVERFLOW */
8048        if (val & 0x18000) {
8049            BLOGE(sc, "FATAL error from PXP\n");
8050        }
8051
8052        if (!CHIP_IS_E1x(sc)) {
8053            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8054            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8055        }
8056    }
8057
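/*
 * Shorthand for the PGLUE EOP error status bit and the corresponding AEU
 * input bit; both are used by the CQ47854 workaround below.
 */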
8058#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8059#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8060
8061    if (attn & AEU_PXP2_HW_INT_BIT) {
8062        /* CQ47854 workaround: do not panic on
8063         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR.
8064         */
8065        if (!CHIP_IS_E1x(sc)) {
8066            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8067            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8068            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8069            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8070            /*
8071             * If PXP2_EOP_ERROR_BIT is the only bit set in
8072             * STS0 and STS1, clear it.
8073             *
8074             * We may lose additional attentions between reading
8075             * STS0 and STS_CLR0; in that case the user will not
8076             * be notified about them.
8077             */
8078            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8079                !(val1 & mask1))
8080                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8081
8082            /* print the register, since no one can restore it */
8083            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8084
8085            /*
8086             * If PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is set
8087             * then notify the user.
8088             */
8089            if (val0 & PXP2_EOP_ERROR_BIT) {
8090                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8091
8092                /*
8093                 * If only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8094                 * set, clear the attention from the PXP2 block without panicking.
8095                 */
8096                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8097                    ((val1 & mask1) == 0))
8098                    attn &= ~AEU_PXP2_HW_INT_BIT;
8099            }
8100        }
8101    }
8102
8103    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8104        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8105                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8106
8107        val = REG_RD(sc, reg_offset);
8108        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8109        REG_WR(sc, reg_offset, val);
8110
8111        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8112              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8113        bxe_panic(sc, ("HW block attention set2\n"));
8114    }
8115}
8116
8117static void
8118bxe_attn_int_deasserted1(struct bxe_softc *sc,
8119                         uint32_t         attn)
8120{
8121    int port = SC_PORT(sc);
8122    int reg_offset;
8123    uint32_t val;
8124
8125    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8126        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8127        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8128        /* DORQ discard attention */
8129        if (val & 0x2) {
8130            BLOGE(sc, "FATAL error from DORQ\n");
8131        }
8132    }
8133
8134    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8135        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8136                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8137
8138        val = REG_RD(sc, reg_offset);
8139        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8140        REG_WR(sc, reg_offset, val);
8141
8142        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8143              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8144        bxe_panic(sc, ("HW block attention set1\n"));
8145    }
8146}
8147
8148static void
8149bxe_attn_int_deasserted0(struct bxe_softc *sc,
8150                         uint32_t         attn)
8151{
8152    int port = SC_PORT(sc);
8153    int reg_offset;
8154    uint32_t val;
8155
8156    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8157                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8158
8159    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8160        val = REG_RD(sc, reg_offset);
8161        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8162        REG_WR(sc, reg_offset, val);
8163
8164        BLOGW(sc, "SPIO5 hw attention\n");
8165
8166        /* Fan failure attention */
8167        elink_hw_reset_phy(&sc->link_params);
8168        bxe_fan_failure(sc);
8169    }
8170
8171    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8172        bxe_acquire_phy_lock(sc);
8173        elink_handle_module_detect_int(&sc->link_params);
8174        bxe_release_phy_lock(sc);
8175    }
8176
8177    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8178        val = REG_RD(sc, reg_offset);
8179        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8180        REG_WR(sc, reg_offset, val);
8181
8182        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8183                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8184    }
8185}
8186
8187static void
8188bxe_attn_int_deasserted(struct bxe_softc *sc,
8189                        uint32_t         deasserted)
8190{
8191    struct attn_route attn;
8192    struct attn_route *group_mask;
8193    int port = SC_PORT(sc);
8194    int index;
8195    uint32_t reg_addr;
8196    uint32_t val;
8197    uint32_t aeu_mask;
8198    uint8_t global = FALSE;
8199
8200    /*
8201     * Need to take HW lock because MCP or other port might also
8202     * try to handle this event.
8203     */
8204    bxe_acquire_alr(sc);
8205
8206    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8207        /* XXX
8208         * In case of parity errors don't handle attentions so that
8209         * the other function can also "see" the parity errors.
8210         */
8211        sc->recovery_state = BXE_RECOVERY_INIT;
8212        // XXX schedule a recovery task...
8213        /* disable HW interrupts */
8214        bxe_int_disable(sc);
8215        bxe_release_alr(sc);
8216        return;
8217    }
8218
8219    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8220    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8221    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8222    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8223    if (!CHIP_IS_E1x(sc)) {
8224        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8225    } else {
8226        attn.sig[4] = 0;
8227    }
8228
8229    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8230          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8231
8232    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8233        if (deasserted & (1 << index)) {
8234            group_mask = &sc->attn_group[index];
8235
8236            BLOGD(sc, DBG_INTR,
8237                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8238                  group_mask->sig[0], group_mask->sig[1],
8239                  group_mask->sig[2], group_mask->sig[3],
8240                  group_mask->sig[4]);
8241
8242            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8243            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8244            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8245            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8246            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8247        }
8248    }
8249
8250    bxe_release_alr(sc);
8251
8252    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8253        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8254                    COMMAND_REG_ATTN_BITS_CLR);
8255    } else {
8256        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8257    }
8258
8259    val = ~deasserted;
8260    BLOGD(sc, DBG_INTR,
8261          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8262          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8263    REG_WR(sc, reg_addr, val);
8264
8265    if (~sc->attn_state & deasserted) {
8266        BLOGE(sc, "IGU error\n");
8267    }
8268
8269    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8270                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8271
8272    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8273
8274    aeu_mask = REG_RD(sc, reg_addr);
8275
8276    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8277          aeu_mask, deasserted);
8278    aeu_mask |= (deasserted & 0x3ff);
8279    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8280
8281    REG_WR(sc, reg_addr, aeu_mask);
8282    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8283
8284    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8285    sc->attn_state &= ~deasserted;
8286    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8287}
8288
8289static void
8290bxe_attn_int(struct bxe_softc *sc)
8291{
8292    /* read local copy of bits */
8293    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8294    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8295    uint32_t attn_state = sc->attn_state;
8296
8297    /* look for changed bits */
8298    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8299    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8300
8301    BLOGD(sc, DBG_INTR,
8302          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8303          attn_bits, attn_ack, asserted, deasserted);
8304
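    /*
     * Sanity check: a bit whose value agrees between attn_bits and attn_ack
     * (i.e. nothing is in flight) should not differ from our cached
     * attn_state.
     */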
8305    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8306        BLOGE(sc, "BAD attention state\n");
8307    }
8308
8309    /* handle bits that were raised */
8310    if (asserted) {
8311        bxe_attn_int_asserted(sc, asserted);
8312    }
8313
8314    if (deasserted) {
8315        bxe_attn_int_deasserted(sc, deasserted);
8316    }
8317}
8318
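/*
 * Compare the cached default status block indices against those last
 * written by the chip and return flags indicating whether attention
 * and/or slowpath (EQ) events need to be serviced.
 */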
8319static uint16_t
8320bxe_update_dsb_idx(struct bxe_softc *sc)
8321{
8322    struct host_sp_status_block *def_sb = sc->def_sb;
8323    uint16_t rc = 0;
8324
8325    mb(); /* status block is written to by the chip */
8326
8327    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8328        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8329        rc |= BXE_DEF_SB_ATT_IDX;
8330    }
8331
8332    if (sc->def_idx != def_sb->sp_sb.running_index) {
8333        sc->def_idx = def_sb->sp_sb.running_index;
8334        rc |= BXE_DEF_SB_IDX;
8335    }
8336
8337    mb();
8338
8339    return (rc);
8340}
8341
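/* Map a connection ID back to the queue state object of its fastpath. */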
8342static inline struct ecore_queue_sp_obj *
8343bxe_cid_to_q_obj(struct bxe_softc *sc,
8344                 uint32_t         cid)
8345{
8346    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8347    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8348}
8349
8350static void
8351bxe_handle_mcast_eqe(struct bxe_softc *sc)
8352{
8353    struct ecore_mcast_ramrod_params rparam;
8354    int rc;
8355
8356    memset(&rparam, 0, sizeof(rparam));
8357
8358    rparam.mcast_obj = &sc->mcast_obj;
8359
8360    BXE_MCAST_LOCK(sc);
8361
8362    /* clear pending state for the last command */
8363    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8364
8365    /* if there are pending mcast commands - send them */
8366    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8367        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8368        if (rc < 0) {
8369            BLOGD(sc, DBG_SP,
8370                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8371        }
8372    }
8373
8374    BXE_MCAST_UNLOCK(sc);
8375}
8376
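/*
 * Complete a pending classification (MAC filter) ramrod. The upper bits of
 * the echo field carry the pending filter type and the lower bits carry the
 * connection ID used to locate the vlan/mac object.
 */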
8377static void
8378bxe_handle_classification_eqe(struct bxe_softc      *sc,
8379                              union event_ring_elem *elem)
8380{
8381    unsigned long ramrod_flags = 0;
8382    int rc = 0;
8383    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8384    struct ecore_vlan_mac_obj *vlan_mac_obj;
8385
8386    /* always push next commands out, don't wait here */
8387    bit_set(&ramrod_flags, RAMROD_CONT);
8388
8389    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8390    case ECORE_FILTER_MAC_PENDING:
8391        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8392        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8393        break;
8394
8395    case ECORE_FILTER_MCAST_PENDING:
8396        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8397        /*
8398         * This is only relevant for 57710 where multicast MACs are
8399         * configured as unicast MACs using the same ramrod.
8400         */
8401        bxe_handle_mcast_eqe(sc);
8402        return;
8403
8404    default:
8405        BLOGE(sc, "Unsupported classification command: %d\n",
8406              elem->message.data.eth_event.echo);
8407        return;
8408    }
8409
8410    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8411
8412    if (rc < 0) {
8413        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8414    } else if (rc > 0) {
8415        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8416    }
8417}
8418
8419static void
8420bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8421                       union event_ring_elem *elem)
8422{
8423    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8424
8425    /* send rx_mode command again if was requested */
8426    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8427                               &sc->sp_state)) {
8428        bxe_set_storm_rx_mode(sc);
8429    }
8430}
8431
8432static void
8433bxe_update_eq_prod(struct bxe_softc *sc,
8434                   uint16_t         prod)
8435{
8436    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8437    wmb(); /* keep prod updates ordered */
8438}
8439
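/*
 * Process the slowpath event queue (EQ): walk the ring from sw_cons to
 * hw_cons, dispatch each ramrod completion to the matching queue or
 * function state-machine handler, then return the consumed entries to
 * eq_spq_left and advance the EQ producer.
 */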
8440static void
8441bxe_eq_int(struct bxe_softc *sc)
8442{
8443    uint16_t hw_cons, sw_cons, sw_prod;
8444    union event_ring_elem *elem;
8445    uint8_t echo;
8446    uint32_t cid;
8447    uint8_t opcode;
8448    int spqe_cnt = 0;
8449    struct ecore_queue_sp_obj *q_obj;
8450    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8451    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8452
8453    hw_cons = le16toh(*sc->eq_cons_sb);
8454
8455    /*
8456     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8457     * When we reach the next-page element we must adjust hw_cons so the
8458     * loop condition below is met. The next-page element is the size of a
8459     * regular element, hence the increment by 1.
8460     */
8461    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8462        hw_cons++;
8463    }
8464
8465    /*
8466     * This function never runs in parallel with itself for a given sc,
8467     * so no read memory barrier is needed here.
8468     */
8469    sw_cons = sc->eq_cons;
8470    sw_prod = sc->eq_prod;
8471
8472    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8473          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8474
8475    for (;
8476         sw_cons != hw_cons;
8477         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8478
8479        elem = &sc->eq[EQ_DESC(sw_cons)];
8480
8481        /* elem CID originates from FW, actually LE */
8482        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8483        opcode = elem->message.opcode;
8484
8485        /* handle eq element */
8486        switch (opcode) {
8487
8488        case EVENT_RING_OPCODE_STAT_QUERY:
8489            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8490                  sc->stats_comp++);
8491            /* nothing to do with stats comp */
8492            goto next_spqe;
8493
8494        case EVENT_RING_OPCODE_CFC_DEL:
8495            /* handle according to cid range */
8496            /* we may want to verify here that the sc state is HALTING */
8497            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8498            q_obj = bxe_cid_to_q_obj(sc, cid);
8499            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8500                break;
8501            }
8502            goto next_spqe;
8503
8504        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8505            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8506            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8507                break;
8508            }
8509            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8510            goto next_spqe;
8511
8512        case EVENT_RING_OPCODE_START_TRAFFIC:
8513            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8514            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8515                break;
8516            }
8517            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8518            goto next_spqe;
8519
8520        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8521            echo = elem->message.data.function_update_event.echo;
8522            if (echo == SWITCH_UPDATE) {
8523                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8524                if (f_obj->complete_cmd(sc, f_obj,
8525                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8526                    break;
8527                }
8528            }
8529            else {
8530                BLOGD(sc, DBG_SP,
8531                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8532            }
8533            goto next_spqe;
8534
8535        case EVENT_RING_OPCODE_FORWARD_SETUP:
8536            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8537            if (q_obj->complete_cmd(sc, q_obj,
8538                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8539                break;
8540            }
8541            goto next_spqe;
8542
8543        case EVENT_RING_OPCODE_FUNCTION_START:
8544            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8545            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8546                break;
8547            }
8548            goto next_spqe;
8549
8550        case EVENT_RING_OPCODE_FUNCTION_STOP:
8551            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8552            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8553                break;
8554            }
8555            goto next_spqe;
8556        }
8557
8558        switch (opcode | sc->state) {
8559        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8560        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8561            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8562            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8563            rss_raw->clear_pending(rss_raw);
8564            break;
8565
8566        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8567        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8568        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8569        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8570        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8571        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8572            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8573            bxe_handle_classification_eqe(sc, elem);
8574            break;
8575
8576        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8577        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8578        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8579            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8580            bxe_handle_mcast_eqe(sc);
8581            break;
8582
8583        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8584        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8585        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8586            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8587            bxe_handle_rx_mode_eqe(sc, elem);
8588            break;
8589
8590        default:
8591            /* unknown event: log an error and continue */
8592            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8593                  elem->message.opcode, sc->state);
8594        }
8595
8596next_spqe:
8597        spqe_cnt++;
8598    } /* for */
8599
8600    mb();
8601    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8602
8603    sc->eq_cons = sw_cons;
8604    sc->eq_prod = sw_prod;
8605
8606    /* make sure the above memory writes are issued before updating the producer */
8607    wmb();
8608
8609    /* update producer */
8610    bxe_update_eq_prod(sc, sc->eq_prod);
8611}
8612
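/*
 * Slowpath taskqueue handler: determine from the default status block
 * whether attention and/or EQ events are pending, service them, and then
 * re-enable the default status block interrupt.
 */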
8613static void
8614bxe_handle_sp_tq(void *context,
8615                 int  pending)
8616{
8617    struct bxe_softc *sc = (struct bxe_softc *)context;
8618    uint16_t status;
8619
8620    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8621
8622    /* what work needs to be performed? */
8623    status = bxe_update_dsb_idx(sc);
8624
8625    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8626
8627    /* HW attentions */
8628    if (status & BXE_DEF_SB_ATT_IDX) {
8629        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8630        bxe_attn_int(sc);
8631        status &= ~BXE_DEF_SB_ATT_IDX;
8632    }
8633
8634    /* SP events: STAT_QUERY and others */
8635    if (status & BXE_DEF_SB_IDX) {
8636        /* handle EQ completions */
8637        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8638        bxe_eq_int(sc);
8639        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8640                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8641        status &= ~BXE_DEF_SB_IDX;
8642    }
8643
8644    /* if status is non zero then something went wrong */
8645    if (__predict_false(status)) {
8646        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8647    }
8648
8649    /* ack status block only if something was actually handled */
8650    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8651               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8652
8653    /*
8654     * Must be called after the EQ processing (since eq leads to sriov
8655     * ramrod completion flows).
8656     * This flow may have been scheduled by the arrival of a ramrod
8657     * completion, or by the sriov code rescheduling itself.
8658     */
8659    // XXX bxe_iov_sp_task(sc);
8660
8661}
8662
8663static void
8664bxe_handle_fp_tq(void *context,
8665                 int  pending)
8666{
8667    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8668    struct bxe_softc *sc = fp->sc;
8669    uint8_t more_tx = FALSE;
8670    uint8_t more_rx = FALSE;
8671
8672    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8673
8674    /* XXX
8675     * IFF_DRV_RUNNING state can't be checked here since we process
8676     * slowpath events on a client queue during setup. Instead
8677     * we need to add a "process/continue" flag here that the driver
8678     * can use to tell the task here not to do anything.
8679     */
8680#if 0
8681    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8682        return;
8683    }
8684#endif
8685
8686    /* update the fastpath index */
8687    bxe_update_fp_sb_idx(fp);
8688
8689    /* XXX add loop here if ever support multiple tx CoS */
8690    /* fp->txdata[cos] */
8691    if (bxe_has_tx_work(fp)) {
8692        BXE_FP_TX_LOCK(fp);
8693        more_tx = bxe_txeof(sc, fp);
8694        BXE_FP_TX_UNLOCK(fp);
8695    }
8696
8697    if (bxe_has_rx_work(fp)) {
8698        more_rx = bxe_rxeof(sc, fp);
8699    }
8700
8701    if (more_rx /*|| more_tx*/) {
8702        /* still more work to do */
8703        taskqueue_enqueue(fp->tq, &fp->tq_task);
8704        return;
8705    }
8706
8707    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8708               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8709}
8710
8711static void
8712bxe_task_fp(struct bxe_fastpath *fp)
8713{
8714    struct bxe_softc *sc = fp->sc;
8715    uint8_t more_tx = FALSE;
8716    uint8_t more_rx = FALSE;
8717
8718    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8719
8720    /* update the fastpath index */
8721    bxe_update_fp_sb_idx(fp);
8722
8723    /* XXX add loop here if ever support multiple tx CoS */
8724    /* fp->txdata[cos] */
8725    if (bxe_has_tx_work(fp)) {
8726        BXE_FP_TX_LOCK(fp);
8727        more_tx = bxe_txeof(sc, fp);
8728        BXE_FP_TX_UNLOCK(fp);
8729    }
8730
8731    if (bxe_has_rx_work(fp)) {
8732        more_rx = bxe_rxeof(sc, fp);
8733    }
8734
8735    if (more_rx /*|| more_tx*/) {
8736        /* still more work to do, bail out of this ISR and process later */
8737        taskqueue_enqueue(fp->tq, &fp->tq_task);
8738        return;
8739    }
8740
8741    /*
8742     * Here we write the fastpath index taken before doing any tx or rx work.
8743     * It is quite possible that other hw events occurred up to this point
8744     * and were already processed accordingly above. Since we are about to
8745     * write an older fastpath index, another interrupt may arrive for which
8746     * we end up doing no work.
8747     */
8748    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8749               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8750}
8751
8752/*
8753 * Legacy interrupt entry point.
8754 *
8755 * Verifies that the controller generated the interrupt and
8756 * then calls a separate routine to handle the various
8757 * interrupt causes: link, RX, and TX.
8758 */
8759static void
8760bxe_intr_legacy(void *xsc)
8761{
8762    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8763    struct bxe_fastpath *fp;
8764    uint16_t status, mask;
8765    int i;
8766
8767    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8768
8769    /*
8770     * 0 for ustorm, 1 for cstorm
8771     * the bits returned from ack_int() are 0-15
8772     * bit 0 = attention status block
8773     * bit 1 = fast path status block
8774     * a mask of 0x2 or more = tx/rx event
8775     * a mask of 1 = slow path event
8776     */
8777
8778    status = bxe_ack_int(sc);
8779
8780    /* the interrupt is not for us */
8781    if (__predict_false(status == 0)) {
8782        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8783        return;
8784    }
8785
8786    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8787
8788    FOR_EACH_ETH_QUEUE(sc, i) {
8789        fp = &sc->fp[i];
8790        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8791        if (status & mask) {
8792            /* acknowledge and disable further fastpath interrupts */
8793            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8794            bxe_task_fp(fp);
8795            status &= ~mask;
8796        }
8797    }
8798
8799    if (__predict_false(status & 0x1)) {
8800        /* acknowledge and disable further slowpath interrupts */
8801        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8802
8803        /* schedule slowpath handler */
8804        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8805
8806        status &= ~0x1;
8807    }
8808
8809    if (__predict_false(status)) {
8810        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8811    }
8812}
8813
8814/* slowpath interrupt entry point */
8815static void
8816bxe_intr_sp(void *xsc)
8817{
8818    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8819
8820    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8821
8822    /* acknowledge and disable further slowpath interrupts */
8823    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8824
8825    /* schedule slowpath handler */
8826    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8827}
8828
8829/* fastpath interrupt entry point */
8830static void
8831bxe_intr_fp(void *xfp)
8832{
8833    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8834    struct bxe_softc *sc = fp->sc;
8835
8836    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8837
8838    BLOGD(sc, DBG_INTR,
8839          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8840          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8841
8842    /* acknowledge and disable further fastpath interrupts */
8843    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8844
8845    bxe_task_fp(fp);
8846}
8847
8848/* Release all interrupts allocated by the driver. */
8849static void
8850bxe_interrupt_free(struct bxe_softc *sc)
8851{
8852    int i;
8853
8854    switch (sc->interrupt_mode) {
8855    case INTR_MODE_INTX:
8856        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8857        if (sc->intr[0].resource != NULL) {
8858            bus_release_resource(sc->dev,
8859                                 SYS_RES_IRQ,
8860                                 sc->intr[0].rid,
8861                                 sc->intr[0].resource);
8862        }
8863        break;
8864    case INTR_MODE_MSI:
8865        for (i = 0; i < sc->intr_count; i++) {
8866            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8867            if (sc->intr[i].resource && sc->intr[i].rid) {
8868                bus_release_resource(sc->dev,
8869                                     SYS_RES_IRQ,
8870                                     sc->intr[i].rid,
8871                                     sc->intr[i].resource);
8872            }
8873        }
8874        pci_release_msi(sc->dev);
8875        break;
8876    case INTR_MODE_MSIX:
8877        for (i = 0; i < sc->intr_count; i++) {
8878            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8879            if (sc->intr[i].resource && sc->intr[i].rid) {
8880                bus_release_resource(sc->dev,
8881                                     SYS_RES_IRQ,
8882                                     sc->intr[i].rid,
8883                                     sc->intr[i].resource);
8884            }
8885        }
8886        pci_release_msi(sc->dev);
8887        break;
8888    default:
8889        /* nothing to do as initial allocation failed */
8890        break;
8891    }
8892}
8893
8894/*
8895 * This function determines and allocates the appropriate
8896 * interrupt based on system capabilities and user request.
8897 *
8898 * The user may force a particular interrupt mode, specify
8899 * the number of receive queues, specify the method for
8900 * distributing received frames to receive queues, or use
8901 * the default settings which will automatically select the
8902 * best supported combination.  In addition, the OS may or
8903 * may not support certain combinations of these settings.
8904 * This routine attempts to reconcile the settings requested
8905 * by the user with the capabilities available from the system
8906 * to select the optimal combination of features.
8907 *
8908 * Returns:
8909 *   0 = Success, !0 = Failure.
8910 */
8911static int
8912bxe_interrupt_alloc(struct bxe_softc *sc)
8913{
8914    int msix_count = 0;
8915    int msi_count = 0;
8916    int num_requested = 0;
8917    int num_allocated = 0;
8918    int rid, i, j;
8919    int rc;
8920
8921    /* get the number of available MSI/MSI-X interrupts from the OS */
8922    if (sc->interrupt_mode > 0) {
8923        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8924            msix_count = pci_msix_count(sc->dev);
8925        }
8926
8927        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8928            msi_count = pci_msi_count(sc->dev);
8929        }
8930
8931        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8932              msi_count, msix_count);
8933    }
8934
8935    do { /* try allocating MSI-X interrupt resources (at least 2) */
8936        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8937            break;
8938        }
8939
8940        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8941            (msix_count < 2)) {
8942            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8943            break;
8944        }
8945
8946        /* ask for the necessary number of MSI-X vectors */
8947        num_requested = min((sc->num_queues + 1), msix_count);
8948
8949        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8950
8951        num_allocated = num_requested;
8952        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8953            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8954            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8955            break;
8956        }
8957
8958        if (num_allocated < 2) { /* possible? */
8959            BLOGE(sc, "MSI-X allocation less than 2!\n");
8960            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8961            pci_release_msi(sc->dev);
8962            break;
8963        }
8964
8965        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8966              num_requested, num_allocated);
8967
8968        /* best effort so use the number of vectors allocated to us */
8969        sc->intr_count = num_allocated;
8970        sc->num_queues = num_allocated - 1;
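        /* one vector is dedicated to the slowpath, hence one less fastpath queue */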
8971
8972        rid = 1; /* initial resource identifier */
8973
8974        /* allocate the MSI-X vectors */
8975        for (i = 0; i < num_allocated; i++) {
8976            sc->intr[i].rid = (rid + i);
8977
8978            if ((sc->intr[i].resource =
8979                 bus_alloc_resource_any(sc->dev,
8980                                        SYS_RES_IRQ,
8981                                        &sc->intr[i].rid,
8982                                        RF_ACTIVE)) == NULL) {
8983                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8984                      i, (rid + i));
8985
8986                for (j = (i - 1); j >= 0; j--) {
8987                    bus_release_resource(sc->dev,
8988                                         SYS_RES_IRQ,
8989                                         sc->intr[j].rid,
8990                                         sc->intr[j].resource);
8991                }
8992
8993                sc->intr_count = 0;
8994                sc->num_queues = 0;
8995                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8996                pci_release_msi(sc->dev);
8997                break;
8998            }
8999
9000            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9001        }
9002    } while (0);
9003
9004    do { /* try allocating MSI vector resources (at least 2) */
9005        if (sc->interrupt_mode != INTR_MODE_MSI) {
9006            break;
9007        }
9008
9009        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9010            (msi_count < 1)) {
9011            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9012            break;
9013        }
9014
9015        /* ask for a single MSI vector */
9016        num_requested = 1;
9017
9018        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9019
9020        num_allocated = num_requested;
9021        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9022            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9023            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9024            break;
9025        }
9026
9027        if (num_allocated != 1) { /* possible? */
9028            BLOGE(sc, "MSI allocation is not 1!\n");
9029            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9030            pci_release_msi(sc->dev);
9031            break;
9032        }
9033
9034        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9035              num_requested, num_allocated);
9036
9037        /* best effort so use the number of vectors allocated to us */
9038        sc->intr_count = num_allocated;
9039        sc->num_queues = num_allocated;
9040
9041        rid = 1; /* initial resource identifier */
9042
9043        sc->intr[0].rid = rid;
9044
9045        if ((sc->intr[0].resource =
9046             bus_alloc_resource_any(sc->dev,
9047                                    SYS_RES_IRQ,
9048                                    &sc->intr[0].rid,
9049                                    RF_ACTIVE)) == NULL) {
9050            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9051            sc->intr_count = 0;
9052            sc->num_queues = 0;
9053            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9054            pci_release_msi(sc->dev);
9055            break;
9056        }
9057
9058        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9059    } while (0);
9060
9061    do { /* try allocating INTx vector resources */
9062        if (sc->interrupt_mode != INTR_MODE_INTX) {
9063            break;
9064        }
9065
9066        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9067
9068        /* only one vector for INTx */
9069        sc->intr_count = 1;
9070        sc->num_queues = 1;
9071
9072        rid = 0; /* initial resource identifier */
9073
9074        sc->intr[0].rid = rid;
9075
9076        if ((sc->intr[0].resource =
9077             bus_alloc_resource_any(sc->dev,
9078                                    SYS_RES_IRQ,
9079                                    &sc->intr[0].rid,
9080                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9081            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9082            sc->intr_count = 0;
9083            sc->num_queues = 0;
9084            sc->interrupt_mode = -1; /* Failed! */
9085            break;
9086        }
9087
9088        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9089    } while (0);
9090
9091    if (sc->interrupt_mode == -1) {
9092        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9093        rc = 1;
9094    } else {
9095        BLOGD(sc, DBG_LOAD,
9096              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9097              sc->interrupt_mode, sc->num_queues);
9098        rc = 0;
9099    }
9100
9101    return (rc);
9102}
9103
9104static void
9105bxe_interrupt_detach(struct bxe_softc *sc)
9106{
9107    struct bxe_fastpath *fp;
9108    int i;
9109
9110    /* release interrupt resources */
9111    for (i = 0; i < sc->intr_count; i++) {
9112        if (sc->intr[i].resource && sc->intr[i].tag) {
9113            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9114            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9115        }
9116    }
9117
9118    for (i = 0; i < sc->num_queues; i++) {
9119        fp = &sc->fp[i];
9120        if (fp->tq) {
9121            taskqueue_drain(fp->tq, &fp->tq_task);
9122            taskqueue_drain(fp->tq, &fp->tx_task);
9123            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9124                NULL))
9125                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9126            taskqueue_free(fp->tq);
9127            fp->tq = NULL;
9128        }
9129    }
9130
9131
9132    if (sc->sp_tq) {
9133        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9134        taskqueue_free(sc->sp_tq);
9135        sc->sp_tq = NULL;
9136    }
9137}
9138
9139/*
9140 * Enables interrupts and attaches the ISRs.
9141 *
9142 * When using multiple MSI/MSI-X vectors the first vector
9143 * is used for slowpath operations while all remaining
9144 * vectors are used for fastpath operations.  If only a
9145 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9146 * ISR must look for both slowpath and fastpath completions.
9147 */
9148static int
9149bxe_interrupt_attach(struct bxe_softc *sc)
9150{
9151    struct bxe_fastpath *fp;
9152    int rc = 0;
9153    int i;
9154
9155    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9156             "bxe%d_sp_tq", sc->unit);
9157    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9158    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9159                                 taskqueue_thread_enqueue,
9160                                 &sc->sp_tq);
9161    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9162                            "%s", sc->sp_tq_name);
9163
9164
9165    for (i = 0; i < sc->num_queues; i++) {
9166        fp = &sc->fp[i];
9167        snprintf(fp->tq_name, sizeof(fp->tq_name),
9168                 "bxe%d_fp%d_tq", sc->unit, i);
9169        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9170        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9171        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9172                                  taskqueue_thread_enqueue,
9173                                  &fp->tq);
9174        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9175                          bxe_tx_mq_start_deferred, fp);
9176        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9177                                "%s", fp->tq_name);
9178    }
9179
9180    /* setup interrupt handlers */
9181    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9182        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9183
9184        /*
9185         * Setup the interrupt handler. Note that we pass the driver instance
9186         * to the interrupt handler for the slowpath.
9187         */
9188        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9189                                 (INTR_TYPE_NET | INTR_MPSAFE),
9190                                 NULL, bxe_intr_sp, sc,
9191                                 &sc->intr[0].tag)) != 0) {
9192            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9193            goto bxe_interrupt_attach_exit;
9194        }
9195
9196        bus_describe_intr(sc->dev, sc->intr[0].resource,
9197                          sc->intr[0].tag, "sp");
9198
9199        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9200
9201        /* initialize the fastpath vectors (note the first was used for sp) */
9202        for (i = 0; i < sc->num_queues; i++) {
9203            fp = &sc->fp[i];
9204            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9205
9206            /*
9207             * Setup the interrupt handler. Note that we pass the
9208             * fastpath context to the interrupt handler in this
9209             * case.
9210             */
9211            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9212                                     (INTR_TYPE_NET | INTR_MPSAFE),
9213                                     NULL, bxe_intr_fp, fp,
9214                                     &sc->intr[i + 1].tag)) != 0) {
9215                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9216                      (i + 1), rc);
9217                goto bxe_interrupt_attach_exit;
9218            }
9219
9220            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9221                              sc->intr[i + 1].tag, "fp%02d", i);
9222
9223            /* bind the fastpath instance to a cpu */
9224            if (sc->num_queues > 1) {
9225                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9226            }
9227
9228            fp->state = BXE_FP_STATE_IRQ;
9229        }
9230    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9231        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9232
9233        /*
9234         * Setup the interrupt handler. Note that we pass the
9235         * driver instance to the interrupt handler which
9236         * will handle both the slowpath and fastpath.
9237         */
9238        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9239                                 (INTR_TYPE_NET | INTR_MPSAFE),
9240                                 NULL, bxe_intr_legacy, sc,
9241                                 &sc->intr[0].tag)) != 0) {
9242            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9243            goto bxe_interrupt_attach_exit;
9244        }
9245
9246    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9247        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9248
9249        /*
9250         * Setup the interrupt handler. Note that we pass the
9251         * driver instance to the interrupt handler which
9252         * will handle both the slowpath and fastpath.
9253         */
9254        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9255                                 (INTR_TYPE_NET | INTR_MPSAFE),
9256                                 NULL, bxe_intr_legacy, sc,
9257                                 &sc->intr[0].tag)) != 0) {
9258            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9259            goto bxe_interrupt_attach_exit;
9260        }
9261    }
9262
9263bxe_interrupt_attach_exit:
9264
9265    return (rc);
9266}
9267
9268static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9269static int  bxe_init_hw_common(struct bxe_softc *sc);
9270static int  bxe_init_hw_port(struct bxe_softc *sc);
9271static int  bxe_init_hw_func(struct bxe_softc *sc);
9272static void bxe_reset_common(struct bxe_softc *sc);
9273static void bxe_reset_port(struct bxe_softc *sc);
9274static void bxe_reset_func(struct bxe_softc *sc);
9275static int  bxe_gunzip_init(struct bxe_softc *sc);
9276static void bxe_gunzip_end(struct bxe_softc *sc);
9277static int  bxe_init_firmware(struct bxe_softc *sc);
9278static void bxe_release_firmware(struct bxe_softc *sc);
9279
9280static struct
9281ecore_func_sp_drv_ops bxe_func_sp_drv = {
9282    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9283    .init_hw_cmn      = bxe_init_hw_common,
9284    .init_hw_port     = bxe_init_hw_port,
9285    .init_hw_func     = bxe_init_hw_func,
9286
9287    .reset_hw_cmn     = bxe_reset_common,
9288    .reset_hw_port    = bxe_reset_port,
9289    .reset_hw_func    = bxe_reset_func,
9290
9291    .gunzip_init      = bxe_gunzip_init,
9292    .gunzip_end       = bxe_gunzip_end,
9293
9294    .init_fw          = bxe_init_firmware,
9295    .release_fw       = bxe_release_firmware,
9296};
9297
9298static void
9299bxe_init_func_obj(struct bxe_softc *sc)
9300{
9301    sc->dmae_ready = 0;
9302
9303    ecore_init_func_obj(sc,
9304                        &sc->func_obj,
9305                        BXE_SP(sc, func_rdata),
9306                        BXE_SP_MAPPING(sc, func_rdata),
9307                        BXE_SP(sc, func_afex_rdata),
9308                        BXE_SP_MAPPING(sc, func_afex_rdata),
9309                        &bxe_func_sp_drv);
9310}
9311
9312static int
9313bxe_init_hw(struct bxe_softc *sc,
9314            uint32_t         load_code)
9315{
9316    struct ecore_func_state_params func_params = { NULL };
9317    int rc;
9318
9319    /* prepare the parameters for function state transitions */
9320    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9321
9322    func_params.f_obj = &sc->func_obj;
9323    func_params.cmd = ECORE_F_CMD_HW_INIT;
9324
9325    func_params.params.hw_init.load_phase = load_code;
9326
9327    /*
9328     * Via a plethora of function pointers, we will eventually reach
9329     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9330     */
9331    rc = ecore_func_state_change(sc, &func_params);
9332
9333    return (rc);
9334}
9335
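/*
 * Fill 'len' bytes of device memory at 'addr' with the 'fill' pattern,
 * using 32-bit writes when both address and length are dword aligned and
 * byte writes otherwise.
 */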
9336static void
9337bxe_fill(struct bxe_softc *sc,
9338         uint32_t         addr,
9339         int              fill,
9340         uint32_t         len)
9341{
9342    uint32_t i;
9343
9344    if (!(len % 4) && !(addr % 4)) {
9345        for (i = 0; i < len; i += 4) {
9346            REG_WR(sc, (addr + i), fill);
9347        }
9348    } else {
9349        for (i = 0; i < len; i++) {
9350            REG_WR8(sc, (addr + i), fill);
9351        }
9352    }
9353}
9354
9355/* writes FP SP data to FW - data_size in dwords */
9356static void
9357bxe_wr_fp_sb_data(struct bxe_softc *sc,
9358                  int              fw_sb_id,
9359                  uint32_t         *sb_data_p,
9360                  uint32_t         data_size)
9361{
9362    int index;
9363
9364    for (index = 0; index < data_size; index++) {
9365        REG_WR(sc,
9366               (BAR_CSTRORM_INTMEM +
9367                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9368                (sizeof(uint32_t) * index)),
9369               *(sb_data_p + index));
9370    }
9371}
9372
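/*
 * Zero out a fastpath status block in CSTORM internal memory and mark its
 * data as disabled.
 */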
9373static void
9374bxe_zero_fp_sb(struct bxe_softc *sc,
9375               int              fw_sb_id)
9376{
9377    struct hc_status_block_data_e2 sb_data_e2;
9378    struct hc_status_block_data_e1x sb_data_e1x;
9379    uint32_t *sb_data_p;
9380    uint32_t data_size = 0;
9381
9382    if (!CHIP_IS_E1x(sc)) {
9383        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9384        sb_data_e2.common.state = SB_DISABLED;
9385        sb_data_e2.common.p_func.vf_valid = FALSE;
9386        sb_data_p = (uint32_t *)&sb_data_e2;
9387        data_size = (sizeof(struct hc_status_block_data_e2) /
9388                     sizeof(uint32_t));
9389    } else {
9390        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9391        sb_data_e1x.common.state = SB_DISABLED;
9392        sb_data_e1x.common.p_func.vf_valid = FALSE;
9393        sb_data_p = (uint32_t *)&sb_data_e1x;
9394        data_size = (sizeof(struct hc_status_block_data_e1x) /
9395                     sizeof(uint32_t));
9396    }
9397
9398    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9399
9400    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9401             0, CSTORM_STATUS_BLOCK_SIZE);
9402    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9403             0, CSTORM_SYNC_BLOCK_SIZE);
9404}
9405
9406static void
9407bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9408                  struct hc_sp_status_block_data *sp_sb_data)
9409{
9410    int i;
9411
9412    for (i = 0;
9413         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9414         i++) {
9415        REG_WR(sc,
9416               (BAR_CSTRORM_INTMEM +
9417                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9418                (i * sizeof(uint32_t))),
9419               *((uint32_t *)sp_sb_data + i));
9420    }
9421}
9422
9423static void
9424bxe_zero_sp_sb(struct bxe_softc *sc)
9425{
9426    struct hc_sp_status_block_data sp_sb_data;
9427
9428    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9429
9430    sp_sb_data.state           = SB_DISABLED;
9431    sp_sb_data.p_func.vf_valid = FALSE;
9432
9433    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9434
9435    bxe_fill(sc,
9436             (BAR_CSTRORM_INTMEM +
9437              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9438              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9439    bxe_fill(sc,
9440             (BAR_CSTRORM_INTMEM +
9441              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9442              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9443}
9444
9445static void
9446bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9447                             int                       igu_sb_id,
9448                             int                       igu_seg_id)
9449{
9450    hc_sm->igu_sb_id      = igu_sb_id;
9451    hc_sm->igu_seg_id     = igu_seg_id;
9452    hc_sm->timer_value    = 0xFF;
9453    hc_sm->time_to_expire = 0xFFFFFFFF;
9454}
9455
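/*
 * Associate each host-coalescing index with its state machine: the RX CQ
 * index is driven by the RX state machine and the TX CQ indices by the TX
 * state machine.
 */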
9456static void
9457bxe_map_sb_state_machines(struct hc_index_data *index_data)
9458{
9459    /* zero out state machine indices */
9460
9461    /* rx indices */
9462    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9463
9464    /* tx indices */
9465    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9466    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9467    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9468    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9469
9470    /* map indices */
9471
9472    /* rx indices */
9473    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9474        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9475
9476    /* tx indices */
9477    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9478        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9479    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9480        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9481    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9482        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9483    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9484        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9485}
9486
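/*
 * Initialize a fastpath status block: zero it, fill in the E2 or E1x data
 * (host buffer address, PF/VF ids, state machine mapping) and write the
 * result to CSTORM internal memory.
 */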
9487static void
9488bxe_init_sb(struct bxe_softc *sc,
9489            bus_addr_t       busaddr,
9490            int              vfid,
9491            uint8_t          vf_valid,
9492            int              fw_sb_id,
9493            int              igu_sb_id)
9494{
9495    struct hc_status_block_data_e2  sb_data_e2;
9496    struct hc_status_block_data_e1x sb_data_e1x;
9497    struct hc_status_block_sm       *hc_sm_p;
9498    uint32_t *sb_data_p;
9499    int igu_seg_id;
9500    int data_size;
9501
9502    if (CHIP_INT_MODE_IS_BC(sc)) {
9503        igu_seg_id = HC_SEG_ACCESS_NORM;
9504    } else {
9505        igu_seg_id = IGU_SEG_ACCESS_NORM;
9506    }
9507
9508    bxe_zero_fp_sb(sc, fw_sb_id);
9509
9510    if (!CHIP_IS_E1x(sc)) {
9511        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9512        sb_data_e2.common.state = SB_ENABLED;
9513        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9514        sb_data_e2.common.p_func.vf_id = vfid;
9515        sb_data_e2.common.p_func.vf_valid = vf_valid;
9516        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9517        sb_data_e2.common.same_igu_sb_1b = TRUE;
9518        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9519        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9520        hc_sm_p = sb_data_e2.common.state_machine;
9521        sb_data_p = (uint32_t *)&sb_data_e2;
9522        data_size = (sizeof(struct hc_status_block_data_e2) /
9523                     sizeof(uint32_t));
9524        bxe_map_sb_state_machines(sb_data_e2.index_data);
9525    } else {
9526        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9527        sb_data_e1x.common.state = SB_ENABLED;
9528        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9529        sb_data_e1x.common.p_func.vf_id = 0xff;
9530        sb_data_e1x.common.p_func.vf_valid = FALSE;
9531        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9532        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9533        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9534        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9535        hc_sm_p = sb_data_e1x.common.state_machine;
9536        sb_data_p = (uint32_t *)&sb_data_e1x;
9537        data_size = (sizeof(struct hc_status_block_data_e1x) /
9538                     sizeof(uint32_t));
9539        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9540    }
9541
9542    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9543    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9544
9545    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9546
9547    /* write indices to HW - PCI guarantees endianity of regpairs */
9548    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9549}
9550
9551static inline uint8_t
9552bxe_fp_qzone_id(struct bxe_fastpath *fp)
9553{
9554    if (CHIP_IS_E1x(fp->sc)) {
9555        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9556    } else {
9557        return (fp->cl_id);
9558    }
9559}
9560
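/*
 * Compute the USTORM internal-memory offset at which the RX producers
 * (BD, CQE and SGE) for this queue are published; the result is cached
 * in fp->ustorm_rx_prods_offset and used by bxe_update_rx_prod().
 */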
9561static inline uint32_t
9562bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9563                           struct bxe_fastpath *fp)
9564{
9565    uint32_t offset = BAR_USTRORM_INTMEM;
9566
9567    if (!CHIP_IS_E1x(sc)) {
9568        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9569    } else {
9570        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9571    }
9572
9573    return (offset);
9574}
9575
9576static void
9577bxe_init_eth_fp(struct bxe_softc *sc,
9578                int              idx)
9579{
9580    struct bxe_fastpath *fp = &sc->fp[idx];
9581    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9582    unsigned long q_type = 0;
9583    int cos;
9584
9585    fp->sc    = sc;
9586    fp->index = idx;
9587
9588    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9589    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9590
9591    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9592                    (SC_L_ID(sc) + idx) :
9593                    /* want client ID same as IGU SB ID for non-E1x */
9594                    fp->igu_sb_id;
9595    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9596
9597    /* setup sb indices */
9598    if (!CHIP_IS_E1x(sc)) {
9599        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9600        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9601    } else {
9602        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9603        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9604    }
9605
9606    /* init shortcut */
9607    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9608
9609    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9610
9611    /*
9612     * XXX If multiple CoS is ever supported then each fastpath structure
9613     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9614     */
9615    for (cos = 0; cos < sc->max_cos; cos++) {
9616        cids[cos] = idx;
9617    }
9618    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9619
9620    /* nothing more for a VF to do */
9621    if (IS_VF(sc)) {
9622        return;
9623    }
9624
9625    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9626                fp->fw_sb_id, fp->igu_sb_id);
9627
9628    bxe_update_fp_sb_idx(fp);
9629
9630    /* Configure Queue State object */
9631    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9632    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9633
9634    ecore_init_queue_obj(sc,
9635                         &sc->sp_objs[idx].q_obj,
9636                         fp->cl_id,
9637                         cids,
9638                         sc->max_cos,
9639                         SC_FUNC(sc),
9640                         BXE_SP(sc, q_rdata),
9641                         BXE_SP_MAPPING(sc, q_rdata),
9642                         q_type);
9643
9644    /* configure classification DBs */
9645    ecore_init_mac_obj(sc,
9646                       &sc->sp_objs[idx].mac_obj,
9647                       fp->cl_id,
9648                       idx,
9649                       SC_FUNC(sc),
9650                       BXE_SP(sc, mac_rdata),
9651                       BXE_SP_MAPPING(sc, mac_rdata),
9652                       ECORE_FILTER_MAC_PENDING,
9653                       &sc->sp_state,
9654                       ECORE_OBJ_TYPE_RX_TX,
9655                       &sc->macs_pool);
9656
9657    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9658          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9659}
9660
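/*
 * Publish new RX BD, CQE and SGE producer values to the chip. The
 * ustorm_eth_rx_producers structure is written to USTORM internal memory
 * one 32-bit word at a time at the offset cached in the fastpath.
 */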
9661static inline void
9662bxe_update_rx_prod(struct bxe_softc    *sc,
9663                   struct bxe_fastpath *fp,
9664                   uint16_t            rx_bd_prod,
9665                   uint16_t            rx_cq_prod,
9666                   uint16_t            rx_sge_prod)
9667{
9668    struct ustorm_eth_rx_producers rx_prods = { 0 };
9669    uint32_t i;
9670
9671    /* update producers */
9672    rx_prods.bd_prod  = rx_bd_prod;
9673    rx_prods.cqe_prod = rx_cq_prod;
9674    rx_prods.sge_prod = rx_sge_prod;
9675
9676    /*
9677     * Make sure that the BD and SGE data is updated before updating the
9678     * producers since FW might read the BD/SGE right after the producer
9679     * is updated.
9680     * This is only applicable for weak-ordered memory model archs such
9681     * as IA-64. The following barrier is also mandatory since FW will
9682     * as IA-64. The following barrier is also mandatory since the FW
9683     * assumes BDs must have buffers.
9684    wmb();
9685
9686    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9687        REG_WR(sc,
9688               (fp->ustorm_rx_prods_offset + (i * 4)),
9689               ((uint32_t *)&rx_prods)[i]);
9690    }
9691
9692    wmb(); /* keep prod updates ordered */
9693
9694    BLOGD(sc, DBG_RX,
9695          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9696          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9697}
9698
9699static void
9700bxe_init_rx_rings(struct bxe_softc *sc)
9701{
9702    struct bxe_fastpath *fp;
9703    int i;
9704
9705    for (i = 0; i < sc->num_queues; i++) {
9706        fp = &sc->fp[i];
9707
9708        fp->rx_bd_cons = 0;
9709
9710        /*
9711         * Activate the BD ring...
9712         * Warning, this will generate an interrupt (to the TSTORM)
9713         * so this can only be done after the chip is initialized
9714         */
9715        bxe_update_rx_prod(sc, fp,
9716                           fp->rx_bd_prod,
9717                           fp->rx_cq_prod,
9718                           fp->rx_sge_prod);
9719
9720        if (i != 0) {
9721            continue;
9722        }
9723
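        /*
         * E1 only: publish the low and high dwords of queue 0's RCQ
         * address to the USTORM memory workaround location.
         */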
9724        if (CHIP_IS_E1(sc)) {
9725            REG_WR(sc,
9726                   (BAR_USTRORM_INTMEM +
9727                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9728                   U64_LO(fp->rcq_dma.paddr));
9729            REG_WR(sc,
9730                   (BAR_USTRORM_INTMEM +
9731                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9732                   U64_HI(fp->rcq_dma.paddr));
9733        }
9734    }
9735}
9736
9737static void
9738bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9739{
9740    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9741    fp->tx_db.data.zero_fill1 = 0;
9742    fp->tx_db.data.prod = 0;
9743
9744    fp->tx_pkt_prod = 0;
9745    fp->tx_pkt_cons = 0;
9746    fp->tx_bd_prod = 0;
9747    fp->tx_bd_cons = 0;
9748    fp->eth_q_stats.tx_pkts = 0;
9749}
9750
9751static inline void
9752bxe_init_tx_rings(struct bxe_softc *sc)
9753{
9754    int i;
9755
9756    for (i = 0; i < sc->num_queues; i++) {
9757        bxe_init_tx_ring_one(&sc->fp[i]);
9758    }
9759}
9760
9761static void
9762bxe_init_def_sb(struct bxe_softc *sc)
9763{
9764    struct host_sp_status_block *def_sb = sc->def_sb;
9765    bus_addr_t mapping = sc->def_sb_dma.paddr;
9766    int igu_sp_sb_index;
9767    int igu_seg_id;
9768    int port = SC_PORT(sc);
9769    int func = SC_FUNC(sc);
9770    int reg_offset, reg_offset_en5;
9771    uint64_t section;
9772    int index, sindex;
9773    struct hc_sp_status_block_data sp_sb_data;
9774
9775    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9776
9777    if (CHIP_INT_MODE_IS_BC(sc)) {
9778        igu_sp_sb_index = DEF_SB_IGU_ID;
9779        igu_seg_id = HC_SEG_ACCESS_DEF;
9780    } else {
9781        igu_sp_sb_index = sc->igu_dsb_id;
9782        igu_seg_id = IGU_SEG_ACCESS_DEF;
9783    }
9784
9785    /* attentions */
9786    section = ((uint64_t)mapping +
9787               offsetof(struct host_sp_status_block, atten_status_block));
9788    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9789    sc->attn_state = 0;
9790
9791    reg_offset = (port) ?
9792                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9793                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9794    reg_offset_en5 = (port) ?
9795                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9796                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9797
9798    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9799        /* take care of sig[0]..sig[3]; sig[4] is handled below */
9800        for (sindex = 0; sindex < 4; sindex++) {
9801            sc->attn_group[index].sig[sindex] =
9802                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9803        }
9804
9805        if (!CHIP_IS_E1x(sc)) {
9806            /*
9807             * enable5 is separate from the rest of the registers,
9808             * and the address skip is 4 and not 16 between the
9809             * different groups
9810             */
9811            sc->attn_group[index].sig[4] =
9812                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9813        } else {
9814            sc->attn_group[index].sig[4] = 0;
9815        }
9816    }
9817
9818    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9819        reg_offset = (port) ?
9820                         HC_REG_ATTN_MSG1_ADDR_L :
9821                         HC_REG_ATTN_MSG0_ADDR_L;
9822        REG_WR(sc, reg_offset, U64_LO(section));
9823        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9824    } else if (!CHIP_IS_E1x(sc)) {
9825        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9826        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9827    }
9828
9829    section = ((uint64_t)mapping +
9830               offsetof(struct host_sp_status_block, sp_sb));
9831
9832    bxe_zero_sp_sb(sc);
9833
9834    /* PCI guarantees endianity of regpair */
9835    sp_sb_data.state           = SB_ENABLED;
9836    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9837    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9838    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9839    sp_sb_data.igu_seg_id      = igu_seg_id;
9840    sp_sb_data.p_func.pf_id    = func;
9841    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9842    sp_sb_data.p_func.vf_id    = 0xff;
9843
9844    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9845
9846    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9847}
9848
9849static void
9850bxe_init_sp_ring(struct bxe_softc *sc)
9851{
9852    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9853    sc->spq_prod_idx = 0;
9854    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9855    sc->spq_prod_bd = sc->spq;
9856    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9857}
9858
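/*
 * Chain the event queue pages into a ring: the last element of each page
 * holds the bus address of the next page, and (i % NUM_EQ_PAGES) makes the
 * final page point back at the first one.
 */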
9859static void
9860bxe_init_eq_ring(struct bxe_softc *sc)
9861{
9862    union event_ring_elem *elem;
9863    int i;
9864
9865    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9866        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9867
9868        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9869                                                 BCM_PAGE_SIZE *
9870                                                 (i % NUM_EQ_PAGES)));
9871        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9872                                                 BCM_PAGE_SIZE *
9873                                                 (i % NUM_EQ_PAGES)));
9874    }
9875
9876    sc->eq_cons    = 0;
9877    sc->eq_prod    = NUM_EQ_DESC;
9878    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9879
9880    atomic_store_rel_long(&sc->eq_spq_left,
9881                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9882                               NUM_EQ_DESC) - 1));
9883}
9884
9885static void
9886bxe_init_internal_common(struct bxe_softc *sc)
9887{
9888    int i;
9889
9890    /*
9891     * Zero this manually as its initialization is currently missing
9892     * in the initTool.
9893     */
9894    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9895        REG_WR(sc,
9896               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9897               0);
9898    }
9899
9900    if (!CHIP_IS_E1x(sc)) {
9901        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9902                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9903    }
9904}
9905
9906static void
9907bxe_init_internal(struct bxe_softc *sc,
9908                  uint32_t         load_code)
9909{
9910    switch (load_code) {
9911    case FW_MSG_CODE_DRV_LOAD_COMMON:
9912    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9913        bxe_init_internal_common(sc);
9914        /* no break */
9915
9916    case FW_MSG_CODE_DRV_LOAD_PORT:
9917        /* nothing to do */
9918        /* no break */
9919
9920    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9921        /* internal memory per function is initialized inside bxe_pf_init */
9922        break;
9923
9924    default:
9925        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9926        break;
9927    }
9928}
9929
9930static void
9931storm_memset_func_cfg(struct bxe_softc                         *sc,
9932                      struct tstorm_eth_function_common_config *tcfg,
9933                      uint16_t                                  abs_fid)
9934{
9935    uint32_t addr;
9936    size_t size;
9937
9938    addr = (BAR_TSTRORM_INTMEM +
9939            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9940    size = sizeof(struct tstorm_eth_function_common_config);
9941    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9942}
9943
9944static void
9945bxe_func_init(struct bxe_softc            *sc,
9946              struct bxe_func_init_params *p)
9947{
9948    struct tstorm_eth_function_common_config tcfg = { 0 };
9949
9950    if (CHIP_IS_E1x(sc)) {
9951        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9952    }
9953
9954    /* Enable the function in the FW */
9955    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9956    storm_memset_func_en(sc, p->func_id, 1);
9957
9958    /* spq */
9959    if (p->func_flgs & FUNC_FLG_SPQ) {
9960        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9961        REG_WR(sc,
9962               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9963               p->spq_prod);
9964    }
9965}
9966
9967/*
9968 * Calculates the per-VN min rates and stores them in the cmng input
9969 * structure; they are needed for further normalizing of the min_rates.
9970 * Behaviour:
9971 *   - hidden VNs get a min rate of zero;
9972 *   - non-hidden VNs with a zero min rate are raised to the default
9973 *     minimum (DEF_MIN_RATE);
9974 *   - if all configured min rates are zero (or ETS is enabled) the
9975 *     fairness algorithm is deactivated.
9976 */
9977static void
9978bxe_calc_vn_min(struct bxe_softc       *sc,
9979                struct cmng_init_input *input)
9980{
9981    uint32_t vn_cfg;
9982    uint32_t vn_min_rate;
9983    int all_zero = 1;
9984    int vn;
9985
9986    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9987        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9988        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9989                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
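        /*
         * Example: a configured MIN_BW field of 25 yields a vn_min_rate
         * of 25 * 100 = 2500 here.
         */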
9990
9991        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9992            /* skip hidden VNs */
9993            vn_min_rate = 0;
9994        } else if (!vn_min_rate) {
9995            /* If min rate is zero - set it to 100 */
9996            vn_min_rate = DEF_MIN_RATE;
9997        } else {
9998            all_zero = 0;
9999        }
10000
10001        input->vnic_min_rate[vn] = vn_min_rate;
10002    }
10003
10004    /* if ETS or all min rates are zeros - disable fairness */
10005    if (BXE_IS_ETS_ENABLED(sc)) {
10006        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10007        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10008    } else if (all_zero) {
10009        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10010        BLOGD(sc, DBG_LOAD,
10011              "Fairness disabled (all MIN values are zeroes)\n");
10012    } else {
10013        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10014    }
10015}
10016
10017static inline uint16_t
10018bxe_extract_max_cfg(struct bxe_softc *sc,
10019                    uint32_t         mf_cfg)
10020{
10021    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10022                        FUNC_MF_CFG_MAX_BW_SHIFT);
10023
10024    if (!max_cfg) {
10025        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10026        max_cfg = 100;
10027    }
10028
10029    return (max_cfg);
10030}
10031
10032static void
10033bxe_calc_vn_max(struct bxe_softc       *sc,
10034                int                    vn,
10035                struct cmng_init_input *input)
10036{
10037    uint16_t vn_max_rate;
10038    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10039    uint32_t max_cfg;
10040
10041    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10042        vn_max_rate = 0;
10043    } else {
10044        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10045
10046        if (IS_MF_SI(sc)) {
10047            /* max_cfg is a percentage of the link speed */
10048            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10049        } else { /* SD modes */
10050            /* max_cfg is absolute in 100Mb units */
10051            vn_max_rate = (max_cfg * 100);
10052        }
10053    }
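    /*
     * Example: with max_cfg = 50, SD mode gives vn_max_rate = 50 * 100 =
     * 5000, while SI mode on a 10000 Mbps link gives (10000 * 50) / 100 =
     * 5000 as well.
     */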
10054
10055    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10056
10057    input->vnic_max_rate[vn] = vn_max_rate;
10058}
10059
10060static void
10061bxe_cmng_fns_init(struct bxe_softc *sc,
10062                  uint8_t          read_cfg,
10063                  uint8_t          cmng_type)
10064{
10065    struct cmng_init_input input;
10066    int vn;
10067
10068    memset(&input, 0, sizeof(struct cmng_init_input));
10069
10070    input.port_rate = sc->link_vars.line_speed;
10071
10072    if (cmng_type == CMNG_FNS_MINMAX) {
10073        /* read mf conf from shmem */
10074        if (read_cfg) {
10075            bxe_read_mf_cfg(sc);
10076        }
10077
10078        /* get VN min rate and enable fairness if not 0 */
10079        bxe_calc_vn_min(sc, &input);
10080
10081        /* get VN max rate */
10082        if (sc->port.pmf) {
10083            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10084                bxe_calc_vn_max(sc, vn, &input);
10085            }
10086        }
10087
10088        /* always enable rate shaping and fairness */
10089        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10090
10091        ecore_init_cmng(&input, &sc->cmng);
10092        return;
10093    }
10094
10095    /* rate shaping and fairness are disabled */
10096    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10097}
10098
10099static int
10100bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10101{
10102    if (CHIP_REV_IS_SLOW(sc)) {
10103        return (CMNG_FNS_NONE);
10104    }
10105
10106    if (IS_MF(sc)) {
10107        return (CMNG_FNS_MINMAX);
10108    }
10109
10110    return (CMNG_FNS_NONE);
10111}
10112
10113static void
10114storm_memset_cmng(struct bxe_softc *sc,
10115                  struct cmng_init *cmng,
10116                  uint8_t          port)
10117{
10118    int vn;
10119    int func;
10120    uint32_t addr;
10121    size_t size;
10122
10123    addr = (BAR_XSTRORM_INTMEM +
10124            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10125    size = sizeof(struct cmng_struct_per_port);
10126    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10127
10128    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10129        func = func_by_vn(sc, vn);
10130
10131        addr = (BAR_XSTRORM_INTMEM +
10132                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10133        size = sizeof(struct rate_shaping_vars_per_vn);
10134        ecore_storm_memset_struct(sc, addr, size,
10135                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10136
10137        addr = (BAR_XSTRORM_INTMEM +
10138                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10139        size = sizeof(struct fairness_vars_per_vn);
10140        ecore_storm_memset_struct(sc, addr, size,
10141                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10142    }
10143}
10144
10145static void
10146bxe_pf_init(struct bxe_softc *sc)
10147{
10148    struct bxe_func_init_params func_init = { 0 };
10149    struct event_ring_data eq_data = { { 0 } };
10150    uint16_t flags;
10151
10152    if (!CHIP_IS_E1x(sc)) {
10153        /* reset IGU PF statistics: MSIX + ATTN */
10154        /* PF */
10155        REG_WR(sc,
10156               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10157                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10158                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10159               0);
10160        /* ATTN */
10161        REG_WR(sc,
10162               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10163                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10164                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10165                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10166               0);
10167    }
10168
10169    /* function setup flags */
10170    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10171
10172    /*
10173     * This flag is relevant for E1x only.
10174     * E2 doesn't have a TPA configuration at the function level.
10175     */
10176    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10177
10178    func_init.func_flgs = flags;
10179    func_init.pf_id     = SC_FUNC(sc);
10180    func_init.func_id   = SC_FUNC(sc);
10181    func_init.spq_map   = sc->spq_dma.paddr;
10182    func_init.spq_prod  = sc->spq_prod_idx;
10183
10184    bxe_func_init(sc, &func_init);
10185
10186    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10187
10188    /*
10189     * Congestion management values depend on the link rate.
10190     * There is no active link so initial link rate is set to 10Gbps.
10191     * When the link comes up the congestion management values are
10192     * re-calculated according to the actual link rate.
10193     */
10194    sc->link_vars.line_speed = SPEED_10000;
10195    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10196
10197    /* Only the PMF sets the HW */
10198    if (sc->port.pmf) {
10199        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10200    }
10201
10202    /* init Event Queue - PCI bus guarantees correct endianity */
10203    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10204    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10205    eq_data.producer     = sc->eq_prod;
10206    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10207    eq_data.sb_id        = DEF_SB_ID;
10208    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10209}
10210
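/*
 * Enable interrupts in the HC block according to the active interrupt mode
 * (MSI-X, MSI or INTx) and, on non-E1 chips, program the leading/trailing
 * edge registers.
 */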
10211static void
10212bxe_hc_int_enable(struct bxe_softc *sc)
10213{
10214    int port = SC_PORT(sc);
10215    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10216    uint32_t val = REG_RD(sc, addr);
10217    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10218    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10219                           (sc->intr_count == 1)) ? TRUE : FALSE;
10220    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10221
10222    if (msix) {
10223        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10224                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10225        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10226                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10227        if (single_msix) {
10228            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10229        }
10230    } else if (msi) {
10231        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10232        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10233                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10234                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10235    } else {
10236        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10237                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10238                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10239                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10240
10241        if (!CHIP_IS_E1(sc)) {
10242            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10243                  val, port, addr);
10244
10245            REG_WR(sc, addr, val);
10246
10247            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10248        }
10249    }
10250
10251    if (CHIP_IS_E1(sc)) {
10252        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10253    }
10254
10255    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10256          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10257
10258    REG_WR(sc, addr, val);
10259
10260    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10261    mb();
10262
10263    if (!CHIP_IS_E1(sc)) {
10264        /* init leading/trailing edge */
10265        if (IS_MF(sc)) {
10266            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10267            if (sc->port.pmf) {
10268                /* enable nig and gpio3 attention */
10269                val |= 0x1100;
10270            }
10271        } else {
10272            val = 0xffff;
10273        }
10274
10275        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10276        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10277    }
10278
10279    /* make sure that interrupts are indeed enabled from here on */
10280    mb();
10281}
10282
10283static void
10284bxe_igu_int_enable(struct bxe_softc *sc)
10285{
10286    uint32_t val;
10287    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10288    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10289                           (sc->intr_count == 1)) ? TRUE : FALSE;
10290    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10291
10292    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10293
10294    if (msix) {
10295        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10296                 IGU_PF_CONF_SINGLE_ISR_EN);
10297        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10298                IGU_PF_CONF_ATTN_BIT_EN);
10299        if (single_msix) {
10300            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10301        }
10302    } else if (msi) {
10303        val &= ~IGU_PF_CONF_INT_LINE_EN;
10304        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10305                IGU_PF_CONF_ATTN_BIT_EN |
10306                IGU_PF_CONF_SINGLE_ISR_EN);
10307    } else {
10308        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10309        val |= (IGU_PF_CONF_INT_LINE_EN |
10310                IGU_PF_CONF_ATTN_BIT_EN |
10311                IGU_PF_CONF_SINGLE_ISR_EN);
10312    }
10313
10314    /* clean previous status - need to configure igu prior to ack */
10315    if ((!msix) || single_msix) {
10316        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10317        bxe_ack_int(sc);
10318    }
10319
10320    val |= IGU_PF_CONF_FUNC_EN;
10321
10322    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10323          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10324
10325    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10326
10327    mb();
10328
10329    /* init leading/trailing edge */
10330    if (IS_MF(sc)) {
10331        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10332        if (sc->port.pmf) {
10333            /* enable nig and gpio3 attention */
10334            val |= 0x1100;
10335        }
10336    } else {
10337        val = 0xffff;
10338    }
10339
10340    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10341    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10342
10343    /* make sure that interrupts are indeed enabled from here on */
10344    mb();
10345}
10346
10347static void
10348bxe_int_enable(struct bxe_softc *sc)
10349{
10350    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10351        bxe_hc_int_enable(sc);
10352    } else {
10353        bxe_igu_int_enable(sc);
10354    }
10355}
10356
10357static void
10358bxe_hc_int_disable(struct bxe_softc *sc)
10359{
10360    int port = SC_PORT(sc);
10361    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10362    uint32_t val = REG_RD(sc, addr);
10363
10364    /*
10365     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10366     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10367     * HC block.
10368     */
10369    if (CHIP_IS_E1(sc)) {
10370        /*
10371         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10372         * to prevent the HC from sending interrupts after we exit the function
10373         */
10374        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10375
10376        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10377                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10378                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10379    } else {
10380        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10381                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10382                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10383                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10384    }
10385
10386    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10387
10388    /* flush all outstanding writes */
10389    mb();
10390
10391    REG_WR(sc, addr, val);
10392    if (REG_RD(sc, addr) != val) {
10393        BLOGE(sc, "proper val not read from HC IGU!\n");
10394    }
10395}
10396
10397static void
10398bxe_igu_int_disable(struct bxe_softc *sc)
10399{
10400    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10401
10402    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10403             IGU_PF_CONF_INT_LINE_EN |
10404             IGU_PF_CONF_ATTN_BIT_EN);
10405
10406    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10407
10408    /* flush all outstanding writes */
10409    mb();
10410
10411    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10412    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10413        BLOGE(sc, "proper val not read from IGU!\n");
10414    }
10415}
10416
10417static void
10418bxe_int_disable(struct bxe_softc *sc)
10419{
10420    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10421        bxe_hc_int_disable(sc);
10422    } else {
10423        bxe_igu_int_disable(sc);
10424    }
10425}
10426
10427static void
10428bxe_nic_init(struct bxe_softc *sc,
10429             int              load_code)
10430{
10431    int i;
10432
10433    for (i = 0; i < sc->num_queues; i++) {
10434        bxe_init_eth_fp(sc, i);
10435    }
10436
10437    rmb(); /* ensure status block indices were read */
10438
10439    bxe_init_rx_rings(sc);
10440    bxe_init_tx_rings(sc);
10441
10442    if (IS_VF(sc)) {
10443        return;
10444    }
10445
10446    /* initialize MOD_ABS interrupts */
10447    elink_init_mod_abs_int(sc, &sc->link_vars,
10448                           sc->devinfo.chip_id,
10449                           sc->devinfo.shmem_base,
10450                           sc->devinfo.shmem2_base,
10451                           SC_PORT(sc));
10452
10453    bxe_init_def_sb(sc);
10454    bxe_update_dsb_idx(sc);
10455    bxe_init_sp_ring(sc);
10456    bxe_init_eq_ring(sc);
10457    bxe_init_internal(sc, load_code);
10458    bxe_pf_init(sc);
10459    bxe_stats_init(sc);
10460
10461    /* flush all before enabling interrupts */
10462    mb();
10463
10464    bxe_int_enable(sc);
10465
10466    /* check for SPIO5 */
10467    bxe_attn_int_deasserted0(sc,
10468                             REG_RD(sc,
10469                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10470                                     SC_PORT(sc)*4)) &
10471                             AEU_INPUTS_ATTN_BITS_SPIO5);
10472}
10473
10474static inline void
10475bxe_init_objs(struct bxe_softc *sc)
10476{
10477    /* mcast rules must be added to tx if tx switching is enabled */
10478    ecore_obj_type o_type =
10479        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10480                                         ECORE_OBJ_TYPE_RX;
10481
10482    /* RX_MODE controlling object */
10483    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10484
10485    /* multicast configuration controlling object */
10486    ecore_init_mcast_obj(sc,
10487                         &sc->mcast_obj,
10488                         sc->fp[0].cl_id,
10489                         sc->fp[0].index,
10490                         SC_FUNC(sc),
10491                         SC_FUNC(sc),
10492                         BXE_SP(sc, mcast_rdata),
10493                         BXE_SP_MAPPING(sc, mcast_rdata),
10494                         ECORE_FILTER_MCAST_PENDING,
10495                         &sc->sp_state,
10496                         o_type);
10497
10498    /* Setup CAM credit pools */
10499    ecore_init_mac_credit_pool(sc,
10500                               &sc->macs_pool,
10501                               SC_FUNC(sc),
10502                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10503                                                 VNICS_PER_PATH(sc));
10504
10505    ecore_init_vlan_credit_pool(sc,
10506                                &sc->vlans_pool,
10507                                SC_ABS_FUNC(sc) >> 1,
10508                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10509                                                  VNICS_PER_PATH(sc));
10510
10511    /* RSS configuration object */
10512    ecore_init_rss_config_obj(sc,
10513                              &sc->rss_conf_obj,
10514                              sc->fp[0].cl_id,
10515                              sc->fp[0].index,
10516                              SC_FUNC(sc),
10517                              SC_FUNC(sc),
10518                              BXE_SP(sc, rss_rdata),
10519                              BXE_SP_MAPPING(sc, rss_rdata),
10520                              ECORE_FILTER_RSS_CONF_PENDING,
10521                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10522}
10523
10524/*
10525 * Initialize the function. This must be called before sending CLIENT_SETUP
10526 * for the first client.
10527 */
10528static inline int
10529bxe_func_start(struct bxe_softc *sc)
10530{
10531    struct ecore_func_state_params func_params = { NULL };
10532    struct ecore_func_start_params *start_params = &func_params.params.start;
10533
10534    /* Prepare parameters for function state transitions */
10535    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10536
10537    func_params.f_obj = &sc->func_obj;
10538    func_params.cmd = ECORE_F_CMD_START;
10539
10540    /* Function parameters */
10541    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10542    start_params->sd_vlan_tag = OVLAN(sc);
10543
10544    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10545        start_params->network_cos_mode = STATIC_COS;
10546    } else { /* CHIP_IS_E1X */
10547        start_params->network_cos_mode = FW_WRR;
10548    }
10549
10550    //start_params->gre_tunnel_mode = 0;
10551    //start_params->gre_tunnel_rss  = 0;
10552
10553    return (ecore_func_state_change(sc, &func_params));
10554}
10555
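/*
 * Move the device between PCI power states (D0 or D3hot) by programming
 * the power-management status/control register in the PCI PM capability.
 */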
10556static int
10557bxe_set_power_state(struct bxe_softc *sc,
10558                    uint8_t          state)
10559{
10560    uint16_t pmcsr;
10561
10562    /* If there is no power capability, just warn and succeed */
10563    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10564        BLOGW(sc, "No power capability\n");
10565        return (0);
10566    }
10567
10568    pmcsr = pci_read_config(sc->dev,
10569                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10570                            2);
10571
10572    switch (state) {
10573    case PCI_PM_D0:
10574        pci_write_config(sc->dev,
10575                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10576                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10577
10578        if (pmcsr & PCIM_PSTAT_DMASK) {
10579            /* delay required during transition out of D3hot */
10580            DELAY(20000);
10581        }
10582
10583        break;
10584
10585    case PCI_PM_D3hot:
10586        /* XXX if there are other clients above don't shut down the power */
10587
10588        /* don't shut down the power for emulation and FPGA */
10589        if (CHIP_REV_IS_SLOW(sc)) {
10590            return (0);
10591        }
10592
10593        pmcsr &= ~PCIM_PSTAT_DMASK;
10594        pmcsr |= PCIM_PSTAT_D3;
10595
10596        if (sc->wol) {
10597            pmcsr |= PCIM_PSTAT_PMEENABLE;
10598        }
10599
10600        pci_write_config(sc->dev,
10601                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10602                         pmcsr, 4);
10603
10604        /*
10605         * No more memory access after this point until device is brought back
10606         * to D0 state.
10607         */
10608        break;
10609
10610    default:
10611        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10612            state, pmcsr);
10613        return (-1);
10614    }
10615
10616    return (0);
10617}
10618
10619
10620/* returns TRUE if the lock was successfully acquired */
10621static uint8_t
10622bxe_trylock_hw_lock(struct bxe_softc *sc,
10623                    uint32_t         resource)
10624{
10625    uint32_t lock_status;
10626    uint32_t resource_bit = (1 << resource);
10627    int func = SC_FUNC(sc);
10628    uint32_t hw_lock_control_reg;
10629
10630    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10631
10632    /* Validating that the resource is within range */
10633    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10634        BLOGD(sc, DBG_LOAD,
10635              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10636              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10637        return (FALSE);
10638    }
10639
10640    if (func <= 5) {
10641        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10642    } else {
10643        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10644    }
10645
10646    /* try to acquire the lock */
10647    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10648    lock_status = REG_RD(sc, hw_lock_control_reg);
10649    if (lock_status & resource_bit) {
10650        return (TRUE);
10651    }
10652
10653    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10654        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10655        lock_status, resource_bit);
10656
10657    return (FALSE);
10658}
10659
10660/*
10661 * Get the recovery leader resource id according to the engine this function
10662 * belongs to. Currently only 2 engines are supported.
10663 */
10664static int
10665bxe_get_leader_lock_resource(struct bxe_softc *sc)
10666{
10667    if (SC_PATH(sc)) {
10668        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10669    } else {
10670        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10671    }
10672}
10673
10674/* try to acquire a leader lock for current engine */
10675static uint8_t
10676bxe_trylock_leader_lock(struct bxe_softc *sc)
10677{
10678    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10679}
10680
10681static int
10682bxe_release_leader_lock(struct bxe_softc *sc)
10683{
10684    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10685}
10686
10687/* close gates #2, #3 and #4 */
10688static void
10689bxe_set_234_gates(struct bxe_softc *sc,
10690                  uint8_t          close)
10691{
10692    uint32_t val;
10693
10694    /* gates #2 and #4a are closed/opened for "not E1" only */
10695    if (!CHIP_IS_E1(sc)) {
10696        /* #4 */
10697        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10698        /* #2 */
10699        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10700    }
10701
10702    /* #3 */
10703    if (CHIP_IS_E1x(sc)) {
10704        /* prevent interrupts from HC on both ports */
10705        val = REG_RD(sc, HC_REG_CONFIG_1);
10706        REG_WR(sc, HC_REG_CONFIG_1,
10707               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10708               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10709
10710        val = REG_RD(sc, HC_REG_CONFIG_0);
10711        REG_WR(sc, HC_REG_CONFIG_0,
10712               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10713               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10714    } else {
10715        /* Prevent incoming interrupts in IGU */
10716        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10717
10718        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10719               (!close) ?
10720               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10721               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10722    }
10723
10724    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10725          close ? "closing" : "opening");
10726
10727    wmb();
10728}
10729
10730/* poll for pending writes bit, it should get cleared in no more than 1s */
10731static int
10732bxe_er_poll_igu_vq(struct bxe_softc *sc)
10733{
10734    uint32_t cnt = 1000;
10735    uint32_t pend_bits = 0;
10736
10737    do {
10738        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10739
10740        if (pend_bits == 0) {
10741            break;
10742        }
10743
10744        DELAY(1000);
10745    } while (--cnt > 0);
10746
10747    if (cnt == 0) {
10748        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10749        return (-1);
10750    }
10751
10752    return (0);
10753}
10754
10755#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10756
10757static void
10758bxe_clp_reset_prep(struct bxe_softc *sc,
10759                   uint32_t         *magic_val)
10760{
10761    /* Do some magic... */
10762    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10763    *magic_val = val & SHARED_MF_CLP_MAGIC;
10764    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10765}
10766
10767/* restore the value of the 'magic' bit */
10768static void
10769bxe_clp_reset_done(struct bxe_softc *sc,
10770                   uint32_t         magic_val)
10771{
10772    /* Restore the 'magic' bit value... */
10773    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10774    MFCFG_WR(sc, shared_mf_config.clp_mb,
10775              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10776}
10777
10778/* prepare for MCP reset, takes care of CLP configurations */
10779static void
10780bxe_reset_mcp_prep(struct bxe_softc *sc,
10781                   uint32_t         *magic_val)
10782{
10783    uint32_t shmem;
10784    uint32_t validity_offset;
10785
10786    /* set `magic' bit in order to save MF config */
10787    if (!CHIP_IS_E1(sc)) {
10788        bxe_clp_reset_prep(sc, magic_val);
10789    }
10790
10791    /* get shmem offset */
10792    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10793    validity_offset =
10794        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10795
10796    /* Clear validity map flags */
10797    if (shmem > 0) {
10798        REG_WR(sc, shmem + validity_offset, 0);
10799    }
10800}
10801
10802#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10803#define MCP_ONE_TIMEOUT  100    /* 100 ms */
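/* bxe_init_shmem() below polls roughly MCP_TIMEOUT / MCP_ONE_TIMEOUT (50) times */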
10804
10805static void
10806bxe_mcp_wait_one(struct bxe_softc *sc)
10807{
10808    /* special handling for emulation and FPGA (10 times longer) */
10809    if (CHIP_REV_IS_SLOW(sc)) {
10810        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10811    } else {
10812        DELAY((MCP_ONE_TIMEOUT) * 1000);
10813    }
10814}
10815
10816/* initialize shmem_base and wait for the validity signature to appear */
10817static int
10818bxe_init_shmem(struct bxe_softc *sc)
10819{
10820    int cnt = 0;
10821    uint32_t val = 0;
10822
10823    do {
10824        sc->devinfo.shmem_base     =
10825        sc->link_params.shmem_base =
10826            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10827
10828        if (sc->devinfo.shmem_base) {
10829            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10830            if (val & SHR_MEM_VALIDITY_MB)
10831                return (0);
10832        }
10833
10834        bxe_mcp_wait_one(sc);
10835
10836    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10837
10838    BLOGE(sc, "BAD MCP validity signature\n");
10839
10840    return (-1);
10841}
10842
10843static int
10844bxe_reset_mcp_comp(struct bxe_softc *sc,
10845                   uint32_t         magic_val)
10846{
10847    int rc = bxe_init_shmem(sc);
10848
10849    /* Restore the `magic' bit value */
10850    if (!CHIP_IS_E1(sc)) {
10851        bxe_clp_reset_done(sc, magic_val);
10852    }
10853
10854    return (rc);
10855}
10856
10857static void
10858bxe_pxp_prep(struct bxe_softc *sc)
10859{
10860    if (!CHIP_IS_E1(sc)) {
10861        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10862        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10863        wmb();
10864    }
10865}
10866
10867/*
10868 * Reset the whole chip except for:
10869 *      - PCIE core
10870 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10871 *      - IGU
10872 *      - MISC (including AEU)
10873 *      - GRC
10874 *      - RBCN, RBCP
10875 */
10876static void
10877bxe_process_kill_chip_reset(struct bxe_softc *sc,
10878                            uint8_t          global)
10879{
10880    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10881    uint32_t global_bits2, stay_reset2;
10882
10883    /*
10884     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10885     * (per chip) blocks.
10886     */
10887    global_bits2 =
10888        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10889        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10890
10891    /*
10892     * Don't reset the following blocks.
10893     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10894     *            reset, as in a 4-port device they might still be owned
10895     *            by the MCP (there is only one leader per path).
10896     */
10897    not_reset_mask1 =
10898        MISC_REGISTERS_RESET_REG_1_RST_HC |
10899        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10900        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10901
10902    not_reset_mask2 =
10903        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10904        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10905        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10906        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10907        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10908        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10909        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10910        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10911        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10912        MISC_REGISTERS_RESET_REG_2_PGLC |
10913        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10914        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10915        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10916        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10917        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10918        MISC_REGISTERS_RESET_REG_2_UMAC1;
10919
10920    /*
10921     * Keep the following blocks in reset:
10922     *  - all xxMACs are handled by the elink code.
10923     */
10924    stay_reset2 =
10925        MISC_REGISTERS_RESET_REG_2_XMAC |
10926        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10927
10928    /* Full reset masks according to the chip */
10929    reset_mask1 = 0xffffffff;
10930
10931    if (CHIP_IS_E1(sc))
10932        reset_mask2 = 0xffff;
10933    else if (CHIP_IS_E1H(sc))
10934        reset_mask2 = 0x1ffff;
10935    else if (CHIP_IS_E2(sc))
10936        reset_mask2 = 0xfffff;
10937    else /* CHIP_IS_E3 */
10938        reset_mask2 = 0x3ffffff;
10939
10940    /* Don't reset global blocks unless we need to */
10941    if (!global)
10942        reset_mask2 &= ~global_bits2;
10943
10944    /*
10945     * In case of attention in the QM, we need to reset PXP
10946     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10947     * because otherwise QM reset would release 'close the gates' shortly
10948     * before resetting the PXP, then the PSWRQ would send a write
10949     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10950     * read the payload data from PSWWR, but PSWWR would not
10951     * respond. The write queue in PGLUE would get stuck, DMAE commands
10952     * would not return. Therefore it's important to reset the second
10953     * reset register (containing the
10954     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10955     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10956     * bit).
10957     */
10958    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10959           reset_mask2 & (~not_reset_mask2));
10960
10961    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10962           reset_mask1 & (~not_reset_mask1));
10963
10964    mb();
10965    wmb();
10966
10967    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10968           reset_mask2 & (~stay_reset2));
10969
10970    mb();
10971    wmb();
10972
10973    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10974    wmb();
10975}
10976
10977static int
10978bxe_process_kill(struct bxe_softc *sc,
10979                 uint8_t          global)
10980{
10981    int cnt = 1000;
10982    uint32_t val = 0;
10983    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10984    uint32_t tags_63_32 = 0;
10985
10986    /* Empty the Tetris buffer, wait for 1s */
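    /* (up to 1000 iterations with a 1 msec delay each, i.e. ~1 second total) */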
10987    do {
10988        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10989        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10990        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10991        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10992        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10993        if (CHIP_IS_E3(sc)) {
10994            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10995        }
10996
10997        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10998            ((port_is_idle_0 & 0x1) == 0x1) &&
10999            ((port_is_idle_1 & 0x1) == 0x1) &&
11000            (pgl_exp_rom2 == 0xffffffff) &&
11001            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11002            break;
11003        DELAY(1000);
11004    } while (cnt-- > 0);
11005
11006    if (cnt <= 0) {
11007        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11008                  "are still outstanding read requests after 1s! "
11009                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11010                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11011              sr_cnt, blk_cnt, port_is_idle_0,
11012              port_is_idle_1, pgl_exp_rom2);
11013        return (-1);
11014    }
11015
11016    mb();
11017
11018    /* Close gates #2, #3 and #4 */
11019    bxe_set_234_gates(sc, TRUE);
11020
11021    /* Poll for IGU VQs for 57712 and newer chips */
11022    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11023        return (-1);
11024    }
11025
11026    /* XXX indicate that "process kill" is in progress to MCP */
11027
11028    /* clear "unprepared" bit */
11029    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11030    mb();
11031
11032    /* Make sure all is written to the chip before the reset */
11033    wmb();
11034
11035    /*
11036     * Wait for 1ms to empty GLUE and PCI-E core queues,
11037     * PSWHST, GRC and PSWRD Tetris buffer.
11038     */
11039    DELAY(1000);
11040
11041    /* Prepare for the chip reset: */
11042    /* MCP */
11043    if (global) {
11044        bxe_reset_mcp_prep(sc, &val);
11045    }
11046
11047    /* PXP */
11048    bxe_pxp_prep(sc);
11049    mb();
11050
11051    /* reset the chip */
11052    bxe_process_kill_chip_reset(sc, global);
11053    mb();
11054
11055    /* clear errors in PGB */
11056    if (!CHIP_IS_E1(sc))
11057        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11058
11059    /* Recover after reset: */
11060    /* MCP */
11061    if (global && bxe_reset_mcp_comp(sc, val)) {
11062        return (-1);
11063    }
11064
11065    /* XXX add resetting the NO_MCP mode DB here */
11066
11067    /* Open the gates #2, #3 and #4 */
11068    bxe_set_234_gates(sc, FALSE);
11069
11070    /* XXX
11071     * IGU/AEU preparation: bring the AEU/IGU back to a reset state
11072     * and re-enable attentions
11073     */
11074
11075    return (0);
11076}
11077
11078static int
11079bxe_leader_reset(struct bxe_softc *sc)
11080{
11081    int rc = 0;
11082    uint8_t global = bxe_reset_is_global(sc);
11083    uint32_t load_code;
11084
11085    /*
11086     * If we are not going to reset the MCP, load a "fake" driver to reset
11087     * the HW while this driver is the owner of the HW.
11088     */
11089    if (!global && !BXE_NOMCP(sc)) {
11090        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11091                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11092        if (!load_code) {
11093            BLOGE(sc, "MCP response failure, aborting\n");
11094            rc = -1;
11095            goto exit_leader_reset;
11096        }
11097
11098        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11099            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11100            BLOGE(sc, "MCP unexpected response, aborting\n");
11101            rc = -1;
11102            goto exit_leader_reset2;
11103        }
11104
11105        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11106        if (!load_code) {
11107            BLOGE(sc, "MCP response failure, aborting\n");
11108            rc = -1;
11109            goto exit_leader_reset2;
11110        }
11111    }
11112
11113    /* try to recover after the failure */
11114    if (bxe_process_kill(sc, global)) {
11115        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11116        rc = -1;
11117        goto exit_leader_reset2;
11118    }
11119
11120    /*
11121     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11122     * state.
11123     */
11124    bxe_set_reset_done(sc);
11125    if (global) {
11126        bxe_clear_reset_global(sc);
11127    }
11128
11129exit_leader_reset2:
11130
11131    /* unload "fake driver" if it was loaded */
11132    if (!global && !BXE_NOMCP(sc)) {
11133        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11134        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11135    }
11136
11137exit_leader_reset:
11138
11139    sc->is_leader = 0;
11140    bxe_release_leader_lock(sc);
11141
11142    mb();
11143    return (rc);
11144}
11145
11146/*
11147 * prepare INIT transition, parameters configured:
11148 *   - HC configuration
11149 *   - Queue's CDU context
11150 */
11151static void
11152bxe_pf_q_prep_init(struct bxe_softc               *sc,
11153                   struct bxe_fastpath            *fp,
11154                   struct ecore_queue_init_params *init_params)
11155{
11156    uint8_t cos;
11157    int cxt_index, cxt_offset;
11158
11159    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11160    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11161
11162    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11163    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11164
11165    /* HC rate */
11166    init_params->rx.hc_rate =
11167        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11168    init_params->tx.hc_rate =
11169        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
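    /* e.g. a tick value of 25 yields an HC rate of 1000000 / 25 = 40000 */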
11170
11171    /* FW SB ID */
11172    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11173
11174    /* CQ index among the SB indices */
11175    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11176    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11177
11178    /* set maximum number of COSs supported by this queue */
11179    init_params->max_cos = sc->max_cos;
11180
11181    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11182          fp->index, init_params->max_cos);
11183
11184    /* set the context pointers in the queue object */
11185    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11186        /* XXX change index/cid here if we ever support multiple tx CoS */
11187        /* fp->txdata[cos]->cid */
11188        cxt_index = fp->index / ILT_PAGE_CIDS;
11189        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
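        /*
         * Illustrative split (ILT_PAGE_CIDS value assumed): if ILT_PAGE_CIDS
         * were 64, an fp->index of 70 would give cxt_index 1 and cxt_offset 6,
         * i.e. the second ILT page, seventh context entry.
         */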
11190        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11191    }
11192}
11193
11194/* set flags that are common to both the Tx-only and the normal connections */
11195static unsigned long
11196bxe_get_common_flags(struct bxe_softc    *sc,
11197                     struct bxe_fastpath *fp,
11198                     uint8_t             zero_stats)
11199{
11200    unsigned long flags = 0;
11201
11202    /* PF driver will always initialize the Queue to an ACTIVE state */
11203    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11204
11205    /*
11206     * tx only connections collect statistics (on the same index as the
11207     * parent connection). The statistics are zeroed when the parent
11208     * connection is initialized.
11209     */
11210
11211    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11212    if (zero_stats) {
11213        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11214    }
11215
11216    /*
11217     * tx only connections can support tx-switching, though their
11218     * CoS-ness doesn't survive the loopback
11219     */
11220    if (sc->flags & BXE_TX_SWITCHING) {
11221        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11222    }
11223
11224    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11225
11226    return (flags);
11227}
11228
11229static unsigned long
11230bxe_get_q_flags(struct bxe_softc    *sc,
11231                struct bxe_fastpath *fp,
11232                uint8_t             leading)
11233{
11234    unsigned long flags = 0;
11235
11236    if (IS_MF_SD(sc)) {
11237        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11238    }
11239
11240    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11241        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11242#if __FreeBSD_version >= 800000
11243        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11244#endif
11245    }
11246
11247    if (leading) {
11248        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11249        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11250    }
11251
11252    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11253
11254    /* merge with common flags */
11255    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11256}
11257
11258static void
11259bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11260                      struct bxe_fastpath               *fp,
11261                      struct ecore_general_setup_params *gen_init,
11262                      uint8_t                           cos)
11263{
11264    gen_init->stat_id = bxe_stats_id(fp);
11265    gen_init->spcl_id = fp->cl_id;
11266    gen_init->mtu = sc->mtu;
11267    gen_init->cos = cos;
11268}
11269
11270static void
11271bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11272                 struct bxe_fastpath           *fp,
11273                 struct rxq_pause_params       *pause,
11274                 struct ecore_rxq_setup_params *rxq_init)
11275{
11276    uint8_t max_sge = 0;
11277    uint16_t sge_sz = 0;
11278    uint16_t tpa_agg_size = 0;
11279
11280    pause->sge_th_lo = SGE_TH_LO(sc);
11281    pause->sge_th_hi = SGE_TH_HI(sc);
11282
11283    /* validate the SGE ring has enough entries to cross the high threshold */
11284    if (sc->dropless_fc &&
11285            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11286            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11287        BLOGW(sc, "sge ring threshold limit\n");
11288    }
11289
11290    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11291    tpa_agg_size = (2 * sc->mtu);
11292    if (tpa_agg_size < sc->max_aggregation_size) {
11293        tpa_agg_size = sc->max_aggregation_size;
11294    }
11295
11296    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11297    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11298                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11299    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
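    /*
     * Worked example (page constants assumed): with 4KB SGE pages and
     * PAGES_PER_SGE of 2, an MTU of 9000 aligns up to 12288 bytes (3 pages),
     * which rounds up to 4 pages and therefore 2 SGEs per packet, while
     * sge_sz is capped at min(SGE_PAGES, 0xffff).
     */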
11300
11301    /* pause - not for e1 */
11302    if (!CHIP_IS_E1(sc)) {
11303        pause->bd_th_lo = BD_TH_LO(sc);
11304        pause->bd_th_hi = BD_TH_HI(sc);
11305
11306        pause->rcq_th_lo = RCQ_TH_LO(sc);
11307        pause->rcq_th_hi = RCQ_TH_HI(sc);
11308
11309        /* validate rings have enough entries to cross high thresholds */
11310        if (sc->dropless_fc &&
11311            pause->bd_th_hi + FW_PREFETCH_CNT >
11312            sc->rx_ring_size) {
11313            BLOGW(sc, "rx bd ring threshold limit\n");
11314        }
11315
11316        if (sc->dropless_fc &&
11317            pause->rcq_th_hi + FW_PREFETCH_CNT >
11318            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11319            BLOGW(sc, "rcq ring threshold limit\n");
11320        }
11321
11322        pause->pri_map = 1;
11323    }
11324
11325    /* rxq setup */
11326    rxq_init->dscr_map   = fp->rx_dma.paddr;
11327    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11328    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11329    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11330
11331    /*
11332     * This is the maximum number of data bytes that may be placed on
11333     * the BD (not including padding).
11334     */
11335    rxq_init->buf_sz = (fp->rx_buf_size -
11336                        IP_HEADER_ALIGNMENT_PADDING);
11337
11338    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11339    rxq_init->tpa_agg_sz      = tpa_agg_size;
11340    rxq_init->sge_buf_sz      = sge_sz;
11341    rxq_init->max_sges_pkt    = max_sge;
11342    rxq_init->rss_engine_id   = SC_FUNC(sc);
11343    rxq_init->mcast_engine_id = SC_FUNC(sc);
11344
11345    /*
11346     * Maximum number of simultaneous TPA aggregations for this Queue.
11347     * For PF Clients it should be the maximum available number.
11348     * VF driver(s) may want to define it to a smaller value.
11349     */
11350    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11351
11352    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11353    rxq_init->fw_sb_id = fp->fw_sb_id;
11354
11355    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11356
11357    /*
11358     * configure silent vlan removal
11359     * if multi function mode is afex, then mask default vlan
11360     */
11361    if (IS_MF_AFEX(sc)) {
11362        rxq_init->silent_removal_value =
11363            sc->devinfo.mf_info.afex_def_vlan_tag;
11364        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11365    }
11366}
11367
11368static void
11369bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11370                 struct bxe_fastpath           *fp,
11371                 struct ecore_txq_setup_params *txq_init,
11372                 uint8_t                       cos)
11373{
11374    /*
11375     * XXX If multiple CoS is ever supported then each fastpath structure
11376     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11377     * fp->txdata[cos]->tx_dma.paddr;
11378     */
11379    txq_init->dscr_map     = fp->tx_dma.paddr;
11380    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11381    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11382    txq_init->fw_sb_id     = fp->fw_sb_id;
11383
11384    /*
11385     * set the TSS leading client id for TX classification to the
11386     * leading RSS client id
11387     */
11388    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11389}
11390
11391/*
11392 * This function performs 2 steps in a queue state machine:
11393 *   1) RESET->INIT
11394 *   2) INIT->SETUP
11395 */
11396static int
11397bxe_setup_queue(struct bxe_softc    *sc,
11398                struct bxe_fastpath *fp,
11399                uint8_t             leading)
11400{
11401    struct ecore_queue_state_params q_params = { NULL };
11402    struct ecore_queue_setup_params *setup_params =
11403                        &q_params.params.setup;
11404    int rc;
11405
11406    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11407
11408    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11409
11410    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11411
11412    /* we want to wait for completion in this context */
11413    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11414
11415    /* prepare the INIT parameters */
11416    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11417
11418    /* Set the command */
11419    q_params.cmd = ECORE_Q_CMD_INIT;
11420
11421    /* Change the state to INIT */
11422    rc = ecore_queue_state_change(sc, &q_params);
11423    if (rc) {
11424        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11425        return (rc);
11426    }
11427
11428    BLOGD(sc, DBG_LOAD, "init complete\n");
11429
11430    /* now move the Queue to the SETUP state */
11431    memset(setup_params, 0, sizeof(*setup_params));
11432
11433    /* set Queue flags */
11434    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11435
11436    /* set general SETUP parameters */
11437    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11438                          FIRST_TX_COS_INDEX);
11439
11440    bxe_pf_rx_q_prep(sc, fp,
11441                     &setup_params->pause_params,
11442                     &setup_params->rxq_params);
11443
11444    bxe_pf_tx_q_prep(sc, fp,
11445                     &setup_params->txq_params,
11446                     FIRST_TX_COS_INDEX);
11447
11448    /* Set the command */
11449    q_params.cmd = ECORE_Q_CMD_SETUP;
11450
11451    /* change the state to SETUP */
11452    rc = ecore_queue_state_change(sc, &q_params);
11453    if (rc) {
11454        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11455        return (rc);
11456    }
11457
11458    return (rc);
11459}
11460
11461static int
11462bxe_setup_leading(struct bxe_softc *sc)
11463{
11464    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11465}
11466
11467static int
11468bxe_config_rss_pf(struct bxe_softc            *sc,
11469                  struct ecore_rss_config_obj *rss_obj,
11470                  uint8_t                     config_hash)
11471{
11472    struct ecore_config_rss_params params = { NULL };
11473    int i;
11474
11475    /*
11476     * Although RSS is meaningless when there is a single HW queue, we
11477     * still need it enabled in order to have the HW Rx hash generated.
11478     */
11479
11480    params.rss_obj = rss_obj;
11481
11482    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11483
11484    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11485
11486    /* RSS configuration */
11487    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11488    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11489    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11490    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11491    if (rss_obj->udp_rss_v4) {
11492        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11493    }
11494    if (rss_obj->udp_rss_v6) {
11495        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11496    }
11497
11498    /* Hash bits */
11499    params.rss_result_mask = MULTI_MASK;
11500
11501    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11502
11503    if (config_hash) {
11504        /* RSS keys */
11505        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11506            params.rss_key[i] = arc4random();
11507        }
11508
11509        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11510    }
11511
11512    return (ecore_config_rss(sc, &params));
11513}
11514
11515static int
11516bxe_config_rss_eth(struct bxe_softc *sc,
11517                   uint8_t          config_hash)
11518{
11519    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11520}
11521
11522static int
11523bxe_init_rss_pf(struct bxe_softc *sc)
11524{
11525    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11526    int i;
11527
11528    /*
11529     * Prepare the initial contents of the indirection table if
11530     * RSS is enabled
11531     */
11532    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11533        sc->rss_conf_obj.ind_table[i] =
11534            (sc->fp->cl_id + (i % num_eth_queues));
11535    }
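    /*
     * For example, with 4 ethernet queues the table entries simply cycle
     * through cl_id+0, cl_id+1, cl_id+2, cl_id+3, cl_id+0, ... across the
     * whole indirection table.
     */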
11536
11537    if (sc->udp_rss) {
11538        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11539    }
11540
11541    /*
11542     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11543     * per-port, so if explicit configuration is needed, do it only
11544     * for a PMF.
11545     *
11546     * For 57712 and newer it's a per-function configuration.
11547     */
11548    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11549}
11550
11551static int
11552bxe_set_mac_one(struct bxe_softc          *sc,
11553                uint8_t                   *mac,
11554                struct ecore_vlan_mac_obj *obj,
11555                uint8_t                   set,
11556                int                       mac_type,
11557                unsigned long             *ramrod_flags)
11558{
11559    struct ecore_vlan_mac_ramrod_params ramrod_param;
11560    int rc;
11561
11562    memset(&ramrod_param, 0, sizeof(ramrod_param));
11563
11564    /* fill in general parameters */
11565    ramrod_param.vlan_mac_obj = obj;
11566    ramrod_param.ramrod_flags = *ramrod_flags;
11567
11568    /* fill a user request section if needed */
11569    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11570        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11571
11572        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11573
11574        /* Set the command: ADD or DEL */
11575        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11576                                            ECORE_VLAN_MAC_DEL;
11577    }
11578
11579    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11580
11581    if (rc == ECORE_EXISTS) {
11582        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11583        /* do not treat adding same MAC as error */
11584        rc = 0;
11585    } else if (rc < 0) {
11586        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11587    }
11588
11589    return (rc);
11590}
11591
11592static int
11593bxe_set_eth_mac(struct bxe_softc *sc,
11594                uint8_t          set)
11595{
11596    unsigned long ramrod_flags = 0;
11597
11598    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11599
11600    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11601
11602    /* Eth MAC is set on RSS leading client (fp[0]) */
11603    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11604                            &sc->sp_objs->mac_obj,
11605                            set, ECORE_ETH_MAC, &ramrod_flags));
11606}
11607
11608static int
11609bxe_get_cur_phy_idx(struct bxe_softc *sc)
11610{
11611    uint32_t sel_phy_idx = 0;
11612
11613    if (sc->link_params.num_phys <= 1) {
11614        return (ELINK_INT_PHY);
11615    }
11616
11617    if (sc->link_vars.link_up) {
11618        sel_phy_idx = ELINK_EXT_PHY1;
11619        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11620        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11621            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11622             ELINK_SUPPORTED_FIBRE))
11623            sel_phy_idx = ELINK_EXT_PHY2;
11624    } else {
11625        switch (elink_phy_selection(&sc->link_params)) {
11626        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11627        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11628        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11629               sel_phy_idx = ELINK_EXT_PHY1;
11630               break;
11631        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11632        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11633               sel_phy_idx = ELINK_EXT_PHY2;
11634               break;
11635        }
11636    }
11637
11638    return (sel_phy_idx);
11639}
11640
11641static int
11642bxe_get_link_cfg_idx(struct bxe_softc *sc)
11643{
11644    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11645
11646    /*
11647     * The selected activated PHY index always refers to the post-swap
11648     * ordering (when PHY swapping is enabled), so when swapping is enabled
11649     * we need to reverse the configuration index.
11650     */
11651
11652    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11653        if (sel_phy_idx == ELINK_EXT_PHY1)
11654            sel_phy_idx = ELINK_EXT_PHY2;
11655        else if (sel_phy_idx == ELINK_EXT_PHY2)
11656            sel_phy_idx = ELINK_EXT_PHY1;
11657    }
11658
11659    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11660}
11661
11662static void
11663bxe_set_requested_fc(struct bxe_softc *sc)
11664{
11665    /*
11666     * Initialize the link parameters structure variables.
11667     * It is recommended to turn off RX flow control for jumbo frames
11668     * for better performance.
11669     */
11670    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11671        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11672    } else {
11673        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11674    }
11675}
11676
11677static void
11678bxe_calc_fc_adv(struct bxe_softc *sc)
11679{
11680    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11681
11682
11683    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11684                                           ADVERTISED_Pause);
11685
11686    switch (sc->link_vars.ieee_fc &
11687            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11688
11689    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11690        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11691                                          ADVERTISED_Pause);
11692        break;
11693
11694    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11695        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11696        break;
11697
11698    default:
11699        break;
11700
11701    }
11702}
11703
11704static uint16_t
11705bxe_get_mf_speed(struct bxe_softc *sc)
11706{
11707    uint16_t line_speed = sc->link_vars.line_speed;
11708    if (IS_MF(sc)) {
11709        uint16_t maxCfg =
11710            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11711
11712        /* calculate the current MAX line speed limit for the MF devices */
11713        if (IS_MF_SI(sc)) {
11714            line_speed = (line_speed * maxCfg) / 100;
11715        } else { /* SD mode */
11716            uint16_t vn_max_rate = maxCfg * 100;
11717
11718            if (vn_max_rate < line_speed) {
11719                line_speed = vn_max_rate;
11720            }
11721        }
11722    }
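    /*
     * Illustrative numbers (maxCfg units assumed): on a 10000 Mbps link a
     * maxCfg of 25 yields 10000 * 25 / 100 = 2500 Mbps in SI mode, and a
     * vn_max_rate cap of 25 * 100 = 2500 Mbps in SD mode.
     */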
11723
11724    return (line_speed);
11725}
11726
11727static void
11728bxe_fill_report_data(struct bxe_softc            *sc,
11729                     struct bxe_link_report_data *data)
11730{
11731    uint16_t line_speed = bxe_get_mf_speed(sc);
11732
11733    memset(data, 0, sizeof(*data));
11734
11735    /* fill the report data with the effective line speed */
11736    data->line_speed = line_speed;
11737
11738    /* Link is down */
11739    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11740        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11741    }
11742
11743    /* Full DUPLEX */
11744    if (sc->link_vars.duplex == DUPLEX_FULL) {
11745        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11746    }
11747
11748    /* Rx Flow Control is ON */
11749    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11750        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11751    }
11752
11753    /* Tx Flow Control is ON */
11754    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11755        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11756    }
11757}
11758
11759/* report link status to OS, should be called under phy_lock */
11760static void
11761bxe_link_report_locked(struct bxe_softc *sc)
11762{
11763    struct bxe_link_report_data cur_data;
11764
11765    /* reread mf_cfg */
11766    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11767        bxe_read_mf_cfg(sc);
11768    }
11769
11770    /* Read the current link report info */
11771    bxe_fill_report_data(sc, &cur_data);
11772
11773    /* Don't report link down or exactly the same link status twice */
11774    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11775        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11776                      &sc->last_reported_link.link_report_flags) &&
11777         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11778                      &cur_data.link_report_flags))) {
11779        return;
11780    }
11781
11782    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11783                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11784    sc->link_cnt++;
11785
11786    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11787    /* report new link params and remember the state for the next time */
11788    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11789
11790    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11791                     &cur_data.link_report_flags)) {
11792        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11793    } else {
11794        const char *duplex;
11795        const char *flow;
11796
11797        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11798                                   &cur_data.link_report_flags)) {
11799            duplex = "full";
11800            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11801        } else {
11802            duplex = "half";
11803            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11804        }
11805
11806        /*
11807         * Handle FC at the end so that only the FC flags can still be
11808         * set at this point. This way we can easily check whether any
11809         * FC is enabled.
11810         */
11811        if (cur_data.link_report_flags) {
11812            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11813                             &cur_data.link_report_flags) &&
11814                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11815                             &cur_data.link_report_flags)) {
11816                flow = "ON - receive & transmit";
11817            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11818                                    &cur_data.link_report_flags) &&
11819                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11820                                     &cur_data.link_report_flags)) {
11821                flow = "ON - receive";
11822            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11823                                     &cur_data.link_report_flags) &&
11824                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11825                                    &cur_data.link_report_flags)) {
11826                flow = "ON - transmit";
11827            } else {
11828                flow = "none"; /* possible? */
11829            }
11830        } else {
11831            flow = "none";
11832        }
11833
11834        if_link_state_change(sc->ifp, LINK_STATE_UP);
11835        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11836              cur_data.line_speed, duplex, flow);
11837    }
11838}
11839
11840static void
11841bxe_link_report(struct bxe_softc *sc)
11842{
11843    bxe_acquire_phy_lock(sc);
11844    bxe_link_report_locked(sc);
11845    bxe_release_phy_lock(sc);
11846}
11847
11848static void
11849bxe_link_status_update(struct bxe_softc *sc)
11850{
11851    if (sc->state != BXE_STATE_OPEN) {
11852        return;
11853    }
11854
11855    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11856        elink_link_status_update(&sc->link_params, &sc->link_vars);
11857    } else {
11858        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11859                                  ELINK_SUPPORTED_10baseT_Full |
11860                                  ELINK_SUPPORTED_100baseT_Half |
11861                                  ELINK_SUPPORTED_100baseT_Full |
11862                                  ELINK_SUPPORTED_1000baseT_Full |
11863                                  ELINK_SUPPORTED_2500baseX_Full |
11864                                  ELINK_SUPPORTED_10000baseT_Full |
11865                                  ELINK_SUPPORTED_TP |
11866                                  ELINK_SUPPORTED_FIBRE |
11867                                  ELINK_SUPPORTED_Autoneg |
11868                                  ELINK_SUPPORTED_Pause |
11869                                  ELINK_SUPPORTED_Asym_Pause);
11870        sc->port.advertising[0] = sc->port.supported[0];
11871
11872        sc->link_params.sc                = sc;
11873        sc->link_params.port              = SC_PORT(sc);
11874        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11875        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11876        sc->link_params.req_line_speed[0] = SPEED_10000;
11877        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11878        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11879
11880        if (CHIP_REV_IS_FPGA(sc)) {
11881            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11882            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11883            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11884                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11885        } else {
11886            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11887            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11888            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11889                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11890        }
11891
11892        sc->link_vars.link_up = 1;
11893
11894        sc->link_vars.duplex    = DUPLEX_FULL;
11895        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11896
11897        if (IS_PF(sc)) {
11898            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11899            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11900            bxe_link_report(sc);
11901        }
11902    }
11903
11904    if (IS_PF(sc)) {
11905        if (sc->link_vars.link_up) {
11906            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11907        } else {
11908            bxe_stats_handle(sc, STATS_EVENT_STOP);
11909        }
11910        bxe_link_report(sc);
11911    } else {
11912        bxe_link_report(sc);
11913        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11914    }
11915}
11916
11917static int
11918bxe_initial_phy_init(struct bxe_softc *sc,
11919                     int              load_mode)
11920{
11921    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11922    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11923    struct elink_params *lp = &sc->link_params;
11924
11925    bxe_set_requested_fc(sc);
11926
11927    if (CHIP_REV_IS_SLOW(sc)) {
11928        uint32_t bond = CHIP_BOND_ID(sc);
11929        uint32_t feat = 0;
11930
11931        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11932            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11933        } else if (bond & 0x4) {
11934            if (CHIP_IS_E3(sc)) {
11935                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11936            } else {
11937                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11938            }
11939        } else if (bond & 0x8) {
11940            if (CHIP_IS_E3(sc)) {
11941                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11942            } else {
11943                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11944            }
11945        }
11946
11947        /* disable EMAC for E3 and above */
11948        if (bond & 0x2) {
11949            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11950        }
11951
11952        sc->link_params.feature_config_flags |= feat;
11953    }
11954
11955    bxe_acquire_phy_lock(sc);
11956
11957    if (load_mode == LOAD_DIAG) {
11958        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11959        /* Prefer doing PHY loopback at 10G speed, if possible */
11960        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11961            if (lp->speed_cap_mask[cfg_idx] &
11962                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11963                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11964            } else {
11965                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11966            }
11967        }
11968    }
11969
11970    if (load_mode == LOAD_LOOPBACK_EXT) {
11971        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11972    }
11973
11974    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11975
11976    bxe_release_phy_lock(sc);
11977
11978    bxe_calc_fc_adv(sc);
11979
11980    if (sc->link_vars.link_up) {
11981        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11982        bxe_link_report(sc);
11983    }
11984
11985    if (!CHIP_REV_IS_SLOW(sc)) {
11986        bxe_periodic_start(sc);
11987    }
11988
11989    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11990    return (rc);
11991}
11992
11993/* must be called under IF_ADDR_LOCK */
11994
11995static int
11996bxe_set_mc_list(struct bxe_softc *sc)
11997{
11998    struct ecore_mcast_ramrod_params rparam = { NULL };
11999    int rc = 0;
12000    int mc_count = 0;
12001    int mcnt, i;
12002    struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
12003    unsigned char *mta;
12004    if_t ifp = sc->ifp;
12005
12006    mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */
12007    if (!mc_count)
12008        return (0);
12009
12010    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
12011            mc_count, M_DEVBUF, M_NOWAIT);
12012
12013    if (mta == NULL) {
12014        BLOGE(sc, "Failed to allocate temp mcast list\n");
12015        return (-1);
12016    }
12017    bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
12018
12019    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
12020    mc_mac_start = mc_mac;
12021
12022    if (!mc_mac) {
12023        free(mta, M_DEVBUF);
12024        BLOGE(sc, "Failed to allocate temp mcast list\n");
12025        return (-1);
12026    }
12027    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12028
12029    /* mta and mcnt are not expected to differ */
12030    if_multiaddr_array(ifp, mta, &mcnt, mc_count);
12031
12032
12033    rparam.mcast_obj = &sc->mcast_obj;
12034    ECORE_LIST_INIT(&rparam.mcast_list);
12035
12036    for (i = 0; i < mcnt; i++) {
12037
12038        mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
12039        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
12040
12041        BLOGD(sc, DBG_LOAD,
12042              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12043              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12044              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12045
12046        mc_mac++;
12047    }
12048    rparam.mcast_list_len = mc_count;
12049
12050    BXE_MCAST_LOCK(sc);
12051
12052    /* first, clear all configured multicast MACs */
12053    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12054    if (rc < 0) {
12055        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12056        BXE_MCAST_UNLOCK(sc);
12057        free(mc_mac_start, M_DEVBUF);
12058        free(mta, M_DEVBUF);
12059        return (rc);
12060    }
12061
12062    /* Now add the new MACs */
12063    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12064    if (rc < 0) {
12065        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12066    }
12067
12068    BXE_MCAST_UNLOCK(sc);
12069
12070    free(mc_mac_start, M_DEVBUF);
12071    free(mta, M_DEVBUF);
12072
12073    return (rc);
12074}
12075
12076static int
12077bxe_set_uc_list(struct bxe_softc *sc)
12078{
12079    if_t ifp = sc->ifp;
12080    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12081    struct ifaddr *ifa;
12082    unsigned long ramrod_flags = 0;
12083    int rc;
12084
12085#if __FreeBSD_version < 800000
12086    IF_ADDR_LOCK(ifp);
12087#else
12088    if_addr_rlock(ifp);
12089#endif
12090
12091    /* first schedule a cleanup of the old configuration */
12092    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12093    if (rc < 0) {
12094        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12095#if __FreeBSD_version < 800000
12096        IF_ADDR_UNLOCK(ifp);
12097#else
12098        if_addr_runlock(ifp);
12099#endif
12100        return (rc);
12101    }
12102
12103    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12104    while (ifa) {
12105        if (ifa->ifa_addr->sa_family != AF_LINK) {
12106            ifa = TAILQ_NEXT(ifa, ifa_link);
12107            continue;
12108        }
12109
12110        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12111                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12112        if (rc == -EEXIST) {
12113            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12114            /* do not treat adding same MAC as an error */
12115            rc = 0;
12116        } else if (rc < 0) {
12117            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12118#if __FreeBSD_version < 800000
12119            IF_ADDR_UNLOCK(ifp);
12120#else
12121            if_addr_runlock(ifp);
12122#endif
12123            return (rc);
12124        }
12125
12126        ifa = TAILQ_NEXT(ifa, ifa_link);
12127    }
12128
12129#if __FreeBSD_version < 800000
12130    IF_ADDR_UNLOCK(ifp);
12131#else
12132    if_addr_runlock(ifp);
12133#endif
12134
12135    /* Execute the pending commands */
12136    bit_set(&ramrod_flags, RAMROD_CONT);
12137    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12138                            ECORE_UC_LIST_MAC, &ramrod_flags));
12139}
12140
12141static void
12142bxe_set_rx_mode(struct bxe_softc *sc)
12143{
12144    if_t ifp = sc->ifp;
12145    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12146
12147    if (sc->state != BXE_STATE_OPEN) {
12148        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12149        return;
12150    }
12151
12152    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12153
12154    if (if_getflags(ifp) & IFF_PROMISC) {
12155        rx_mode = BXE_RX_MODE_PROMISC;
12156    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12157               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12158                CHIP_IS_E1(sc))) {
12159        rx_mode = BXE_RX_MODE_ALLMULTI;
12160    } else {
12161        if (IS_PF(sc)) {
12162            /* some multicasts */
12163            if (bxe_set_mc_list(sc) < 0) {
12164                rx_mode = BXE_RX_MODE_ALLMULTI;
12165            }
12166            if (bxe_set_uc_list(sc) < 0) {
12167                rx_mode = BXE_RX_MODE_PROMISC;
12168            }
12169        }
12170    }
12171
12172    sc->rx_mode = rx_mode;
12173
12174    /* schedule the rx_mode command */
12175    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12176        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12177        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12178        return;
12179    }
12180
12181    if (IS_PF(sc)) {
12182        bxe_set_storm_rx_mode(sc);
12183    }
12184}
12185
12186
12187/* update flags in shmem */
12188static void
12189bxe_update_drv_flags(struct bxe_softc *sc,
12190                     uint32_t         flags,
12191                     uint32_t         set)
12192{
12193    uint32_t drv_flags;
12194
12195    if (SHMEM2_HAS(sc, drv_flags)) {
12196        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12197        drv_flags = SHMEM2_RD(sc, drv_flags);
12198
12199        if (set) {
12200            SET_FLAGS(drv_flags, flags);
12201        } else {
12202            RESET_FLAGS(drv_flags, flags);
12203        }
12204
12205        SHMEM2_WR(sc, drv_flags, drv_flags);
12206        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12207
12208        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12209    }
12210}
12211
12212/* periodic timer callout routine, only runs when the interface is up */
12213
12214static void
12215bxe_periodic_callout_func(void *xsc)
12216{
12217    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12218    int i;
12219
12220    if (!BXE_CORE_TRYLOCK(sc)) {
12221        /* just bail and try again next time */
12222
12223        if ((sc->state == BXE_STATE_OPEN) &&
12224            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12225            /* schedule the next periodic callout */
12226            callout_reset(&sc->periodic_callout, hz,
12227                          bxe_periodic_callout_func, sc);
12228        }
12229
12230        return;
12231    }
12232
12233    if ((sc->state != BXE_STATE_OPEN) ||
12234        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12235        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12236        BXE_CORE_UNLOCK(sc);
12237        return;
12238    }
12239
12240
12241    /* Check for TX timeouts on any fastpath. */
12242    FOR_EACH_QUEUE(sc, i) {
12243        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12244            /* Ruh-Roh, chip was reset! */
12245            break;
12246        }
12247    }
12248
12249    if (!CHIP_REV_IS_SLOW(sc)) {
12250        /*
12251         * This barrier is needed to ensure ordering between the write to
12252         * sc->port.pmf in bxe_nic_load() or bxe_pmf_update() and the
12253         * read here.
12254         */
12255        mb();
12256        if (sc->port.pmf) {
12257            bxe_acquire_phy_lock(sc);
12258            elink_period_func(&sc->link_params, &sc->link_vars);
12259            bxe_release_phy_lock(sc);
12260        }
12261    }
12262
12263    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12264        int mb_idx = SC_FW_MB_IDX(sc);
12265        uint32_t drv_pulse;
12266        uint32_t mcp_pulse;
12267
12268        ++sc->fw_drv_pulse_wr_seq;
12269        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12270
12271        drv_pulse = sc->fw_drv_pulse_wr_seq;
12272        bxe_drv_pulse(sc);
12273
12274        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12275                     MCP_PULSE_SEQ_MASK);
12276
12277        /*
12278         * The delta between driver pulse and mcp response should
12279         * be 1 (before mcp response) or 0 (after mcp response).
12280         */
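        /*
         * For example, a drv_pulse of 0x25 is acceptable against an
         * mcp_pulse of 0x25 (MCP already responded) or 0x24 (response still
         * pending); any other value triggers the error log below.
         */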
12281        if ((drv_pulse != mcp_pulse) &&
12282            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12283            /* someone lost a heartbeat... */
12284            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12285                  drv_pulse, mcp_pulse);
12286        }
12287    }
12288
12289    /* state is BXE_STATE_OPEN */
12290    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12291
12292    BXE_CORE_UNLOCK(sc);
12293
12294    if ((sc->state == BXE_STATE_OPEN) &&
12295        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12296        /* schedule the next periodic callout */
12297        callout_reset(&sc->periodic_callout, hz,
12298                      bxe_periodic_callout_func, sc);
12299    }
12300}
12301
12302static void
12303bxe_periodic_start(struct bxe_softc *sc)
12304{
12305    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12306    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12307}
12308
12309static void
12310bxe_periodic_stop(struct bxe_softc *sc)
12311{
12312    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12313    callout_drain(&sc->periodic_callout);
12314}
12315
12316/* start the controller */
12317static __noinline int
12318bxe_nic_load(struct bxe_softc *sc,
12319             int              load_mode)
12320{
12321    uint32_t val;
12322    int load_code = 0;
12323    int i, rc = 0;
12324
12325    BXE_CORE_LOCK_ASSERT(sc);
12326
12327    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12328
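    /*
     * Rough load sequence below: allocate fastpath/driver/FW-stats memory,
     * negotiate the load with the MCP (PF only), init the HW, attach
     * interrupts, start the function and set up the queues, then configure
     * RSS, the primary MAC and the RX mode before starting the periodic
     * callout. Each failure unwinds through the bxe_nic_load_error* labels.
     */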
12329    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12330
12331    if (IS_PF(sc)) {
12332        /* must be called before memory allocation and HW init */
12333        bxe_ilt_set_info(sc);
12334    }
12335
12336    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12337
12338    bxe_set_fp_rx_buf_size(sc);
12339
12340    if (bxe_alloc_fp_buffers(sc) != 0) {
12341        BLOGE(sc, "Failed to allocate fastpath memory\n");
12342        sc->state = BXE_STATE_CLOSED;
12343        rc = ENOMEM;
12344        goto bxe_nic_load_error0;
12345    }
12346
12347    if (bxe_alloc_mem(sc) != 0) {
12348        sc->state = BXE_STATE_CLOSED;
12349        rc = ENOMEM;
12350        goto bxe_nic_load_error0;
12351    }
12352
12353    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12354        sc->state = BXE_STATE_CLOSED;
12355        rc = ENOMEM;
12356        goto bxe_nic_load_error0;
12357    }
12358
12359    if (IS_PF(sc)) {
12360        /* set pf load just before approaching the MCP */
12361        bxe_set_pf_load(sc);
12362
12363        /* if MCP exists send load request and analyze response */
12364        if (!BXE_NOMCP(sc)) {
12365            /* attempt to load pf */
12366            if (bxe_nic_load_request(sc, &load_code) != 0) {
12367                sc->state = BXE_STATE_CLOSED;
12368                rc = ENXIO;
12369                goto bxe_nic_load_error1;
12370            }
12371
12372            /* what did the MCP say? */
12373            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12374                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12375                sc->state = BXE_STATE_CLOSED;
12376                rc = ENXIO;
12377                goto bxe_nic_load_error2;
12378            }
12379        } else {
12380            BLOGI(sc, "Device has no MCP!\n");
12381            load_code = bxe_nic_load_no_mcp(sc);
12382        }
12383
12384        /* mark PMF if applicable */
12385        bxe_nic_load_pmf(sc, load_code);
12386
12387        /* Init Function state controlling object */
12388        bxe_init_func_obj(sc);
12389
12390        /* Initialize HW */
12391        if (bxe_init_hw(sc, load_code) != 0) {
12392            BLOGE(sc, "HW init failed\n");
12393            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12394            sc->state = BXE_STATE_CLOSED;
12395            rc = ENXIO;
12396            goto bxe_nic_load_error2;
12397        }
12398    }
12399
12400    /* set ALWAYS_ALIVE bit in shmem */
12401    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12402    bxe_drv_pulse(sc);
12403    sc->flags |= BXE_NO_PULSE;
12404
12405    /* attach interrupts */
12406    if (bxe_interrupt_attach(sc) != 0) {
12407        sc->state = BXE_STATE_CLOSED;
12408        rc = ENXIO;
12409        goto bxe_nic_load_error2;
12410    }
12411
12412    bxe_nic_init(sc, load_code);
12413
12414    /* Init per-function objects */
12415    if (IS_PF(sc)) {
12416        bxe_init_objs(sc);
12417        // XXX bxe_iov_nic_init(sc);
12418
12419        /* set AFEX default VLAN tag to an invalid value */
12420        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12421        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12422
12423        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12424        rc = bxe_func_start(sc);
12425        if (rc) {
12426            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12427            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12428            sc->state = BXE_STATE_ERROR;
12429            goto bxe_nic_load_error3;
12430        }
12431
12432        /* send LOAD_DONE command to MCP */
12433        if (!BXE_NOMCP(sc)) {
12434            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12435            if (!load_code) {
12436                BLOGE(sc, "MCP response failure, aborting\n");
12437                sc->state = BXE_STATE_ERROR;
12438                rc = ENXIO;
12439                goto bxe_nic_load_error3;
12440            }
12441        }
12442
12443        rc = bxe_setup_leading(sc);
12444        if (rc) {
12445            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12446            sc->state = BXE_STATE_ERROR;
12447            goto bxe_nic_load_error3;
12448        }
12449
12450        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12451            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12452            if (rc) {
12453                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12454                sc->state = BXE_STATE_ERROR;
12455                goto bxe_nic_load_error3;
12456            }
12457        }
12458
12459        rc = bxe_init_rss_pf(sc);
12460        if (rc) {
12461            BLOGE(sc, "PF RSS init failed\n");
12462            sc->state = BXE_STATE_ERROR;
12463            goto bxe_nic_load_error3;
12464        }
12465    }
12466    /* XXX VF */
12467
12468    /* now that the Clients are configured we are ready to work */
12469    sc->state = BXE_STATE_OPEN;
12470
12471    /* Configure a ucast MAC */
12472    if (IS_PF(sc)) {
12473        rc = bxe_set_eth_mac(sc, TRUE);
12474    }
12475    if (rc) {
12476        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12477        sc->state = BXE_STATE_ERROR;
12478        goto bxe_nic_load_error3;
12479    }
12480
12481    if (sc->port.pmf) {
12482        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12483        if (rc) {
12484            sc->state = BXE_STATE_ERROR;
12485            goto bxe_nic_load_error3;
12486        }
12487    }
12488
12489    sc->link_params.feature_config_flags &=
12490        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12491
12492    /* start fast path */
12493
12494    /* Initialize Rx filter */
12495    bxe_set_rx_mode(sc);
12496
12497    /* start the Tx */
12498    switch (/* XXX load_mode */LOAD_OPEN) {
12499    case LOAD_NORMAL:
12500    case LOAD_OPEN:
12501        break;
12502
12503    case LOAD_DIAG:
12504    case LOAD_LOOPBACK_EXT:
12505        sc->state = BXE_STATE_DIAG;
12506        break;
12507
12508    default:
12509        break;
12510    }
12511
12512    if (sc->port.pmf) {
12513        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12514    } else {
12515        bxe_link_status_update(sc);
12516    }
12517
12518    /* start the periodic timer callout */
12519    bxe_periodic_start(sc);
12520
12521    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12522        /* mark driver is loaded in shmem2 */
12523        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12524        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12525                  (val |
12526                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12527                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12528    }
12529
12530    /* wait for all pending SP commands to complete */
12531    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12532        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12533        bxe_periodic_stop(sc);
12534        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12535        return (ENXIO);
12536    }
12537
12538    /* Tell the stack the driver is running! */
12539    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12540
12541    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12542
12543    return (0);
12544
12545bxe_nic_load_error3:
12546
12547    if (IS_PF(sc)) {
12548        bxe_int_disable_sync(sc, 1);
12549
12550        /* clean out queued objects */
12551        bxe_squeeze_objects(sc);
12552    }
12553
12554    bxe_interrupt_detach(sc);
12555
12556bxe_nic_load_error2:
12557
12558    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12559        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12560        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12561    }
12562
12563    sc->port.pmf = 0;
12564
12565bxe_nic_load_error1:
12566
12567    /* clear pf_load status, as it was already set */
12568    if (IS_PF(sc)) {
12569        bxe_clear_pf_load(sc);
12570    }
12571
12572bxe_nic_load_error0:
12573
12574    bxe_free_fw_stats_mem(sc);
12575    bxe_free_fp_buffers(sc);
12576    bxe_free_mem(sc);
12577
12578    return (rc);
12579}
12580
12581static int
12582bxe_init_locked(struct bxe_softc *sc)
12583{
12584    int other_engine = SC_PATH(sc) ? 0 : 1;
12585    uint8_t other_load_status, load_status;
12586    uint8_t global = FALSE;
12587    int rc;
12588
12589    BXE_CORE_LOCK_ASSERT(sc);
12590
12591    /* check if the driver is already running */
12592    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12593        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12594        return (0);
12595    }
12596
12597    bxe_set_power_state(sc, PCI_PM_D0);
12598
12599    /*
12600     * If parity occurred during the unload, then attentions and/or
12601     * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12602     * loaded on the current engine to complete the recovery. Parity recovery
12603     * is only relevant for the PF driver.
12604     */
12605    if (IS_PF(sc)) {
12606        other_load_status = bxe_get_load_status(sc, other_engine);
12607        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12608
12609        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12610            bxe_chk_parity_attn(sc, &global, TRUE)) {
12611            do {
12612                /*
12613                 * If there are attentions and they are in global blocks, set
12614                 * the GLOBAL_RESET bit regardless of whether it will be this
12615                 * function that will complete the recovery or not.
12616                 */
12617                if (global) {
12618                    bxe_set_reset_global(sc);
12619                }
12620
12621                /*
12622                 * Only the first function on the current engine should try
12623                 * to recover in open. In case of attentions in global blocks
12624                 * only the first in the chip should try to recover.
12625                 */
12626                if ((!load_status && (!global || !other_load_status)) &&
12627                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12628                    BLOGI(sc, "Recovered during init\n");
12629                    break;
12630                }
12631
12632                /* recovery has failed... */
12633                bxe_set_power_state(sc, PCI_PM_D3hot);
12634                sc->recovery_state = BXE_RECOVERY_FAILED;
12635
12636                BLOGE(sc, "Recovery flow hasn't properly "
12637                          "completed yet, try again later. "
12638                          "If you still see this message after a "
12639                          "few retries then a power cycle is required.\n");
12640
12641                rc = ENXIO;
12642                goto bxe_init_locked_done;
12643            } while (0);
12644        }
12645    }
12646
12647    sc->recovery_state = BXE_RECOVERY_DONE;
12648
12649    rc = bxe_nic_load(sc, LOAD_OPEN);
12650
12651bxe_init_locked_done:
12652
12653    if (rc) {
12654        /* Tell the stack the driver is NOT running! */
12655        BLOGE(sc, "Initialization failed, "
12656                  "stack notified driver is NOT running!\n");
12657        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12658    }
12659
12660    return (rc);
12661}
12662
12663static int
12664bxe_stop_locked(struct bxe_softc *sc)
12665{
12666    BXE_CORE_LOCK_ASSERT(sc);
12667    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12668}
12669
12670/*
12671 * Handles controller initialization when called from an unlocked routine.
12672 * ifconfig calls this function.
12673 *
12674 * Returns:
12675 *   void
12676 */
12677static void
12678bxe_init(void *xsc)
12679{
12680    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12681
12682    BXE_CORE_LOCK(sc);
12683    bxe_init_locked(sc);
12684    BXE_CORE_UNLOCK(sc);
12685}
12686
12687static int
12688bxe_init_ifnet(struct bxe_softc *sc)
12689{
12690    if_t ifp;
12691    int capabilities;
12692
12693    /* ifconfig entrypoint for media type/status reporting */
12694    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12695                 bxe_ifmedia_update,
12696                 bxe_ifmedia_status);
12697
12698    /* set the default interface values */
12699    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12700    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12701    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12702
12703    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12704    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12705
12706    /* allocate the ifnet structure */
12707    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12708        BLOGE(sc, "Interface allocation failed!\n");
12709        return (ENXIO);
12710    }
12711
12712    if_setsoftc(ifp, sc);
12713    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12714    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12715    if_setioctlfn(ifp, bxe_ioctl);
12716    if_setstartfn(ifp, bxe_tx_start);
12717    if_setgetcounterfn(ifp, bxe_get_counter);
12718#if __FreeBSD_version >= 901504
12719    if_settransmitfn(ifp, bxe_tx_mq_start);
12720    if_setqflushfn(ifp, bxe_mq_flush);
12721#endif
12722#ifdef FreeBSD8_0
12723    if_settimer(ifp, 0);
12724#endif
12725    if_setinitfn(ifp, bxe_init);
12726    if_setmtu(ifp, sc->mtu);
12727    if_sethwassist(ifp, (CSUM_IP      |
12728                        CSUM_TCP      |
12729                        CSUM_UDP      |
12730                        CSUM_TSO      |
12731                        CSUM_TCP_IPV6 |
12732                        CSUM_UDP_IPV6));
12733
12734    capabilities =
12735#if __FreeBSD_version < 700000
12736        (IFCAP_VLAN_MTU       |
12737         IFCAP_VLAN_HWTAGGING |
12738         IFCAP_HWCSUM         |
12739         IFCAP_JUMBO_MTU      |
12740         IFCAP_LRO);
12741#else
12742        (IFCAP_VLAN_MTU       |
12743         IFCAP_VLAN_HWTAGGING |
12744         IFCAP_VLAN_HWTSO     |
12745         IFCAP_VLAN_HWFILTER  |
12746         IFCAP_VLAN_HWCSUM    |
12747         IFCAP_HWCSUM         |
12748         IFCAP_JUMBO_MTU      |
12749         IFCAP_LRO            |
12750         IFCAP_TSO4           |
12751         IFCAP_TSO6           |
12752         IFCAP_WOL_MAGIC);
12753#endif
12754    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12755    if_setcapenable(ifp, if_getcapabilities(ifp));
12756    if_setbaudrate(ifp, IF_Gbps(10));
12757/* XXX */
12758    if_setsendqlen(ifp, sc->tx_ring_size);
12759    if_setsendqready(ifp);
12760/* XXX */
12761
12762    sc->ifp = ifp;
12763
12764    /* attach to the Ethernet interface list */
12765    ether_ifattach(ifp, sc->link_params.mac_addr);
12766
12767    return (0);
12768}
12769
12770static void
12771bxe_deallocate_bars(struct bxe_softc *sc)
12772{
12773    int i;
12774
12775    for (i = 0; i < MAX_BARS; i++) {
12776        if (sc->bar[i].resource != NULL) {
12777            bus_release_resource(sc->dev,
12778                                 SYS_RES_MEMORY,
12779                                 sc->bar[i].rid,
12780                                 sc->bar[i].resource);
12781            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12782                  i, PCIR_BAR(i));
12783        }
12784    }
12785}
12786
12787static int
12788bxe_allocate_bars(struct bxe_softc *sc)
12789{
12790    u_int flags;
12791    int i;
12792
12793    memset(sc->bar, 0, sizeof(sc->bar));
12794
12795    for (i = 0; i < MAX_BARS; i++) {
12796
12797        /* memory resources reside at BARs 0, 2, 4 */
12798        /* Run `pciconf -lb` to see mappings */
12799        if ((i != 0) && (i != 2) && (i != 4)) {
12800            continue;
12801        }
12802
12803        sc->bar[i].rid = PCIR_BAR(i);
12804
12805        flags = RF_ACTIVE;
12806        if (i == 0) {
12807            flags |= RF_SHAREABLE;
12808        }
12809
12810        if ((sc->bar[i].resource =
12811             bus_alloc_resource_any(sc->dev,
12812                                    SYS_RES_MEMORY,
12813                                    &sc->bar[i].rid,
12814                                    flags)) == NULL) {
12815            return (ENXIO); /* BAR allocation failed */
12816        }
12817
12818        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12819        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12820        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12821
12822        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n",
12823              i, PCIR_BAR(i),
12824              (void *)rman_get_start(sc->bar[i].resource),
12825              (void *)rman_get_end(sc->bar[i].resource),
12826              rman_get_size(sc->bar[i].resource),
12827              (void *)sc->bar[i].kva);
12828    }
12829
12830    return (0);
12831}
12832
12833static void
12834bxe_get_function_num(struct bxe_softc *sc)
12835{
12836    uint32_t val = 0;
12837
12838    /*
12839     * Read the ME register to get the function number. The ME register
12840     * holds the relative-function number and absolute-function number. The
12841     * absolute-function number appears only in E2 and above. Before that
12842     * these bits always contained zero, therefore we cannot blindly use them.
12843     */
12844
12845    val = REG_RD(sc, BAR_ME_REGISTER);
12846
12847    sc->pfunc_rel =
12848        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12849    sc->path_id =
12850        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12851
12852    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12853        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12854    } else {
12855        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12856    }
12857
12858    BLOGD(sc, DBG_LOAD,
12859          "Relative function %d, Absolute function %d, Path %d\n",
12860          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12861}
12862
12863static uint32_t
12864bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12865{
12866    uint32_t shmem2_size;
12867    uint32_t offset;
12868    uint32_t mf_cfg_offset_value;
12869
12870    /* Non 57712 */
12871    offset = (SHMEM_RD(sc, func_mb) +
12872              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
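    /*
     * i.e. the default MF config offset is derived from the func_mb field
     * plus the size of the per-function mailbox array; 57712 and newer may
     * override it below with an explicit mf_cfg address from shmem2.
     */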
12873
12874    /* 57712 plus */
12875    if (sc->devinfo.shmem2_base != 0) {
12876        shmem2_size = SHMEM2_RD(sc, size);
12877        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12878            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12879            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12880                offset = mf_cfg_offset_value;
12881            }
12882        }
12883    }
12884
12885    return (offset);
12886}
12887
12888static uint32_t
12889bxe_pcie_capability_read(struct bxe_softc *sc,
12890                         int    reg,
12891                         int    width)
12892{
12893    int pcie_reg;
12894
12895    /* ensure PCIe capability is enabled */
12896    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12897        if (pcie_reg != 0) {
12898            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12899            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12900        }
12901    }
12902
12903    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12904
12905    return (0);
12906}
12907
12908static uint8_t
12909bxe_is_pcie_pending(struct bxe_softc *sc)
12910{
12911    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12912            PCIM_EXP_STA_TRANSACTION_PND);
12913}
12914
12915/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
12918 * best to walk the list rather than make assumptions.
12919 */
12920static void
12921bxe_probe_pci_caps(struct bxe_softc *sc)
12922{
12923    uint16_t link_status;
12924    int reg;
12925
12926    /* check if PCI Power Management is enabled */
12927    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12928        if (reg != 0) {
12929            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12930
12931            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12932            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12933        }
12934    }
12935
12936    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
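    /*
     * Per the PCIe spec, the Link Status register encodes the negotiated
     * link speed in bits 3:0 and the negotiated link width in bits 9:4,
     * which is why the width extractions below shift right by 4.
     */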
12937
12938    /* handle PCIe 2.0 workarounds for 57710 */
12939    if (CHIP_IS_E1(sc)) {
12940        /* workaround for 57710 errata E4_57710_27462 */
12941        sc->devinfo.pcie_link_speed =
12942            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12943
12944        /* workaround for 57710 errata E4_57710_27488 */
12945        sc->devinfo.pcie_link_width =
12946            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12947        if (sc->devinfo.pcie_link_speed > 1) {
12948            sc->devinfo.pcie_link_width =
12949                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12950        }
12951    } else {
12952        sc->devinfo.pcie_link_speed =
12953            (link_status & PCIM_LINK_STA_SPEED);
12954        sc->devinfo.pcie_link_width =
12955            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12956    }
12957
12958    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12959          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12960
    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    /* record the offset of the PCIe capability itself, not the last cap found */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &reg) == 0)
        sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
12963
12964    /* check if MSI capability is enabled */
12965    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12966        if (reg != 0) {
12967            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12968
12969            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12970            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12971        }
12972    }
12973
12974    /* check if MSI-X capability is enabled */
12975    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12976        if (reg != 0) {
12977            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
12978
12979            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
12980            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
12981        }
12982    }
12983}
12984
12985static int
12986bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
12987{
12988    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12989    uint32_t val;
12990
12991    /* get the outer vlan if we're in switch-dependent mode */
12992
12993    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
12994    mf_info->ext_id = (uint16_t)val;
12995
12996    mf_info->multi_vnics_mode = 1;
12997
12998    if (!VALID_OVLAN(mf_info->ext_id)) {
12999        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13000        return (1);
13001    }
13002
13003    /* get the capabilities */
13004    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13005        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13006        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13007    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13008               FUNC_MF_CFG_PROTOCOL_FCOE) {
13009        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13010    } else {
13011        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13012    }
13013
13014    mf_info->vnics_per_port =
13015        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13016
13017    return (0);
13018}
13019
13020static uint32_t
13021bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13022{
13023    uint32_t retval = 0;
13024    uint32_t val;
13025
13026    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13027
13028    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13029        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13030            retval |= MF_PROTO_SUPPORT_ETHERNET;
13031        }
13032        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13033            retval |= MF_PROTO_SUPPORT_ISCSI;
13034        }
13035        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13036            retval |= MF_PROTO_SUPPORT_FCOE;
13037        }
13038    }
13039
13040    return (retval);
13041}
13042
13043static int
13044bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13045{
13046    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13047    uint32_t val;
13048
13049    /*
13050     * There is no outer vlan if we're in switch-independent mode.
13051     * If the mac is valid then assume multi-function.
13052     */
13053
13054    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13055
13056    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13057
13058    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13059
13060    mf_info->vnics_per_port =
13061        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13062
13063    return (0);
13064}
13065
13066static int
13067bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13068{
13069    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13070    uint32_t e1hov_tag;
13071    uint32_t func_config;
13072    uint32_t niv_config;
13073
13074    mf_info->multi_vnics_mode = 1;
13075
13076    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13077    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13078    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13079
13080    mf_info->ext_id =
13081        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13082                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13083
13084    mf_info->default_vlan =
13085        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13086                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13087
13088    mf_info->niv_allowed_priorities =
13089        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13090                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13091
13092    mf_info->niv_default_cos =
13093        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13094                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13095
13096    mf_info->afex_vlan_mode =
13097        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13098         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13099
13100    mf_info->niv_mba_enabled =
13101        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13102         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13103
13104    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13105
13106    mf_info->vnics_per_port =
13107        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13108
13109    return (0);
13110}
13111
13112static int
13113bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13114{
13115    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13116    uint32_t mf_cfg1;
13117    uint32_t mf_cfg2;
13118    uint32_t ovlan1;
13119    uint32_t ovlan2;
13120    uint8_t i, j;
13121
13122    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13123          SC_PORT(sc));
13124    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13125          mf_info->mf_config[SC_VN(sc)]);
13126    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13127          mf_info->multi_vnics_mode);
13128    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13129          mf_info->vnics_per_port);
13130    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13131          mf_info->ext_id);
13132    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13133          mf_info->min_bw[0], mf_info->min_bw[1],
13134          mf_info->min_bw[2], mf_info->min_bw[3]);
13135    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13136          mf_info->max_bw[0], mf_info->max_bw[1],
13137          mf_info->max_bw[2], mf_info->max_bw[3]);
13138    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13139          sc->mac_addr_str);
13140
13141    /* various MF mode sanity checks... */
13142
13143    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13144        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13145              SC_PORT(sc));
13146        return (1);
13147    }
13148
13149    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13150        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13151              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13152        return (1);
13153    }
13154
13155    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13156        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13157        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13158            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13159                  SC_VN(sc), OVLAN(sc));
13160            return (1);
13161        }
13162
13163        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13164            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13165                  mf_info->multi_vnics_mode, OVLAN(sc));
13166            return (1);
13167        }
13168
13169        /*
13170         * Verify all functions are either MF or SF mode. If MF, make sure
13171         * sure that all non-hidden functions have a valid ovlan. If SF,
13172         * make sure that all non-hidden functions have an invalid ovlan.
13173         */
13174        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13175            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13176            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13177            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13178                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13179                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13180                BLOGE(sc, "mf_mode=SD function %d MF config "
13181                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13182                      i, mf_info->multi_vnics_mode, ovlan1);
13183                return (1);
13184            }
13185        }
13186
13187        /* Verify all funcs on the same port each have a different ovlan. */
13188        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13189            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13190            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            /* iterate over the remaining funcs on this port (abs func nums step by 2) */
13192            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13193                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13194                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13195                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13196                    VALID_OVLAN(ovlan1) &&
13197                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13198                    VALID_OVLAN(ovlan2) &&
13199                    (ovlan1 == ovlan2)) {
13200                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13201                              "have the same ovlan (%d)\n",
13202                          i, j, ovlan1);
13203                    return (1);
13204                }
13205            }
13206        }
13207    } /* MULTI_FUNCTION_SD */
13208
13209    return (0);
13210}
13211
13212static int
13213bxe_get_mf_cfg_info(struct bxe_softc *sc)
13214{
13215    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13216    uint32_t val, mac_upper;
13217    uint8_t i, vnic;
13218
13219    /* initialize mf_info defaults */
13220    mf_info->vnics_per_port   = 1;
13221    mf_info->multi_vnics_mode = FALSE;
13222    mf_info->path_has_ovlan   = FALSE;
13223    mf_info->mf_mode          = SINGLE_FUNCTION;
13224
13225    if (!CHIP_IS_MF_CAP(sc)) {
13226        return (0);
13227    }
13228
13229    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13230        BLOGE(sc, "Invalid mf_cfg_base!\n");
13231        return (1);
13232    }
13233
13234    /* get the MF mode (switch dependent / independent / single-function) */
13235
13236    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13237
13238    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13239    {
13240    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13241
13242        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13243
13244        /* check for legal upper mac bytes */
13245        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13246            mf_info->mf_mode = MULTI_FUNCTION_SI;
13247        } else {
13248            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13249        }
13250
13251        break;
13252
13253    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13254    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13255
13256        /* get outer vlan configuration */
13257        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13258
13259        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13260            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13261            mf_info->mf_mode = MULTI_FUNCTION_SD;
13262        } else {
13263            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13264        }
13265
13266        break;
13267
13268    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13269
13270        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13271        return (0);
13272
13273    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13274
13275        /*
13276         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13277         * and the MAC address is valid.
13278         */
13279        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13280
13281        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13282            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13283            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13284        } else {
13285            BLOGE(sc, "Invalid config for AFEX mode\n");
13286        }
13287
13288        break;
13289
13290    default:
13291
13292        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13293              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13294
13295        return (1);
13296    }
13297
13298    /* set path mf_mode (which could be different than function mf_mode) */
13299    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13300        mf_info->path_has_ovlan = TRUE;
13301    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13302        /*
13303         * Decide on path multi vnics mode. If we're not in MF mode and in
13304         * 4-port mode, this is good enough to check vnic-0 of the other port
13305         * on the same path
13306         */
13307        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13308            uint8_t other_port = !(PORT_ID(sc) & 1);
13309            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13310
13311            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13312
13313            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13314        }
13315    }
13316
13317    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13318        /* invalid MF config */
13319        if (SC_VN(sc) >= 1) {
13320            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13321            return (1);
13322        }
13323
13324        return (0);
13325    }
13326
13327    /* get the MF configuration */
13328    mf_info->mf_config[SC_VN(sc)] =
13329        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13330
13331    switch(mf_info->mf_mode)
13332    {
13333    case MULTI_FUNCTION_SD:
13334
13335        bxe_get_shmem_mf_cfg_info_sd(sc);
13336        break;
13337
13338    case MULTI_FUNCTION_SI:
13339
13340        bxe_get_shmem_mf_cfg_info_si(sc);
13341        break;
13342
13343    case MULTI_FUNCTION_AFEX:
13344
13345        bxe_get_shmem_mf_cfg_info_niv(sc);
13346        break;
13347
13348    default:
13349
13350        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13351              mf_info->mf_mode);
13352        return (1);
13353    }
13354
13355    /* get the congestion management parameters */
13356
13357    vnic = 0;
13358    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13359        /* get min/max bw */
13360        val = MFCFG_RD(sc, func_mf_config[i].config);
13361        mf_info->min_bw[vnic] =
13362            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13363        mf_info->max_bw[vnic] =
13364            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13365        vnic++;
13366    }
13367
13368    return (bxe_check_valid_mf_cfg(sc));
13369}
13370
13371static int
13372bxe_get_shmem_info(struct bxe_softc *sc)
13373{
13374    int port;
13375    uint32_t mac_hi, mac_lo, val;
13376
13377    port = SC_PORT(sc);
13378    mac_hi = mac_lo = 0;
13379
13380    sc->link_params.sc   = sc;
13381    sc->link_params.port = port;
13382
13383    /* get the hardware config info */
13384    sc->devinfo.hw_config =
13385        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13386    sc->devinfo.hw_config2 =
13387        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13388
13389    sc->link_params.hw_led_mode =
13390        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13391         SHARED_HW_CFG_LED_MODE_SHIFT);
13392
13393    /* get the port feature config */
13394    sc->port.config =
13395        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13396
13397    /* get the link params */
13398    sc->link_params.speed_cap_mask[0] =
13399        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13400    sc->link_params.speed_cap_mask[1] =
13401        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13402
13403    /* get the lane config */
13404    sc->link_params.lane_config =
13405        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13406
13407    /* get the link config */
13408    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13409    sc->port.link_config[ELINK_INT_PHY] = val;
13410    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13411    sc->port.link_config[ELINK_EXT_PHY1] =
13412        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13413
13414    /* get the override preemphasis flag and enable it or turn it off */
13415    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13416    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13417        sc->link_params.feature_config_flags |=
13418            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13419    } else {
13420        sc->link_params.feature_config_flags &=
13421            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13422    }
13423
13424    /* get the initial value of the link params */
13425    sc->link_params.multi_phy_config =
13426        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13427
13428    /* get external phy info */
13429    sc->port.ext_phy_config =
13430        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13431
13432    /* get the multifunction configuration */
13433    bxe_get_mf_cfg_info(sc);
13434
13435    /* get the mac address */
13436    if (IS_MF(sc)) {
13437        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13438        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13439    } else {
13440        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13441        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13442    }
13443
13444    if ((mac_lo == 0) && (mac_hi == 0)) {
13445        *sc->mac_addr_str = 0;
13446        BLOGE(sc, "No Ethernet address programmed!\n");
13447    } else {
13448        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13449        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13450        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13451        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13452        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13453        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13454        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13455                 "%02x:%02x:%02x:%02x:%02x:%02x",
13456                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13457                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13458                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13459        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13460    }
13461
13462    return (0);
13463}
13464
13465static void
13466bxe_get_tunable_params(struct bxe_softc *sc)
13467{
13468    /* sanity checks */
13469
13470    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13471        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13472        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13473        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13474        bxe_interrupt_mode = INTR_MODE_MSIX;
13475    }
13476
13477    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13478        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13479        bxe_queue_count = 0;
13480    }
13481
13482    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13483        if (bxe_max_rx_bufs == 0) {
13484            bxe_max_rx_bufs = RX_BD_USABLE;
13485        } else {
13486            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13487            bxe_max_rx_bufs = 2048;
13488        }
13489    }
13490
13491    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13492        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13493        bxe_hc_rx_ticks = 25;
13494    }
13495
13496    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13497        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13498        bxe_hc_tx_ticks = 50;
13499    }
13500
13501    if (bxe_max_aggregation_size == 0) {
13502        bxe_max_aggregation_size = TPA_AGG_SIZE;
13503    }
13504
13505    if (bxe_max_aggregation_size > 0xffff) {
13506        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13507              bxe_max_aggregation_size);
13508        bxe_max_aggregation_size = TPA_AGG_SIZE;
13509    }
13510
13511    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13512        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13513        bxe_mrrs = -1;
13514    }
13515
13516    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13517        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13518        bxe_autogreeen = 0;
13519    }
13520
13521    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13522        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13523        bxe_udp_rss = 0;
13524    }
13525
13526    /* pull in user settings */
13527
13528    sc->interrupt_mode       = bxe_interrupt_mode;
13529    sc->max_rx_bufs          = bxe_max_rx_bufs;
13530    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13531    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13532    sc->max_aggregation_size = bxe_max_aggregation_size;
13533    sc->mrrs                 = bxe_mrrs;
13534    sc->autogreeen           = bxe_autogreeen;
13535    sc->udp_rss              = bxe_udp_rss;
13536
13537    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13538        sc->num_queues = 1;
13539    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13540        sc->num_queues =
13541            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13542                MAX_RSS_CHAINS);
13543        if (sc->num_queues > mp_ncpus) {
13544            sc->num_queues = mp_ncpus;
13545        }
13546    }
13547
13548    BLOGD(sc, DBG_LOAD,
13549          "User Config: "
13550          "debug=0x%lx "
13551          "interrupt_mode=%d "
13552          "queue_count=%d "
13553          "hc_rx_ticks=%d "
13554          "hc_tx_ticks=%d "
13555          "rx_budget=%d "
13556          "max_aggregation_size=%d "
13557          "mrrs=%d "
13558          "autogreeen=%d "
13559          "udp_rss=%d\n",
13560          bxe_debug,
13561          sc->interrupt_mode,
13562          sc->num_queues,
13563          sc->hc_rx_ticks,
13564          sc->hc_tx_ticks,
13565          bxe_rx_budget,
13566          sc->max_aggregation_size,
13567          sc->mrrs,
13568          sc->autogreeen,
13569          sc->udp_rss);
13570}
13571
13572static int
13573bxe_media_detect(struct bxe_softc *sc)
13574{
13575    int port_type;
13576    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13577
13578    switch (sc->link_params.phy[phy_idx].media_type) {
13579    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13580    case ELINK_ETH_PHY_XFP_FIBER:
13581        BLOGI(sc, "Found 10Gb Fiber media.\n");
13582        sc->media = IFM_10G_SR;
13583        port_type = PORT_FIBRE;
13584        break;
13585    case ELINK_ETH_PHY_SFP_1G_FIBER:
13586        BLOGI(sc, "Found 1Gb Fiber media.\n");
13587        sc->media = IFM_1000_SX;
13588        port_type = PORT_FIBRE;
13589        break;
13590    case ELINK_ETH_PHY_KR:
13591    case ELINK_ETH_PHY_CX4:
13592        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13593        sc->media = IFM_10G_CX4;
13594        port_type = PORT_FIBRE;
13595        break;
13596    case ELINK_ETH_PHY_DA_TWINAX:
13597        BLOGI(sc, "Found 10Gb Twinax media.\n");
13598        sc->media = IFM_10G_TWINAX;
13599        port_type = PORT_DA;
13600        break;
13601    case ELINK_ETH_PHY_BASE_T:
13602        if (sc->link_params.speed_cap_mask[0] &
13603            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13604            BLOGI(sc, "Found 10GBase-T media.\n");
13605            sc->media = IFM_10G_T;
13606            port_type = PORT_TP;
13607        } else {
13608            BLOGI(sc, "Found 1000Base-T media.\n");
13609            sc->media = IFM_1000_T;
13610            port_type = PORT_TP;
13611        }
13612        break;
13613    case ELINK_ETH_PHY_NOT_PRESENT:
13614        BLOGI(sc, "Media not present.\n");
13615        sc->media = 0;
13616        port_type = PORT_OTHER;
13617        break;
13618    case ELINK_ETH_PHY_UNSPECIFIED:
13619    default:
13620        BLOGI(sc, "Unknown media!\n");
13621        sc->media = 0;
13622        port_type = PORT_OTHER;
13623        break;
13624    }
    return (port_type);
13626}
13627
13628#define GET_FIELD(value, fname)                     \
13629    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13630#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13631#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
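/*
 * GET_FIELD() is a generic mask-and-shift extractor; for example IGU_FID(val)
 * expands to ((val & IGU_REG_MAPPING_MEMORY_FID_MASK) >>
 * IGU_REG_MAPPING_MEMORY_FID_SHIFT), pulling the function ID out of an IGU
 * CAM entry below.
 */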
13632
13633static int
13634bxe_get_igu_cam_info(struct bxe_softc *sc)
13635{
13636    int pfid = SC_FUNC(sc);
13637    int igu_sb_id;
13638    uint32_t val;
13639    uint8_t fid, igu_sb_cnt = 0;
13640
13641    sc->igu_base_sb = 0xff;
13642
13643    if (CHIP_INT_MODE_IS_BC(sc)) {
13644        int vn = SC_VN(sc);
13645        igu_sb_cnt = sc->igu_sb_cnt;
13646        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13647                           FP_SB_MAX_E1x);
13648        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13649                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13650        return (0);
13651    }
13652
13653    /* IGU in normal mode - read CAM */
13654    for (igu_sb_id = 0;
13655         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13656         igu_sb_id++) {
13657        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13658        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13659            continue;
13660        }
13661        fid = IGU_FID(val);
13662        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13663            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13664                continue;
13665            }
13666            if (IGU_VEC(val) == 0) {
13667                /* default status block */
13668                sc->igu_dsb_id = igu_sb_id;
13669            } else {
13670                if (sc->igu_base_sb == 0xff) {
13671                    sc->igu_base_sb = igu_sb_id;
13672                }
13673                igu_sb_cnt++;
13674            }
13675        }
13676    }
13677
13678    /*
     * Due to the new PF resource allocation by MFW T7.4 and above, the number
     * of CAM entries may not equal the value advertised in PCI config space.
     * The driver should use the minimum of the two as the actual status
     * block count.
13683     */
13684    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13685
13686    if (igu_sb_cnt == 0) {
13687        BLOGE(sc, "CAM configuration error\n");
13688        return (-1);
13689    }
13690
13691    return (0);
13692}
13693
13694/*
13695 * Gather various information from the device config space, the device itself,
13696 * shmem, and the user input.
13697 */
13698static int
13699bxe_get_device_info(struct bxe_softc *sc)
13700{
13701    uint32_t val;
13702    int rc;
13703
13704    /* Get the data for the device */
13705    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13706    sc->devinfo.device_id    = pci_get_device(sc->dev);
13707    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13708    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13709
13710    /* get the chip revision (chip metal comes from pci config space) */
13711    sc->devinfo.chip_id     =
13712    sc->link_params.chip_id =
13713        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13714         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13715         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13716         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
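    /*
     * Resulting chip_id layout (see the debug print below): bits 31:16 chip
     * number, 15:12 revision, 11:4 metal, 3:0 bond id.
     */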
13717
13718    /* force 57811 according to MISC register */
13719    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13720        if (CHIP_IS_57810(sc)) {
13721            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13722                                   (sc->devinfo.chip_id & 0x0000ffff));
13723        } else if (CHIP_IS_57810_MF(sc)) {
13724            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13725                                   (sc->devinfo.chip_id & 0x0000ffff));
13726        }
13727        sc->devinfo.chip_id |= 0x1;
13728    }
13729
13730    BLOGD(sc, DBG_LOAD,
13731          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13732          sc->devinfo.chip_id,
13733          ((sc->devinfo.chip_id >> 16) & 0xffff),
13734          ((sc->devinfo.chip_id >> 12) & 0xf),
13735          ((sc->devinfo.chip_id >>  4) & 0xff),
13736          ((sc->devinfo.chip_id >>  0) & 0xf));
13737
13738    val = (REG_RD(sc, 0x2874) & 0x55);
13739    if ((sc->devinfo.chip_id & 0x1) ||
13740        (CHIP_IS_E1(sc) && val) ||
13741        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13742        sc->flags |= BXE_ONE_PORT_FLAG;
13743        BLOGD(sc, DBG_LOAD, "single port device\n");
13744    }
13745
13746    /* set the doorbell size */
13747    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13748
13749    /* determine whether the device is in 2 port or 4 port mode */
13750    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13751    if (CHIP_IS_E2E3(sc)) {
13752        /*
13753         * Read port4mode_en_ovwr[0]:
13754         *   If 1, four port mode is in port4mode_en_ovwr[1].
13755         *   If 0, four port mode is in port4mode_en[0].
13756         */
13757        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13758        if (val & 1) {
13759            val = ((val >> 1) & 1);
13760        } else {
13761            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13762        }
13763
13764        sc->devinfo.chip_port_mode =
13765            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13766
13767        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13768    }
13769
13770    /* get the function and path info for the device */
13771    bxe_get_function_num(sc);
13772
13773    /* get the shared memory base address */
13774    sc->devinfo.shmem_base     =
13775    sc->link_params.shmem_base =
13776        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13777    sc->devinfo.shmem2_base =
13778        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13779                                  MISC_REG_GENERIC_CR_0));
13780
13781    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13782          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13783
13784    if (!sc->devinfo.shmem_base) {
13785        /* this should ONLY prevent upcoming shmem reads */
13786        BLOGI(sc, "MCP not active\n");
13787        sc->flags |= BXE_NO_MCP_FLAG;
13788        return (0);
13789    }
13790
13791    /* make sure the shared memory contents are valid */
13792    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13793    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13794        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13795        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13796        return (0);
13797    }
13798    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13799
13800    /* get the bootcode version */
13801    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13802    snprintf(sc->devinfo.bc_ver_str,
13803             sizeof(sc->devinfo.bc_ver_str),
13804             "%d.%d.%d",
13805             ((sc->devinfo.bc_ver >> 24) & 0xff),
13806             ((sc->devinfo.bc_ver >> 16) & 0xff),
13807             ((sc->devinfo.bc_ver >>  8) & 0xff));
13808    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13809
13810    /* get the bootcode shmem address */
13811    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13812    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
13813
13814    /* clean indirect addresses as they're not used */
13815    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13816    if (IS_PF(sc)) {
13817        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13818        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13819        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13820        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13821        if (CHIP_IS_E1x(sc)) {
13822            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13823            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13824            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13825            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13826        }
13827
13828        /*
13829         * Enable internal target-read (in case we are probed after PF
13830         * FLR). Must be done prior to any BAR read access. Only for
13831         * 57712 and up
13832         */
13833        if (!CHIP_IS_E1x(sc)) {
13834            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13835        }
13836    }
13837
13838    /* get the nvram size */
13839    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13840    sc->devinfo.flash_size =
13841        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13842    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13843
    /* get PCI capabilities */
13845    bxe_probe_pci_caps(sc);
13846
13847    bxe_set_power_state(sc, PCI_PM_D0);
13848
13849    /* get various configuration parameters from shmem */
13850    bxe_get_shmem_info(sc);
13851
13852    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13853        val = pci_read_config(sc->dev,
13854                              (sc->devinfo.pcie_msix_cap_reg +
13855                               PCIR_MSIX_CTRL),
13856                              2);
13857        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13858    } else {
13859        sc->igu_sb_cnt = 1;
13860    }
13861
13862    sc->igu_base_addr = BAR_IGU_INTMEM;
13863
13864    /* initialize IGU parameters */
13865    if (CHIP_IS_E1x(sc)) {
13866        sc->devinfo.int_block = INT_BLOCK_HC;
13867        sc->igu_dsb_id = DEF_SB_IGU_ID;
13868        sc->igu_base_sb = 0;
13869    } else {
13870        sc->devinfo.int_block = INT_BLOCK_IGU;
13871
        /* do not allow device reset during IGU info processing */
13873        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13874
13875        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13876
13877        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13878            int tout = 5000;
13879
13880            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13881
13882            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13883            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13884            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13885
13886            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13887                tout--;
13888                DELAY(1000);
13889            }
13890
13891            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13892                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13893                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13894                return (-1);
13895            }
13896        }
13897
13898        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13899            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13900            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13901        } else {
13902            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13903        }
13904
13905        rc = bxe_get_igu_cam_info(sc);
13906
13907        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13908
13909        if (rc) {
13910            return (rc);
13911        }
13912    }
13913
13914    /*
13915     * Get base FW non-default (fast path) status block ID. This value is
13916     * used to initialize the fw_sb_id saved on the fp/queue structure to
13917     * determine the id used by the FW.
13918     */
13919    if (CHIP_IS_E1x(sc)) {
13920        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13921    } else {
13922        /*
13923         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13924         * the same queue are indicated on the same IGU SB). So we prefer
13925         * FW and IGU SBs to be the same value.
13926         */
13927        sc->base_fw_ndsb = sc->igu_base_sb;
13928    }
13929
13930    BLOGD(sc, DBG_LOAD,
13931          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13932          sc->igu_dsb_id, sc->igu_base_sb,
13933          sc->igu_sb_cnt, sc->base_fw_ndsb);
13934
13935    elink_phy_probe(&sc->link_params);
13936
13937    return (0);
13938}
13939
13940static void
13941bxe_link_settings_supported(struct bxe_softc *sc,
13942                            uint32_t         switch_cfg)
13943{
13944    uint32_t cfg_size = 0;
13945    uint32_t idx;
13946    uint8_t port = SC_PORT(sc);
13947
13948    /* aggregation of supported attributes of all external phys */
13949    sc->port.supported[0] = 0;
13950    sc->port.supported[1] = 0;
13951
13952    switch (sc->link_params.num_phys) {
13953    case 1:
13954        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13955        cfg_size = 1;
13956        break;
13957    case 2:
13958        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13959        cfg_size = 1;
13960        break;
13961    case 3:
13962        if (sc->link_params.multi_phy_config &
13963            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13964            sc->port.supported[1] =
13965                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13966            sc->port.supported[0] =
13967                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13968        } else {
13969            sc->port.supported[0] =
13970                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13971            sc->port.supported[1] =
13972                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13973        }
13974        cfg_size = 2;
13975        break;
13976    }
13977
13978    if (!(sc->port.supported[0] || sc->port.supported[1])) {
13979        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
13980              SHMEM_RD(sc,
13981                       dev_info.port_hw_config[port].external_phy_config),
13982              SHMEM_RD(sc,
13983                       dev_info.port_hw_config[port].external_phy_config2));
13984        return;
13985    }
13986
13987    if (CHIP_IS_E3(sc))
13988        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
13989    else {
13990        switch (switch_cfg) {
13991        case ELINK_SWITCH_CFG_1G:
13992            sc->port.phy_addr =
13993                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
13994            break;
13995        case ELINK_SWITCH_CFG_10G:
13996            sc->port.phy_addr =
13997                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
13998            break;
13999        default:
14000            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14001                  sc->port.link_config[0]);
14002            return;
14003        }
14004    }
14005
14006    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14007
14008    /* mask what we support according to speed_cap_mask per configuration */
14009    for (idx = 0; idx < cfg_size; idx++) {
14010        if (!(sc->link_params.speed_cap_mask[idx] &
14011              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14012            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14013        }
14014
14015        if (!(sc->link_params.speed_cap_mask[idx] &
14016              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14017            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14018        }
14019
14020        if (!(sc->link_params.speed_cap_mask[idx] &
14021              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14022            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14023        }
14024
14025        if (!(sc->link_params.speed_cap_mask[idx] &
14026              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14027            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14028        }
14029
14030        if (!(sc->link_params.speed_cap_mask[idx] &
14031              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14032            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14033        }
14034
14035        if (!(sc->link_params.speed_cap_mask[idx] &
14036              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14037            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14038        }
14039
14040        if (!(sc->link_params.speed_cap_mask[idx] &
14041              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14042            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14043        }
14044
14045        if (!(sc->link_params.speed_cap_mask[idx] &
14046              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14047            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14048        }
14049    }
14050
14051    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14052          sc->port.supported[0], sc->port.supported[1]);
    ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
                   sc->port.supported[0], sc->port.supported[1]);
14055}
14056
14057static void
14058bxe_link_settings_requested(struct bxe_softc *sc)
14059{
14060    uint32_t link_config;
14061    uint32_t idx;
14062    uint32_t cfg_size = 0;
14063
14064    sc->port.advertising[0] = 0;
14065    sc->port.advertising[1] = 0;
14066
14067    switch (sc->link_params.num_phys) {
14068    case 1:
14069    case 2:
14070        cfg_size = 1;
14071        break;
14072    case 3:
14073        cfg_size = 2;
14074        break;
14075    }
14076
14077    for (idx = 0; idx < cfg_size; idx++) {
14078        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14079        link_config = sc->port.link_config[idx];
14080
14081        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14082        case PORT_FEATURE_LINK_SPEED_AUTO:
14083            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14084                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14085                sc->port.advertising[idx] |= sc->port.supported[idx];
14086                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14087                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14088                    sc->port.advertising[idx] |=
14089                        (ELINK_SUPPORTED_100baseT_Half |
14090                         ELINK_SUPPORTED_100baseT_Full);
14091            } else {
14092                /* force 10G, no AN */
14093                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14094                sc->port.advertising[idx] |=
14095                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14096                continue;
14097            }
14098            break;
14099
14100        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14101            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14102                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14103                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14104                                              ADVERTISED_TP);
14105            } else {
14106                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14107                          "speed_cap_mask=0x%08x\n",
14108                      link_config, sc->link_params.speed_cap_mask[idx]);
14109                return;
14110            }
14111            break;
14112
14113        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14114            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14115                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14116                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14117                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14118                                              ADVERTISED_TP);
                ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
                               sc->link_params.req_duplex[idx]);
14121            } else {
14122                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14123                          "speed_cap_mask=0x%08x\n",
14124                      link_config, sc->link_params.speed_cap_mask[idx]);
14125                return;
14126            }
14127            break;
14128
14129        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14130            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14131                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14132                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14133                                              ADVERTISED_TP);
14134            } else {
14135                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14136                          "speed_cap_mask=0x%08x\n",
14137                      link_config, sc->link_params.speed_cap_mask[idx]);
14138                return;
14139            }
14140            break;
14141
14142        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14143            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14144                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14145                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14146                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14147                                              ADVERTISED_TP);
14148            } else {
14149                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14150                          "speed_cap_mask=0x%08x\n",
14151                      link_config, sc->link_params.speed_cap_mask[idx]);
14152                return;
14153            }
14154            break;
14155
14156        case PORT_FEATURE_LINK_SPEED_1G:
14157            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14158                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14159                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14160                                              ADVERTISED_TP);
14161            } else {
14162                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14163                          "speed_cap_mask=0x%08x\n",
14164                      link_config, sc->link_params.speed_cap_mask[idx]);
14165                return;
14166            }
14167            break;
14168
14169        case PORT_FEATURE_LINK_SPEED_2_5G:
14170            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14171                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14172                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14173                                              ADVERTISED_TP);
14174            } else {
14175                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14176                          "speed_cap_mask=0x%08x\n",
14177                      link_config, sc->link_params.speed_cap_mask[idx]);
14178                return;
14179            }
14180            break;
14181
14182        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14183            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14184                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14185                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14186                                              ADVERTISED_FIBRE);
14187            } else {
14188                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14189                          "speed_cap_mask=0x%08x\n",
14190                      link_config, sc->link_params.speed_cap_mask[idx]);
14191                return;
14192            }
14193            break;
14194
14195        case PORT_FEATURE_LINK_SPEED_20G:
14196            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14197            break;
14198
14199        default:
14200            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14201                      "speed_cap_mask=0x%08x\n",
14202                  link_config, sc->link_params.speed_cap_mask[idx]);
14203            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14204            sc->port.advertising[idx] = sc->port.supported[idx];
14205            break;
14206        }
14207
14208        sc->link_params.req_flow_ctrl[idx] =
14209            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14210
14211        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14212            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14213                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14214            } else {
14215                bxe_set_requested_fc(sc);
14216            }
14217        }
14218
14219        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14220                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14221              sc->link_params.req_line_speed[idx],
14222              sc->link_params.req_duplex[idx],
14223              sc->link_params.req_flow_ctrl[idx],
14224              sc->port.advertising[idx]);
        ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d advertising=0x%x\n",
                       sc->link_params.req_line_speed[idx],
                       sc->link_params.req_duplex[idx],
                       sc->port.advertising[idx]);
14230    }
14231}
14232
14233static void
14234bxe_get_phy_info(struct bxe_softc *sc)
14235{
14236    uint8_t port = SC_PORT(sc);
14237    uint32_t config = sc->port.config;
14238    uint32_t eee_mode;
14239
14240    /* shmem data already read in bxe_get_shmem_info() */
14241
14242    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14243                        "link_config0=0x%08x\n",
14244               sc->link_params.lane_config,
14245               sc->link_params.speed_cap_mask[0],
14246               sc->port.link_config[0]);
14247
14248
14249    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14250    bxe_link_settings_requested(sc);
14251
14252    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14253        sc->link_params.feature_config_flags |=
14254            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14255    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14256        sc->link_params.feature_config_flags &=
14257            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14258    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14259        sc->link_params.feature_config_flags |=
14260            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14261    }
14262
14263    /* configure link feature according to nvram value */
14264    eee_mode =
14265        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14266          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14267         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14268    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14269        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14270                                    ELINK_EEE_MODE_ENABLE_LPI |
14271                                    ELINK_EEE_MODE_OUTPUT_TIME);
14272    } else {
14273        sc->link_params.eee_mode = 0;
14274    }
14275
14276    /* get the media type */
14277    bxe_media_detect(sc);
14278	ELINK_DEBUG_P1(sc, "detected media type\n", sc->media);
14279}
14280
14281static void
14282bxe_get_params(struct bxe_softc *sc)
14283{
14284    /* get user tunable params */
14285    bxe_get_tunable_params(sc);
14286
14287    /* select the RX and TX ring sizes */
14288    sc->tx_ring_size = TX_BD_USABLE;
14289    sc->rx_ring_size = RX_BD_USABLE;
14290
14291    /* XXX disable WoL */
14292    sc->wol = 0;
14293}
14294
14295static void
14296bxe_set_modes_bitmap(struct bxe_softc *sc)
14297{
14298    uint32_t flags = 0;
14299
14300    if (CHIP_REV_IS_FPGA(sc)) {
14301        SET_FLAGS(flags, MODE_FPGA);
14302    } else if (CHIP_REV_IS_EMUL(sc)) {
14303        SET_FLAGS(flags, MODE_EMUL);
14304    } else {
14305        SET_FLAGS(flags, MODE_ASIC);
14306    }
14307
14308    if (CHIP_IS_MODE_4_PORT(sc)) {
14309        SET_FLAGS(flags, MODE_PORT4);
14310    } else {
14311        SET_FLAGS(flags, MODE_PORT2);
14312    }
14313
14314    if (CHIP_IS_E2(sc)) {
14315        SET_FLAGS(flags, MODE_E2);
14316    } else if (CHIP_IS_E3(sc)) {
14317        SET_FLAGS(flags, MODE_E3);
14318        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14319            SET_FLAGS(flags, MODE_E3_A0);
14320        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14321            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14322        }
14323    }
14324
14325    if (IS_MF(sc)) {
14326        SET_FLAGS(flags, MODE_MF);
14327        switch (sc->devinfo.mf_info.mf_mode) {
14328        case MULTI_FUNCTION_SD:
14329            SET_FLAGS(flags, MODE_MF_SD);
14330            break;
14331        case MULTI_FUNCTION_SI:
14332            SET_FLAGS(flags, MODE_MF_SI);
14333            break;
14334        case MULTI_FUNCTION_AFEX:
14335            SET_FLAGS(flags, MODE_MF_AFEX);
14336            break;
14337        }
14338    } else {
14339        SET_FLAGS(flags, MODE_SF);
14340    }
14341
14342#if defined(__LITTLE_ENDIAN)
14343    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14344#else /* __BIG_ENDIAN */
14345    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14346#endif
14347
14348    INIT_MODE_FLAGS(sc) = flags;
14349}
14350
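/*
 * Allocate all host/HW interface DMA memory: the parent DMA tag, default
 * status block, event queue, slowpath buffer, slowpath queue and firmware
 * decompression buffer, plus per-fastpath status blocks, BD/RCQ/SGE chains
 * and the mbuf DMA tags/maps. Returns 0 on success, 1 on any failure.
 */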
14351static int
14352bxe_alloc_hsi_mem(struct bxe_softc *sc)
14353{
14354    struct bxe_fastpath *fp;
14355    bus_addr_t busaddr;
14356    int max_agg_queues;
14357    int max_segments;
14358    bus_size_t max_size;
14359    bus_size_t max_seg_size;
14360    char buf[32];
14361    int rc;
14362    int i, j;
14363
14364    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14365
14366    /* allocate the parent bus DMA tag */
14367    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14368                            1,                        /* alignment */
14369                            0,                        /* boundary limit */
14370                            BUS_SPACE_MAXADDR,        /* restricted low */
14371                            BUS_SPACE_MAXADDR,        /* restricted hi */
14372                            NULL,                     /* addr filter() */
14373                            NULL,                     /* addr filter() arg */
14374                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14375                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14376                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14377                            0,                        /* flags */
14378                            NULL,                     /* lock() */
14379                            NULL,                     /* lock() arg */
14380                            &sc->parent_dma_tag);     /* returned dma tag */
14381    if (rc != 0) {
14382        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14383        return (1);
14384    }
14385
14386    /************************/
14387    /* DEFAULT STATUS BLOCK */
14388    /************************/
14389
14390    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14391                      &sc->def_sb_dma, "default status block") != 0) {
14392        /* XXX */
14393        bus_dma_tag_destroy(sc->parent_dma_tag);
14394        return (1);
14395    }
14396
14397    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14398
14399    /***************/
14400    /* EVENT QUEUE */
14401    /***************/
14402
14403    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14404                      &sc->eq_dma, "event queue") != 0) {
14405        /* XXX */
14406        bxe_dma_free(sc, &sc->def_sb_dma);
14407        sc->def_sb = NULL;
14408        bus_dma_tag_destroy(sc->parent_dma_tag);
14409        return (1);
14410    }
14411
14412    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14413
14414    /*************/
14415    /* SLOW PATH */
14416    /*************/
14417
14418    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14419                      &sc->sp_dma, "slow path") != 0) {
14420        /* XXX */
14421        bxe_dma_free(sc, &sc->eq_dma);
14422        sc->eq = NULL;
14423        bxe_dma_free(sc, &sc->def_sb_dma);
14424        sc->def_sb = NULL;
14425        bus_dma_tag_destroy(sc->parent_dma_tag);
14426        return (1);
14427    }
14428
14429    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14430
14431    /*******************/
14432    /* SLOW PATH QUEUE */
14433    /*******************/
14434
14435    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14436                      &sc->spq_dma, "slow path queue") != 0) {
14437        /* XXX */
14438        bxe_dma_free(sc, &sc->sp_dma);
14439        sc->sp = NULL;
14440        bxe_dma_free(sc, &sc->eq_dma);
14441        sc->eq = NULL;
14442        bxe_dma_free(sc, &sc->def_sb_dma);
14443        sc->def_sb = NULL;
14444        bus_dma_tag_destroy(sc->parent_dma_tag);
14445        return (1);
14446    }
14447
14448    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14449
14450    /***************************/
14451    /* FW DECOMPRESSION BUFFER */
14452    /***************************/
14453
14454    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14455                      "fw decompression buffer") != 0) {
14456        /* XXX */
14457        bxe_dma_free(sc, &sc->spq_dma);
14458        sc->spq = NULL;
14459        bxe_dma_free(sc, &sc->sp_dma);
14460        sc->sp = NULL;
14461        bxe_dma_free(sc, &sc->eq_dma);
14462        sc->eq = NULL;
14463        bxe_dma_free(sc, &sc->def_sb_dma);
14464        sc->def_sb = NULL;
14465        bus_dma_tag_destroy(sc->parent_dma_tag);
14466        return (1);
14467    }
14468
14469    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14470
14471    if ((sc->gz_strm =
14472         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14473        /* XXX */
14474        bxe_dma_free(sc, &sc->gz_buf_dma);
14475        sc->gz_buf = NULL;
14476        bxe_dma_free(sc, &sc->spq_dma);
14477        sc->spq = NULL;
14478        bxe_dma_free(sc, &sc->sp_dma);
14479        sc->sp = NULL;
14480        bxe_dma_free(sc, &sc->eq_dma);
14481        sc->eq = NULL;
14482        bxe_dma_free(sc, &sc->def_sb_dma);
14483        sc->def_sb = NULL;
14484        bus_dma_tag_destroy(sc->parent_dma_tag);
14485        return (1);
14486    }
14487
14488    /*************/
14489    /* FASTPATHS */
14490    /*************/
14491
14492    /* allocate DMA memory for each fastpath structure */
14493    for (i = 0; i < sc->num_queues; i++) {
14494        fp = &sc->fp[i];
14495        fp->sc    = sc;
14496        fp->index = i;
14497
14498        /*******************/
14499        /* FP STATUS BLOCK */
14500        /*******************/
14501
14502        snprintf(buf, sizeof(buf), "fp %d status block", i);
14503        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14504                          &fp->sb_dma, buf) != 0) {
14505            /* XXX unwind and free previous fastpath allocations */
14506            BLOGE(sc, "Failed to alloc %s\n", buf);
14507            return (1);
14508        } else {
14509            if (CHIP_IS_E2E3(sc)) {
14510                fp->status_block.e2_sb =
14511                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14512            } else {
14513                fp->status_block.e1x_sb =
14514                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14515            }
14516        }
14517
14518        /******************/
14519        /* FP TX BD CHAIN */
14520        /******************/
14521
14522        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14523        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14524                          &fp->tx_dma, buf) != 0) {
14525            /* XXX unwind and free previous fastpath allocations */
14526            BLOGE(sc, "Failed to alloc %s\n", buf);
14527            return (1);
14528        } else {
14529            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14530        }
14531
14532        /* link together the tx bd chain pages */
14533        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14534            /* index into the tx bd chain array to last entry per page */
14535            struct eth_tx_next_bd *tx_next_bd =
14536                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14537            /* point to the next page and wrap from last page */
14538            busaddr = (fp->tx_dma.paddr +
14539                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14540            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14541            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14542        }
14543
14544        /******************/
14545        /* FP RX BD CHAIN */
14546        /******************/
14547
14548        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14549        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14550                          &fp->rx_dma, buf) != 0) {
14551            /* XXX unwind and free previous fastpath allocations */
14552            BLOGE(sc, "Failed to alloc %s\n", buf);
14553            return (1);
14554        } else {
14555            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14556        }
14557
14558        /* link together the rx bd chain pages */
14559        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14560            /* index into the rx bd chain array to last entry per page */
14561            struct eth_rx_bd *rx_bd =
14562                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14563            /* point to the next page and wrap from last page */
14564            busaddr = (fp->rx_dma.paddr +
14565                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14566            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14567            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14568        }
14569
14570        /*******************/
14571        /* FP RX RCQ CHAIN */
14572        /*******************/
14573
14574        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14575        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14576                          &fp->rcq_dma, buf) != 0) {
14577            /* XXX unwind and free previous fastpath allocations */
14578            BLOGE(sc, "Failed to alloc %s\n", buf);
14579            return (1);
14580        } else {
14581            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14582        }
14583
14584        /* link together the rcq chain pages */
14585        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14586            /* index into the rcq chain array to last entry per page */
14587            struct eth_rx_cqe_next_page *rx_cqe_next =
14588                (struct eth_rx_cqe_next_page *)
14589                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14590            /* point to the next page and wrap from last page */
14591            busaddr = (fp->rcq_dma.paddr +
14592                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14593            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14594            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14595        }
14596
14597        /*******************/
14598        /* FP RX SGE CHAIN */
14599        /*******************/
14600
14601        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14602        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14603                          &fp->rx_sge_dma, buf) != 0) {
14604            /* XXX unwind and free previous fastpath allocations */
14605            BLOGE(sc, "Failed to alloc %s\n", buf);
14606            return (1);
14607        } else {
14608            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14609        }
14610
14611        /* link together the sge chain pages */
14612        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14613            /* index into the sge chain array to last entry per page */
14614            struct eth_rx_sge *rx_sge =
14615                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14616            /* point to the next page and wrap from last page */
14617            busaddr = (fp->rx_sge_dma.paddr +
14618                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14619            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14620            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14621        }
14622
14623        /***********************/
14624        /* FP TX MBUF DMA MAPS */
14625        /***********************/
14626
14627        /* set required sizes before mapping to conserve resources */
14628        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14629            max_size     = BXE_TSO_MAX_SIZE;
14630            max_segments = BXE_TSO_MAX_SEGMENTS;
14631            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14632        } else {
14633            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14634            max_segments = BXE_MAX_SEGMENTS;
14635            max_seg_size = MCLBYTES;
14636        }
14637
14638        /* create a dma tag for the tx mbufs */
14639        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14640                                1,                  /* alignment */
14641                                0,                  /* boundary limit */
14642                                BUS_SPACE_MAXADDR,  /* restricted low */
14643                                BUS_SPACE_MAXADDR,  /* restricted hi */
14644                                NULL,               /* addr filter() */
14645                                NULL,               /* addr filter() arg */
14646                                max_size,           /* max map size */
14647                                max_segments,       /* num discontinuous */
14648                                max_seg_size,       /* max seg size */
14649                                0,                  /* flags */
14650                                NULL,               /* lock() */
14651                                NULL,               /* lock() arg */
14652                                &fp->tx_mbuf_tag);  /* returned dma tag */
14653        if (rc != 0) {
14654            /* XXX unwind and free previous fastpath allocations */
14655            BLOGE(sc, "Failed to create dma tag for "
14656                      "'fp %d tx mbufs' (%d)\n", i, rc);
14657            return (1);
14658        }
14659
14660        /* create dma maps for each of the tx mbuf clusters */
14661        for (j = 0; j < TX_BD_TOTAL; j++) {
14662            if (bus_dmamap_create(fp->tx_mbuf_tag,
14663                                  BUS_DMA_NOWAIT,
14664                                  &fp->tx_mbuf_chain[j].m_map)) {
14665                /* XXX unwind and free previous fastpath allocations */
14666                BLOGE(sc, "Failed to create dma map for "
14667                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14668                return (1);
14669            }
14670        }
14671
14672        /***********************/
14673        /* FP RX MBUF DMA MAPS */
14674        /***********************/
14675
14676        /* create a dma tag for the rx mbufs */
14677        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14678                                1,                  /* alignment */
14679                                0,                  /* boundary limit */
14680                                BUS_SPACE_MAXADDR,  /* restricted low */
14681                                BUS_SPACE_MAXADDR,  /* restricted hi */
14682                                NULL,               /* addr filter() */
14683                                NULL,               /* addr filter() arg */
14684                                MJUM9BYTES,         /* max map size */
14685                                1,                  /* num discontinuous */
14686                                MJUM9BYTES,         /* max seg size */
14687                                0,                  /* flags */
14688                                NULL,               /* lock() */
14689                                NULL,               /* lock() arg */
14690                                &fp->rx_mbuf_tag);  /* returned dma tag */
14691        if (rc != 0) {
14692            /* XXX unwind and free previous fastpath allocations */
14693            BLOGE(sc, "Failed to create dma tag for "
14694                      "'fp %d rx mbufs' (%d)\n", i, rc);
14695            return (1);
14696        }
14697
14698        /* create dma maps for each of the rx mbuf clusters */
14699        for (j = 0; j < RX_BD_TOTAL; j++) {
14700            if (bus_dmamap_create(fp->rx_mbuf_tag,
14701                                  BUS_DMA_NOWAIT,
14702                                  &fp->rx_mbuf_chain[j].m_map)) {
14703                /* XXX unwind and free previous fastpath allocations */
14704                BLOGE(sc, "Failed to create dma map for "
14705                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14706                return (1);
14707            }
14708        }
14709
14710        /* create dma map for the spare rx mbuf cluster */
14711        if (bus_dmamap_create(fp->rx_mbuf_tag,
14712                              BUS_DMA_NOWAIT,
14713                              &fp->rx_mbuf_spare_map)) {
14714            /* XXX unwind and free previous fastpath allocations */
14715            BLOGE(sc, "Failed to create dma map for "
14716                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14717            return (1);
14718        }
14719
14720        /***************************/
14721        /* FP RX SGE MBUF DMA MAPS */
14722        /***************************/
14723
14724        /* create a dma tag for the rx sge mbufs */
14725        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14726                                1,                  /* alignment */
14727                                0,                  /* boundary limit */
14728                                BUS_SPACE_MAXADDR,  /* restricted low */
14729                                BUS_SPACE_MAXADDR,  /* restricted hi */
14730                                NULL,               /* addr filter() */
14731                                NULL,               /* addr filter() arg */
14732                                BCM_PAGE_SIZE,      /* max map size */
14733                                1,                  /* num discontinuous */
14734                                BCM_PAGE_SIZE,      /* max seg size */
14735                                0,                  /* flags */
14736                                NULL,               /* lock() */
14737                                NULL,               /* lock() arg */
14738                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14739        if (rc != 0) {
14740            /* XXX unwind and free previous fastpath allocations */
14741            BLOGE(sc, "Failed to create dma tag for "
14742                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14743            return (1);
14744        }
14745
14746        /* create dma maps for the rx sge mbuf clusters */
14747        for (j = 0; j < RX_SGE_TOTAL; j++) {
14748            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14749                                  BUS_DMA_NOWAIT,
14750                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14751                /* XXX unwind and free previous fastpath allocations */
14752                BLOGE(sc, "Failed to create dma map for "
14753                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14754                return (1);
14755            }
14756        }
14757
14758        /* create dma map for the spare rx sge mbuf cluster */
14759        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14760                              BUS_DMA_NOWAIT,
14761                              &fp->rx_sge_mbuf_spare_map)) {
14762            /* XXX unwind and free previous fastpath allocations */
14763            BLOGE(sc, "Failed to create dma map for "
14764                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14765            return (1);
14766        }
14767
14768        /***************************/
14769        /* FP RX TPA MBUF DMA MAPS */
14770        /***************************/
14771
14772        /* create dma maps for the rx tpa mbuf clusters */
14773        max_agg_queues = MAX_AGG_QS(sc);
14774
14775        for (j = 0; j < max_agg_queues; j++) {
14776            if (bus_dmamap_create(fp->rx_mbuf_tag,
14777                                  BUS_DMA_NOWAIT,
14778                                  &fp->rx_tpa_info[j].bd.m_map)) {
14779                /* XXX unwind and free previous fastpath allocations */
14780                BLOGE(sc, "Failed to create dma map for "
14781                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14782                return (1);
14783            }
14784        }
14785
14786        /* create dma map for the spare rx tpa mbuf cluster */
14787        if (bus_dmamap_create(fp->rx_mbuf_tag,
14788                              BUS_DMA_NOWAIT,
14789                              &fp->rx_tpa_info_mbuf_spare_map)) {
14790            /* XXX unwind and free previous fastpath allocations */
14791            BLOGE(sc, "Failed to create dma map for "
14792                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14793            return (1);
14794        }
14795
14796        bxe_init_sge_ring_bit_mask(fp);
14797    }
14798
14799    return (0);
14800}
14801
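/*
 * Release everything allocated by bxe_alloc_hsi_mem(). Safe to call on a
 * partially initialized softc: a NULL parent DMA tag means nothing was
 * allocated, and each per-fastpath tag/map is checked before it is freed.
 */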
14802static void
14803bxe_free_hsi_mem(struct bxe_softc *sc)
14804{
14805    struct bxe_fastpath *fp;
14806    int max_agg_queues;
14807    int i, j;
14808
14809    if (sc->parent_dma_tag == NULL) {
14810        return; /* assume nothing was allocated */
14811    }
14812
14813    for (i = 0; i < sc->num_queues; i++) {
14814        fp = &sc->fp[i];
14815
14816        /*******************/
14817        /* FP STATUS BLOCK */
14818        /*******************/
14819
14820        bxe_dma_free(sc, &fp->sb_dma);
14821        memset(&fp->status_block, 0, sizeof(fp->status_block));
14822
14823        /******************/
14824        /* FP TX BD CHAIN */
14825        /******************/
14826
14827        bxe_dma_free(sc, &fp->tx_dma);
14828        fp->tx_chain = NULL;
14829
14830        /******************/
14831        /* FP RX BD CHAIN */
14832        /******************/
14833
14834        bxe_dma_free(sc, &fp->rx_dma);
14835        fp->rx_chain = NULL;
14836
14837        /*******************/
14838        /* FP RX RCQ CHAIN */
14839        /*******************/
14840
14841        bxe_dma_free(sc, &fp->rcq_dma);
14842        fp->rcq_chain = NULL;
14843
14844        /*******************/
14845        /* FP RX SGE CHAIN */
14846        /*******************/
14847
14848        bxe_dma_free(sc, &fp->rx_sge_dma);
14849        fp->rx_sge_chain = NULL;
14850
14851        /***********************/
14852        /* FP TX MBUF DMA MAPS */
14853        /***********************/
14854
14855        if (fp->tx_mbuf_tag != NULL) {
14856            for (j = 0; j < TX_BD_TOTAL; j++) {
14857                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14858                    bus_dmamap_unload(fp->tx_mbuf_tag,
14859                                      fp->tx_mbuf_chain[j].m_map);
14860                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14861                                       fp->tx_mbuf_chain[j].m_map);
14862                }
14863            }
14864
14865            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14866            fp->tx_mbuf_tag = NULL;
14867        }
14868
14869        /***********************/
14870        /* FP RX MBUF DMA MAPS */
14871        /***********************/
14872
14873        if (fp->rx_mbuf_tag != NULL) {
14874            for (j = 0; j < RX_BD_TOTAL; j++) {
14875                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14876                    bus_dmamap_unload(fp->rx_mbuf_tag,
14877                                      fp->rx_mbuf_chain[j].m_map);
14878                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14879                                       fp->rx_mbuf_chain[j].m_map);
14880                }
14881            }
14882
14883            if (fp->rx_mbuf_spare_map != NULL) {
14884                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14885                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14886            }
14887
14888            /***************************/
14889            /* FP RX TPA MBUF DMA MAPS */
14890            /***************************/
14891
14892            max_agg_queues = MAX_AGG_QS(sc);
14893
14894            for (j = 0; j < max_agg_queues; j++) {
14895                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14896                    bus_dmamap_unload(fp->rx_mbuf_tag,
14897                                      fp->rx_tpa_info[j].bd.m_map);
14898                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14899                                       fp->rx_tpa_info[j].bd.m_map);
14900                }
14901            }
14902
14903            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14904                bus_dmamap_unload(fp->rx_mbuf_tag,
14905                                  fp->rx_tpa_info_mbuf_spare_map);
14906                bus_dmamap_destroy(fp->rx_mbuf_tag,
14907                                   fp->rx_tpa_info_mbuf_spare_map);
14908            }
14909
14910            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14911            fp->rx_mbuf_tag = NULL;
14912        }
14913
14914        /***************************/
14915        /* FP RX SGE MBUF DMA MAPS */
14916        /***************************/
14917
14918        if (fp->rx_sge_mbuf_tag != NULL) {
14919            for (j = 0; j < RX_SGE_TOTAL; j++) {
14920                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14921                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14922                                      fp->rx_sge_mbuf_chain[j].m_map);
14923                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14924                                       fp->rx_sge_mbuf_chain[j].m_map);
14925                }
14926            }
14927
14928            if (fp->rx_sge_mbuf_spare_map != NULL) {
14929                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14930                                  fp->rx_sge_mbuf_spare_map);
14931                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14932                                   fp->rx_sge_mbuf_spare_map);
14933            }
14934
14935            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14936            fp->rx_sge_mbuf_tag = NULL;
14937        }
14938    }
14939
14940    /***************************/
14941    /* FW DECOMPRESSION BUFFER */
14942    /***************************/
14943
14944    bxe_dma_free(sc, &sc->gz_buf_dma);
14945    sc->gz_buf = NULL;
14946    free(sc->gz_strm, M_DEVBUF);
14947    sc->gz_strm = NULL;
14948
14949    /*******************/
14950    /* SLOW PATH QUEUE */
14951    /*******************/
14952
14953    bxe_dma_free(sc, &sc->spq_dma);
14954    sc->spq = NULL;
14955
14956    /*************/
14957    /* SLOW PATH */
14958    /*************/
14959
14960    bxe_dma_free(sc, &sc->sp_dma);
14961    sc->sp = NULL;
14962
14963    /***************/
14964    /* EVENT QUEUE */
14965    /***************/
14966
14967    bxe_dma_free(sc, &sc->eq_dma);
14968    sc->eq = NULL;
14969
14970    /************************/
14971    /* DEFAULT STATUS BLOCK */
14972    /************************/
14973
14974    bxe_dma_free(sc, &sc->def_sb_dma);
14975    sc->def_sb = NULL;
14976
14977    bus_dma_tag_destroy(sc->parent_dma_tag);
14978    sc->parent_dma_tag = NULL;
14979}
14980
14981/*
14982 * A previous driver DMAE transaction may have occurred when the pre-boot
14983 * stage ended and boot began. This would invalidate the addresses of the
14984 * transaction, setting the was-error bit in the PCI and causing all
14985 * hw-to-host PCIe transactions to time out. If this happened, we want to
14986 * clear the interrupt which detected this from the pglueb, and the was-done bit.
14987 */
14988static void
14989bxe_prev_interrupted_dmae(struct bxe_softc *sc)
14990{
14991    uint32_t val;
14992
14993    if (!CHIP_IS_E1x(sc)) {
14994        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
14995        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
14996            BLOGD(sc, DBG_LOAD,
14997                  "Clearing 'was-error' bit that was set in pglueb\n");
14998            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
14999        }
15000    }
15001}
15002
15003static int
15004bxe_prev_mcp_done(struct bxe_softc *sc)
15005{
15006    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15007                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15008    if (!rc) {
15009        BLOGE(sc, "MCP response failure, aborting\n");
15010        return (-1);
15011    }
15012
15013    return (0);
15014}
15015
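/*
 * bxe_prev_list tracks, per PCI bus/device/path, whether cleanup of a
 * previously loaded driver (or UNDI) has already been performed, so the
 * recovery flow is not repeated by other functions on the same path.
 */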
15016static struct bxe_prev_list_node *
15017bxe_prev_path_get_entry(struct bxe_softc *sc)
15018{
15019    struct bxe_prev_list_node *tmp;
15020
15021    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15022        if ((sc->pcie_bus == tmp->bus) &&
15023            (sc->pcie_device == tmp->slot) &&
15024            (SC_PATH(sc) == tmp->path)) {
15025            return (tmp);
15026        }
15027    }
15028
15029    return (NULL);
15030}
15031
15032static uint8_t
15033bxe_prev_is_path_marked(struct bxe_softc *sc)
15034{
15035    struct bxe_prev_list_node *tmp;
15036    int rc = FALSE;
15037
15038    mtx_lock(&bxe_prev_mtx);
15039
15040    tmp = bxe_prev_path_get_entry(sc);
15041    if (tmp) {
15042        if (tmp->aer) {
15043            BLOGD(sc, DBG_LOAD,
15044                  "Path %d/%d/%d was marked by AER\n",
15045                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15046        } else {
15047            rc = TRUE;
15048            BLOGD(sc, DBG_LOAD,
15049                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15050                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15051        }
15052    }
15053
15054    mtx_unlock(&bxe_prev_mtx);
15055
15056    return (rc);
15057}
15058
15059static int
15060bxe_prev_mark_path(struct bxe_softc *sc,
15061                   uint8_t          after_undi)
15062{
15063    struct bxe_prev_list_node *tmp;
15064
15065    mtx_lock(&bxe_prev_mtx);
15066
15067    /* Check whether the entry for this path already exists */
15068    tmp = bxe_prev_path_get_entry(sc);
15069    if (tmp) {
15070        if (!tmp->aer) {
15071            BLOGD(sc, DBG_LOAD,
15072                  "Re-marking AER in path %d/%d/%d\n",
15073                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15074        } else {
15075            BLOGD(sc, DBG_LOAD,
15076                  "Removing AER indication from path %d/%d/%d\n",
15077                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15078            tmp->aer = 0;
15079        }
15080
15081        mtx_unlock(&bxe_prev_mtx);
15082        return (0);
15083    }
15084
15085    mtx_unlock(&bxe_prev_mtx);
15086
15087    /* Create an entry for this path and add it */
15088    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15089                 (M_NOWAIT | M_ZERO));
15090    if (!tmp) {
15091        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15092        return (-1);
15093    }
15094
15095    tmp->bus  = sc->pcie_bus;
15096    tmp->slot = sc->pcie_device;
15097    tmp->path = SC_PATH(sc);
15098    tmp->aer  = 0;
15099    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15100
15101    mtx_lock(&bxe_prev_mtx);
15102
15103    BLOGD(sc, DBG_LOAD,
15104          "Marked path %d/%d/%d - finished previous unload\n",
15105          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15106    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15107
15108    mtx_unlock(&bxe_prev_mtx);
15109
15110    return (0);
15111}
15112
15113static int
15114bxe_do_flr(struct bxe_softc *sc)
15115{
15116    int i;
15117
15118    /* only E2 and onwards support FLR */
15119    if (CHIP_IS_E1x(sc)) {
15120        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15121        return (-1);
15122    }
15123
15124    /* only bootcode versions REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
15125    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15126        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15127              sc->devinfo.bc_ver);
15128        return (-1);
15129    }
15130
15131    /* Wait for the Transaction Pending bit to clear (100/200/400 msec backoff) */
15132    for (i = 0; i < 4; i++) {
15133        if (i) {
15134            DELAY(((1 << (i - 1)) * 100) * 1000);
15135        }
15136
15137        if (!bxe_is_pcie_pending(sc)) {
15138            goto clear;
15139        }
15140    }
15141
15142    BLOGE(sc, "PCIE transaction is not cleared, "
15143              "proceeding with reset anyway\n");
15144
15145clear:
15146
15147    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15148    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15149
15150    return (0);
15151}
15152
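/*
 * Saved MAC control register addresses and values. bxe_prev_unload_close_mac()
 * records whatever it changes here so bxe_prev_unload_common() can restore the
 * original values after the common reset; an address of 0 means the
 * corresponding MAC was not touched.
 */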
15153struct bxe_mac_vals {
15154    uint32_t xmac_addr;
15155    uint32_t xmac_val;
15156    uint32_t emac_addr;
15157    uint32_t emac_val;
15158    uint32_t umac_addr;
15159    uint32_t umac_val;
15160    uint32_t bmac_addr;
15161    uint32_t bmac_val[2];
15162};
15163
15164static void
15165bxe_prev_unload_close_mac(struct bxe_softc *sc,
15166                          struct bxe_mac_vals *vals)
15167{
15168    uint32_t val, base_addr, offset, mask, reset_reg;
15169    uint8_t mac_stopped = FALSE;
15170    uint8_t port = SC_PORT(sc);
15171    uint32_t wb_data[2];
15172
15173    /* reset addresses as they also mark which values were changed */
15174    vals->bmac_addr = 0;
15175    vals->umac_addr = 0;
15176    vals->xmac_addr = 0;
15177    vals->emac_addr = 0;
15178
15179    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15180
15181    if (!CHIP_IS_E3(sc)) {
15182        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15183        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15184        if ((mask & reset_reg) && val) {
15185            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15186            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15187                                    : NIG_REG_INGRESS_BMAC0_MEM;
15188            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15189                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15190
15191            /*
15192             * use rd/wr since we cannot use dmae. This is safe
15193             * since MCP won't access the bus due to the request
15194             * to unload, and no function on the path can be
15195             * loaded at this time.
15196             */
15197            wb_data[0] = REG_RD(sc, base_addr + offset);
15198            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15199            vals->bmac_addr = base_addr + offset;
15200            vals->bmac_val[0] = wb_data[0];
15201            vals->bmac_val[1] = wb_data[1];
15202            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15203            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15204            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15205        }
15206
15207        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15208        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15209        vals->emac_val = REG_RD(sc, vals->emac_addr);
15210        REG_WR(sc, vals->emac_addr, 0);
15211        mac_stopped = TRUE;
15212    } else {
15213        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15214            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15215            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15216            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15217            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15218            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15219            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15220            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15221            REG_WR(sc, vals->xmac_addr, 0);
15222            mac_stopped = TRUE;
15223        }
15224
15225        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15226        if (mask & reset_reg) {
15227            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15228            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15229            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15230            vals->umac_val = REG_RD(sc, vals->umac_addr);
15231            REG_WR(sc, vals->umac_addr, 0);
15232            mac_stopped = TRUE;
15233        }
15234    }
15235
15236    if (mac_stopped) {
15237        DELAY(20000);
15238    }
15239}
15240
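/*
 * The UNDI RX producer register lives in TSTORM internal memory (one 16-byte
 * slot per port); the RCQ producer occupies the low 16 bits and the BD
 * producer the high 16 bits of the 32-bit value.
 */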
15241#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15242#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15243#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15244#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15245
15246static void
15247bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15248                         uint8_t          port,
15249                         uint8_t          inc)
15250{
15251    uint16_t rcq, bd;
15252    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15253
15254    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15255    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15256
15257    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15258    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15259
15260    BLOGD(sc, DBG_LOAD,
15261          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15262          port, bd, rcq);
15263}
15264
15265static int
15266bxe_prev_unload_common(struct bxe_softc *sc)
15267{
15268    uint32_t reset_reg, tmp_reg = 0, rc;
15269    uint8_t prev_undi = FALSE;
15270    struct bxe_mac_vals mac_vals;
15271    uint32_t timer_count = 1000;
15272    uint32_t prev_brb;
15273
15274    /*
15275     * It is possible that a previous function received the 'common' answer
15276     * but has not loaded yet, creating a scenario where multiple functions
15277     * receive 'common' on the same path.
15278     */
15279    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15280
15281    memset(&mac_vals, 0, sizeof(mac_vals));
15282
15283    if (bxe_prev_is_path_marked(sc)) {
15284        return (bxe_prev_mcp_done(sc));
15285    }
15286
15287    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15288
15289    /* Reset should be performed after BRB is emptied */
15290    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15291        /* Close the MAC Rx to prevent BRB from filling up */
15292        bxe_prev_unload_close_mac(sc, &mac_vals);
15293
15294        /* close LLH filters towards the BRB */
15295        elink_set_rx_filter(&sc->link_params, 0);
15296
15297        /*
15298         * Check if the UNDI driver was previously loaded.
15299         * UNDI driver initializes CID offset for normal bell to 0x7
15300         */
15301        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15302            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15303            if (tmp_reg == 0x7) {
15304                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15305                prev_undi = TRUE;
15306                /* clear the UNDI indication */
15307                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15308                /* clear possible idle check errors */
15309                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15310            }
15311        }
15312
15313        /* wait until BRB is empty */
15314        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15315        while (timer_count) {
15316            prev_brb = tmp_reg;
15317
15318            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15319            if (!tmp_reg) {
15320                break;
15321            }
15322
15323            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15324
15325            /* reset timer as long as BRB actually gets emptied */
15326            if (prev_brb > tmp_reg) {
15327                timer_count = 1000;
15328            } else {
15329                timer_count--;
15330            }
15331
15332            /* If UNDI resides in memory, manually increment it */
15333            if (prev_undi) {
15334                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15335            }
15336
15337            DELAY(10);
15338        }
15339
15340        if (!timer_count) {
15341            BLOGE(sc, "Failed to empty BRB\n");
15342        }
15343    }
15344
15345    /* No packets are in the pipeline, path is ready for reset */
15346    bxe_reset_common(sc);
15347
15348    if (mac_vals.xmac_addr) {
15349        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15350    }
15351    if (mac_vals.umac_addr) {
15352        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15353    }
15354    if (mac_vals.emac_addr) {
15355        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15356    }
15357    if (mac_vals.bmac_addr) {
15358        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15359        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15360    }
15361
15362    rc = bxe_prev_mark_path(sc, prev_undi);
15363    if (rc) {
15364        bxe_prev_mcp_done(sc);
15365        return (rc);
15366    }
15367
15368    return (bxe_prev_mcp_done(sc));
15369}
15370
15371static int
15372bxe_prev_unload_uncommon(struct bxe_softc *sc)
15373{
15374    int rc;
15375
15376    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15377
15378    /* Test if previous unload process was already finished for this path */
15379    if (bxe_prev_is_path_marked(sc)) {
15380        return (bxe_prev_mcp_done(sc));
15381    }
15382
15383    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15384
15385    /*
15386     * If function has FLR capabilities, and existing FW version matches
15387     * the one required, then FLR will be sufficient to clean any residue
15388     * left by previous driver
15389     */
15390    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15391    if (!rc) {
15392        /* fw version is good */
15393        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15394        rc = bxe_do_flr(sc);
15395    }
15396
15397    if (!rc) {
15398        /* FLR was performed */
15399        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15400        return (0);
15401    }
15402
15403    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15404
15405    /* Close the MCP request, return failure */
15406    rc = bxe_prev_mcp_done(sc);
15407    if (!rc) {
15408        rc = BXE_PREV_WAIT_NEEDED;
15409    }
15410
15411    return (rc);
15412}
15413
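/*
 * Handle whatever state a previously loaded driver (or pre-boot UNDI) may
 * have left behind: clear errors from an interrupted DMAE transaction,
 * release any stale HW/NVRAM locks, then request an unload from the MCP and
 * run the common or uncommon cleanup flow, retrying up to 10 times.
 */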
15414static int
15415bxe_prev_unload(struct bxe_softc *sc)
15416{
15417    int time_counter = 10;
15418    uint32_t fw, hw_lock_reg, hw_lock_val;
15419    uint32_t rc = 0;
15420
15421    /*
15422     * Clear HW from errors which may have resulted from an interrupted
15423     * DMAE transaction.
15424     */
15425    bxe_prev_interrupted_dmae(sc);
15426
15427    /* Release previously held locks */
15428    hw_lock_reg =
15429        (SC_FUNC(sc) <= 5) ?
15430            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15431            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15432
15433    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15434    if (hw_lock_val) {
15435        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15436            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15437            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15438                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15439        }
15440        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15441        REG_WR(sc, hw_lock_reg, 0xffffffff);
15442    } else {
15443        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15444    }
15445
15446    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15447        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15448        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15449    }
15450
15451    do {
15452        /* Lock MCP using an unload request */
15453        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15454        if (!fw) {
15455            BLOGE(sc, "MCP response failure, aborting\n");
15456            rc = -1;
15457            break;
15458        }
15459
15460        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15461            rc = bxe_prev_unload_common(sc);
15462            break;
15463        }
15464
15465        /* non-common reply from MCP might require looping */
15466        rc = bxe_prev_unload_uncommon(sc);
15467        if (rc != BXE_PREV_WAIT_NEEDED) {
15468            break;
15469        }
15470
15471        DELAY(20000);
15472    } while (--time_counter);
15473
15474    if (!time_counter || rc) {
15475        BLOGE(sc, "Failed to unload previous driver!"
15476            " time_counter %d rc %d\n", time_counter, rc);
15477        rc = -1;
15478    }
15479
15480    return (rc);
15481}
15482
15483void
15484bxe_dcbx_set_state(struct bxe_softc *sc,
15485                   uint8_t          dcb_on,
15486                   uint32_t         dcbx_enabled)
15487{
15488    if (!CHIP_IS_E1x(sc)) {
15489        sc->dcb_state = dcb_on;
15490        sc->dcbx_enabled = dcbx_enabled;
15491    } else {
15492        sc->dcb_state = FALSE;
15493        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15494    }
15495    BLOGD(sc, DBG_LOAD,
15496          "DCB state [%s:%s]\n",
15497          dcb_on ? "ON" : "OFF",
15498          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15499          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15500          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15501          "on-chip with negotiation" : "invalid");
15502}
15503
15504/* must be called after sriov-enable */
15505static int
15506bxe_set_qm_cid_count(struct bxe_softc *sc)
15507{
15508    int cid_count = BXE_L2_MAX_CID(sc);
15509
15510    if (IS_SRIOV(sc)) {
15511        cid_count += BXE_VF_CIDS;
15512    }
15513
15514    if (CNIC_SUPPORT(sc)) {
15515        cid_count += CNIC_CID_MAX;
15516    }
15517
15518    return (roundup(cid_count, QM_CID_ROUND));
15519}
15520
15521static void
15522bxe_init_multi_cos(struct bxe_softc *sc)
15523{
15524    int pri, cos;
15525
15526    uint32_t pri_map = 0; /* XXX change to user config */
15527
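    /* each 4-bit nibble of pri_map selects the COS for the corresponding priority */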
15528    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15529        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15530        if (cos < sc->max_cos) {
15531            sc->prio_to_cos[pri] = cos;
15532        } else {
15533            BLOGW(sc, "Invalid COS %d for priority %d "
15534                      "(max COS is %d), setting to 0\n",
15535                  cos, pri, (sc->max_cos - 1));
15536            sc->prio_to_cos[pri] = 0;
15537        }
15538    }
15539}
15540
15541static int
15542bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15543{
15544    struct bxe_softc *sc;
15545    int error, result;
15546
15547    result = 0;
15548    error = sysctl_handle_int(oidp, &result, 0, req);
15549
15550    if (error || !req->newptr) {
15551        return (error);
15552    }
15553
15554    if (result == 1) {
15555        uint32_t  temp;
15556        sc = (struct bxe_softc *)arg1;
15557
15558        BLOGI(sc, "... dumping driver state ...\n");
15559        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15560        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15561    }
15562
15563    return (error);
15564}
15565
15566static int
15567bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15568{
15569    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15570    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15571    uint32_t *offset;
15572    uint64_t value = 0;
15573    int index = (int)arg2;
15574
15575    if (index >= BXE_NUM_ETH_STATS) {
15576        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15577        return (-1);
15578    }
15579
15580    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15581
15582    switch (bxe_eth_stats_arr[index].size) {
15583    case 4:
15584        value = (uint64_t)*offset;
15585        break;
15586    case 8:
15587        value = HILO_U64(*offset, *(offset + 1));
15588        break;
15589    default:
15590        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15591              index, bxe_eth_stats_arr[index].size);
15592        return (-1);
15593    }
15594
15595    return (sysctl_handle_64(oidp, &value, 0, req));
15596}
15597
15598static int
15599bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15600{
15601    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15602    uint32_t *eth_stats;
15603    uint32_t *offset;
15604    uint64_t value = 0;
15605    uint32_t q_stat = (uint32_t)arg2;
15606    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15607    uint32_t index = (q_stat & 0xffff);
15608
15609    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15610
15611    if (index >= BXE_NUM_ETH_Q_STATS) {
15612        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15613        return (-1);
15614    }
15615
15616    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15617
15618    switch (bxe_eth_q_stats_arr[index].size) {
15619    case 4:
15620        value = (uint64_t)*offset;
15621        break;
15622    case 8:
15623        value = HILO_U64(*offset, *(offset + 1));
15624        break;
15625    default:
15626        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15627              index, bxe_eth_q_stats_arr[index].size);
15628        return (-1);
15629    }
15630
15631    return (sysctl_handle_64(oidp, &value, 0, req));
15632}
15633
15634static void bxe_force_link_reset(struct bxe_softc *sc)
15635{
15636
15637        bxe_acquire_phy_lock(sc);
15638        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15639        bxe_release_phy_lock(sc);
15640}
15641
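/*
 * Sysctl handler for dev.bxe.#.pause_param. Translates the user-supplied
 * pause setting into elink flow-control and auto-advertisement parameters
 * and, on a PF with the interface running, resets and re-initializes the
 * PHY so the new settings take effect.
 */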
15642static int
15643bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15644{
15645        struct bxe_softc *sc = (struct bxe_softc *)arg1;
15646        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15647        int rc = 0;
15648        int error;
15649        int result;
15650
15651
15652        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15653
15654        if (error || !req->newptr) {
15655                return (error);
15656        }
15657        if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15658                BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n", sc->bxe_pause_param);
15659                sc->bxe_pause_param = 8;
15660        }
15661
15662        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15663
15664
15665        if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15666                BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15667                return (-EINVAL);
15668        }
15669
15670        if (IS_MF(sc))
15671                return (0);
15672        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15673        if (result & ELINK_FLOW_CTRL_RX)
15674                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15675
15676        if (result & ELINK_FLOW_CTRL_TX)
15677                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15678        if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15679                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15680
15681
15682        if (result & 0x400) {
15683                        sc->link_params.req_flow_ctrl[cfg_idx] =
15684                                ELINK_FLOW_CTRL_AUTO;
15685                }
15686                sc->link_params.req_fc_auto_adv = 0;
15687                if (result & ELINK_FLOW_CTRL_RX)
15688                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15689
15690                if (result & ELINK_FLOW_CTRL_TX)
15691                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15692                if (!sc->link_params.req_fc_auto_adv)
15693                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15694        }
15695        if (IS_PF(sc)) {
15696                if (sc->link_vars.link_up) {
15697                        bxe_stats_handle(sc, STATS_EVENT_STOP);
15698                }
15699                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15700                        bxe_force_link_reset(sc);
15701                        bxe_acquire_phy_lock(sc);
15702
15703                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15704
15705                        bxe_release_phy_lock(sc);
15706
15707                        bxe_calc_fc_adv(sc);
15708                }
15709        }
15710        return (rc);
15711}
15712
15713
15714static void
15715bxe_add_sysctls(struct bxe_softc *sc)
15716{
15717    struct sysctl_ctx_list *ctx;
15718    struct sysctl_oid_list *children;
15719    struct sysctl_oid *queue_top, *queue;
15720    struct sysctl_oid_list *queue_top_children, *queue_children;
15721    char queue_num_buf[32];
15722    uint32_t q_stat;
15723    int i, j;
15724
15725    ctx = device_get_sysctl_ctx(sc->dev);
15726    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15727
15728    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15729                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15730                      "version");
15731
15732    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15733             BCM_5710_FW_MAJOR_VERSION,
15734             BCM_5710_FW_MINOR_VERSION,
15735             BCM_5710_FW_REVISION_VERSION,
15736             BCM_5710_FW_ENGINEERING_VERSION);
15737
15738    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15739        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15740         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15741         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15742         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15743                                                                "Unknown"));
15744    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15745                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15746                    "multifunction vnics per port");
15747
15748    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15749        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15750         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15751         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15752                                              "???GT/s"),
15753        sc->devinfo.pcie_link_width);
15754
15755    sc->debug = bxe_debug;
15756
15757#if __FreeBSD_version >= 900000
15758    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15759                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15760                      "bootcode version");
15761    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15762                      CTLFLAG_RD, sc->fw_ver_str, 0,
15763                      "firmware version");
15764    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15765                      CTLFLAG_RD, sc->mf_mode_str, 0,
15766                      "multifunction mode");
15767    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15768                      CTLFLAG_RD, sc->mac_addr_str, 0,
15769                      "mac address");
15770    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15771                      CTLFLAG_RD, sc->pci_link_str, 0,
15772                      "pci link status");
15773    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15774                    CTLFLAG_RW, &sc->debug,
15775                    "debug logging mode");
15776#else
15777    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15778                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15779                      "bootcode version");
15780    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15781                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15782                      "firmware version");
15783    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15784                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15785                      "multifunction mode");
15786    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15787                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15788                      "mac address");
15789    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15790                      CTLFLAG_RD, &sc->pci_link_str, 0,
15791                      "pci link status");
15792    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15793                    CTLFLAG_RW, &sc->debug, 0,
15794                    "debug logging mode");
15795#endif /* #if __FreeBSD_version >= 900000 */
15796
15797    sc->trigger_grcdump = 0;
15798    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15799                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15800                   "trigger a grcdump, must be invoked"
15801                   " before collecting the grcdump");
15802
15803    sc->grcdump_started = 0;
15804    sc->grcdump_done = 0;
15805    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15806                   CTLFLAG_RD, &sc->grcdump_done, 0,
15807                   "set by driver when grcdump is done");
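    /*
     * Illustrative usage (assuming unit 0): set
     * "sysctl dev.bxe.0.trigger_grcdump=1" and then poll
     * "dev.bxe.0.grcdump_done" until the driver reports completion before
     * collecting the dump.
     */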
15808
15809    sc->rx_budget = bxe_rx_budget;
15810    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15811                    CTLFLAG_RW, &sc->rx_budget, 0,
15812                    "rx processing budget");
15813
15814    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
15815                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15816                    bxe_sysctl_pauseparam, "IU",
15817                    "need pause frames - DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
15818
15819
15820    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15821                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15822                    bxe_sysctl_state, "IU", "dump driver state");
15823
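    /*
     * Export each ETH statistic as a read-only 64-bit proc sysctl; the
     * array index is passed as arg2 so the single handler
     * bxe_sysctl_eth_stat() knows which counter to report.
     */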
15824    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15825        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15826                        bxe_eth_stats_arr[i].string,
15827                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15828                        bxe_sysctl_eth_stat, "LU",
15829                        bxe_eth_stats_arr[i].string);
15830    }
15831
15832    /* add a new parent node for all queues "dev.bxe.#.queue" */
15833    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15834                                CTLFLAG_RD, NULL, "queue");
15835    queue_top_children = SYSCTL_CHILDREN(queue_top);
15836
15837    for (i = 0; i < sc->num_queues; i++) {
15838        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15839        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15840        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15841                                queue_num_buf, CTLFLAG_RD, NULL,
15842                                "single queue");
15843        queue_children = SYSCTL_CHILDREN(queue);
15844
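        /*
         * Pack the queue index into the upper 16 bits and the statistic
         * index into the lower 16 bits of arg2 so the shared handler
         * bxe_sysctl_eth_q_stat() can recover both from a single value.
         */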
15845        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15846            q_stat = ((i << 16) | j);
15847            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15848                            bxe_eth_q_stats_arr[j].string,
15849                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15850                            bxe_sysctl_eth_q_stat, "LU",
15851                            bxe_eth_q_stats_arr[j].string);
15852        }
15853    }
15854}
15855
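/*
 * Allocate one TX buf_ring per fastpath queue. The rings are only compiled
 * in (and allocated) when __FreeBSD_version is new enough for the
 * buf_ring-based transmit path; a NULL return from buf_ring_alloc() aborts
 * the allocation and the caller unwinds any partially allocated rings via
 * bxe_free_buf_rings().
 */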
15856static int
15857bxe_alloc_buf_rings(struct bxe_softc *sc)
15858{
15859#if __FreeBSD_version >= 901504
15860
15861    int i;
15862    struct bxe_fastpath *fp;
15863
15864    for (i = 0; i < sc->num_queues; i++) {
15865
15866        fp = &sc->fp[i];
15867
15868        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15869                                   M_NOWAIT, &fp->tx_mtx);
15870        if (fp->tx_br == NULL)
15871            return (-1);
15872    }
15873#endif
15874    return (0);
15875}
15876
15877static void
15878bxe_free_buf_rings(struct bxe_softc *sc)
15879{
15880#if __FreeBSD_version >= 901504
15881
15882    int i;
15883    struct bxe_fastpath *fp;
15884
15885    for (i = 0; i < sc->num_queues; i++) {
15886
15887        fp = &sc->fp[i];
15888
15889        if (fp->tx_br) {
15890            buf_ring_free(fp->tx_br, M_DEVBUF);
15891            fp->tx_br = NULL;
15892        }
15893    }
15894
15895#endif
15896}
15897
15898static void
15899bxe_init_fp_mutexs(struct bxe_softc *sc)
15900{
15901    int i;
15902    struct bxe_fastpath *fp;
15903
15904    for (i = 0; i < sc->num_queues; i++) {
15905
15906        fp = &sc->fp[i];
15907
15908        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15909            "bxe%d_fp%d_tx_lock", sc->unit, i);
15910        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15911
15912        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15913            "bxe%d_fp%d_rx_lock", sc->unit, i);
15914        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15915    }
15916}
15917
15918static void
15919bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15920{
15921    int i;
15922    struct bxe_fastpath *fp;
15923
15924    for (i = 0; i < sc->num_queues; i++) {
15925
15926        fp = &sc->fp[i];
15927
15928        if (mtx_initialized(&fp->tx_mtx)) {
15929            mtx_destroy(&fp->tx_mtx);
15930        }
15931
15932        if (mtx_initialized(&fp->rx_mtx)) {
15933            mtx_destroy(&fp->rx_mtx);
15934        }
15935    }
15936}
15937
15938
15939/*
15940 * Device attach function.
15941 *
15942 * Allocates device resources, performs secondary chip identification, and
15943 * initializes driver instance variables. This function is called from driver
15944 * load after a successful probe.
15945 *
15946 * Returns:
15947 *   0 = Success, >0 = Failure
15948 */
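/*
 * Note on error handling: the failure paths below return ENXIO and undo the
 * attach steps in roughly reverse order (cdev, ifnet/ifmedia, interrupts,
 * buf rings, ILT, HSI memory, mutexes, BARs, bus mastering), so nothing is
 * left for the caller to clean up.
 */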
15949static int
15950bxe_attach(device_t dev)
15951{
15952    struct bxe_softc *sc;
15953
15954    sc = device_get_softc(dev);
15955
15956    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15957
15958    sc->state = BXE_STATE_CLOSED;
15959
15960    sc->dev  = dev;
15961    sc->unit = device_get_unit(dev);
15962
15963    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15964
15965    sc->pcie_bus    = pci_get_bus(dev);
15966    sc->pcie_device = pci_get_slot(dev);
15967    sc->pcie_func   = pci_get_function(dev);
15968
15969    /* enable bus master capability */
15970    pci_enable_busmaster(dev);
15971
15972    /* get the BARs */
15973    if (bxe_allocate_bars(sc) != 0) {
15974        return (ENXIO);
15975    }
15976
15977    /* initialize the mutexes */
15978    bxe_init_mutexes(sc);
15979
15980    /* prepare the periodic callout */
15981    callout_init(&sc->periodic_callout, 0);
15982
15983    /* prepare the chip taskqueue */
15984    sc->chip_tq_flags = CHIP_TQ_NONE;
15985    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15986             "bxe%d_chip_tq", sc->unit);
15987    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15988    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15989                                   taskqueue_thread_enqueue,
15990                                   &sc->chip_tq);
15991    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15992                            "%s", sc->chip_tq_name);
15993
15994    /* get device info and set params */
15995    if (bxe_get_device_info(sc) != 0) {
15996        BLOGE(sc, "getting device info failed\n");
15997        bxe_deallocate_bars(sc);
15998        pci_disable_busmaster(dev);
15999        return (ENXIO);
16000    }
16001
16002    /* get final misc params */
16003    bxe_get_params(sc);
16004
16005    /* set the default MTU (changed via ifconfig) */
16006    sc->mtu = ETHERMTU;
16007
16008    bxe_set_modes_bitmap(sc);
16009
16010    /* XXX
16011     * If in AFEX mode and the function is configured for FCoE
16012     * then bail... no L2 allowed.
16013     */
16014
16015    /* get phy settings from shmem and 'and' against admin settings */
16016    bxe_get_phy_info(sc);
16017
16018    /* initialize the FreeBSD ifnet interface */
16019    if (bxe_init_ifnet(sc) != 0) {
16020        bxe_release_mutexes(sc);
16021        bxe_deallocate_bars(sc);
16022        pci_disable_busmaster(dev);
16023        return (ENXIO);
16024    }
16025
16026    if (bxe_add_cdev(sc) != 0) {
16027        if (sc->ifp != NULL) {
16028            ether_ifdetach(sc->ifp);
16029        }
16030        ifmedia_removeall(&sc->ifmedia);
16031        bxe_release_mutexes(sc);
16032        bxe_deallocate_bars(sc);
16033        pci_disable_busmaster(dev);
16034        return (ENXIO);
16035    }
16036
16037    /* allocate device interrupts */
16038    if (bxe_interrupt_alloc(sc) != 0) {
16039        bxe_del_cdev(sc);
16040        if (sc->ifp != NULL) {
16041            ether_ifdetach(sc->ifp);
16042        }
16043        ifmedia_removeall(&sc->ifmedia);
16044        bxe_release_mutexes(sc);
16045        bxe_deallocate_bars(sc);
16046        pci_disable_busmaster(dev);
16047        return (ENXIO);
16048    }
16049
16050    bxe_init_fp_mutexs(sc);
16051
16052    if (bxe_alloc_buf_rings(sc) != 0) {
16053        bxe_free_buf_rings(sc);
16054        bxe_interrupt_free(sc);
16055        bxe_del_cdev(sc);
16056        if (sc->ifp != NULL) {
16057            ether_ifdetach(sc->ifp);
16058        }
16059        ifmedia_removeall(&sc->ifmedia);
16060        bxe_release_mutexes(sc);
16061        bxe_deallocate_bars(sc);
16062        pci_disable_busmaster(dev);
16063        return (ENXIO);
16064    }
16065
16066    /* allocate ilt */
16067    if (bxe_alloc_ilt_mem(sc) != 0) {
16068        bxe_free_buf_rings(sc);
16069        bxe_interrupt_free(sc);
16070        bxe_del_cdev(sc);
16071        if (sc->ifp != NULL) {
16072            ether_ifdetach(sc->ifp);
16073        }
16074        ifmedia_removeall(&sc->ifmedia);
16075        bxe_release_mutexes(sc);
16076        bxe_deallocate_bars(sc);
16077        pci_disable_busmaster(dev);
16078        return (ENXIO);
16079    }
16080
16081    /* allocate the host hardware/software hsi structures */
16082    if (bxe_alloc_hsi_mem(sc) != 0) {
16083        bxe_free_ilt_mem(sc);
16084        bxe_free_buf_rings(sc);
16085        bxe_interrupt_free(sc);
16086        bxe_del_cdev(sc);
16087        if (sc->ifp != NULL) {
16088            ether_ifdetach(sc->ifp);
16089        }
16090        ifmedia_removeall(&sc->ifmedia);
16091        bxe_release_mutexes(sc);
16092        bxe_deallocate_bars(sc);
16093        pci_disable_busmaster(dev);
16094        return (ENXIO);
16095    }
16096
16097    /* need to reset chip if UNDI was active */
16098    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16099        /* init fw_seq */
16100        sc->fw_seq =
16101            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16102             DRV_MSG_SEQ_NUMBER_MASK);
16103        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16104        bxe_prev_unload(sc);
16105    }
16106
16107#if 1
16108    /* XXX */
16109    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16110#else
16111    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16112        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16113        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16114        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16115        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16116        bxe_dcbx_init_params(sc);
16117    } else {
16118        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16119    }
16120#endif
16121
16122    /* calculate qm_cid_count */
16123    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16124    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16125
16126    sc->max_cos = 1;
16127    bxe_init_multi_cos(sc);
16128
16129    bxe_add_sysctls(sc);
16130
16131    return (0);
16132}
16133
16134/*
16135 * Device detach function.
16136 *
16137 * Stops the controller, resets the controller, and releases resources.
16138 *
16139 * Returns:
16140 *   0 = Success, >0 = Failure
16141 */
16142static int
16143bxe_detach(device_t dev)
16144{
16145    struct bxe_softc *sc;
16146    if_t ifp;
16147
16148    sc = device_get_softc(dev);
16149
16150    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16151
16152    ifp = sc->ifp;
16153    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16154        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16155        return(EBUSY);
16156    }
16157
16158    bxe_del_cdev(sc);
16159
16160    /* stop the periodic callout */
16161    bxe_periodic_stop(sc);
16162
16163    /* stop the chip taskqueue */
16164    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16165    if (sc->chip_tq) {
16166        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16167        taskqueue_free(sc->chip_tq);
16168        sc->chip_tq = NULL;
16169    }
16170
16171    /* stop and reset the controller if it was open */
16172    if (sc->state != BXE_STATE_CLOSED) {
16173        BXE_CORE_LOCK(sc);
16174        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16175        sc->state = BXE_STATE_DISABLED;
16176        BXE_CORE_UNLOCK(sc);
16177    }
16178
16179    /* release the network interface */
16180    if (ifp != NULL) {
16181        ether_ifdetach(ifp);
16182    }
16183    ifmedia_removeall(&sc->ifmedia);
16184
16185    /* XXX do the following based on driver state... */
16186
16187    /* free the host hardware/software hsi structures */
16188    bxe_free_hsi_mem(sc);
16189
16190    /* free ilt */
16191    bxe_free_ilt_mem(sc);
16192
16193    bxe_free_buf_rings(sc);
16194
16195    /* release the interrupts */
16196    bxe_interrupt_free(sc);
16197
16198    /* Release the mutexes*/
16199    bxe_destroy_fp_mutexs(sc);
16200    bxe_release_mutexes(sc);
16201
16202
16203    /* Release the PCIe BAR mapped memory */
16204    bxe_deallocate_bars(sc);
16205
16206    /* Release the FreeBSD interface. */
16207    if (sc->ifp != NULL) {
16208        if_free(sc->ifp);
16209    }
16210
16211    pci_disable_busmaster(dev);
16212
16213    return (0);
16214}
16215
16216/*
16217 * Device shutdown function.
16218 *
16219 * Stops and resets the controller.
16220 *
16221 * Returns:
16222 *   0 = Success
16223 */
16224static int
16225bxe_shutdown(device_t dev)
16226{
16227    struct bxe_softc *sc;
16228
16229    sc = device_get_softc(dev);
16230
16231    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16232
16233    /* stop the periodic callout */
16234    bxe_periodic_stop(sc);
16235
16236    BXE_CORE_LOCK(sc);
16237    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16238    BXE_CORE_UNLOCK(sc);
16239
16240    return (0);
16241}
16242
16243void
16244bxe_igu_ack_sb(struct bxe_softc *sc,
16245               uint8_t          igu_sb_id,
16246               uint8_t          segment,
16247               uint16_t         index,
16248               uint8_t          op,
16249               uint8_t          update)
16250{
16251    uint32_t igu_addr = sc->igu_base_addr;
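    /* the *8 below reflects the (apparent) 8-byte stride of the IGU command registers */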
16252    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16253    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16254}
16255
16256static void
16257bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16258                     uint8_t          func,
16259                     uint8_t          idu_sb_id,
16260                     uint8_t          is_pf)
16261{
16262    uint32_t data, ctl, cnt = 100;
16263    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16264    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16265    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16266    uint32_t sb_bit =  1 << (idu_sb_id%32);
16267    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16268    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16269
16270    /* Not supported in BC mode */
16271    if (CHIP_INT_MODE_IS_BC(sc)) {
16272        return;
16273    }
16274
16275    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16276             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16277            IGU_REGULAR_CLEANUP_SET |
16278            IGU_REGULAR_BCLEANUP);
16279
16280    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16281           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16282           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16283
16284    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16285            data, igu_addr_data);
16286    REG_WR(sc, igu_addr_data, data);
16287
16288    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16289                      BUS_SPACE_BARRIER_WRITE);
16290    mb();
16291
16292    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16293            ctl, igu_addr_ctl);
16294    REG_WR(sc, igu_addr_ctl, ctl);
16295
16296    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16297                      BUS_SPACE_BARRIER_WRITE);
16298    mb();
16299
16300    /* wait for clean up to finish */
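    /* (cnt starts at 100 and each poll sleeps 20ms, i.e. a ~2 second timeout) */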
16301    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16302        DELAY(20000);
16303    }
16304
16305    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16306        BLOGD(sc, DBG_LOAD,
16307              "Unable to finish IGU cleanup: "
16308              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16309              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16310    }
16311}
16312
16313static void
16314bxe_igu_clear_sb(struct bxe_softc *sc,
16315                 uint8_t          idu_sb_id)
16316{
16317    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16318}
16319
16320
16321
16322
16323
16324
16325
16326/*******************/
16327/* ECORE CALLBACKS */
16328/*******************/
16329
16330static void
16331bxe_reset_common(struct bxe_softc *sc)
16332{
16333    uint32_t val = 0x1400;
16334
16335    /* reset_common */
16336    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16337
16338    if (CHIP_IS_E3(sc)) {
16339        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16340        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16341    }
16342
16343    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16344}
16345
16346static void
16347bxe_common_init_phy(struct bxe_softc *sc)
16348{
16349    uint32_t shmem_base[2];
16350    uint32_t shmem2_base[2];
16351
16352    /* Avoid common init in case the MFW supports LFA (Link Flap Avoidance) */
16353    if (SHMEM2_RD(sc, size) >
16354        (uint32_t)offsetof(struct shmem2_region,
16355                           lfa_host_addr[SC_PORT(sc)])) {
16356        return;
16357    }
16358
16359    shmem_base[0]  = sc->devinfo.shmem_base;
16360    shmem2_base[0] = sc->devinfo.shmem2_base;
16361
16362    if (!CHIP_IS_E1x(sc)) {
16363        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16364        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16365    }
16366
16367    bxe_acquire_phy_lock(sc);
16368    elink_common_init_phy(sc, shmem_base, shmem2_base,
16369                          sc->devinfo.chip_id, 0);
16370    bxe_release_phy_lock(sc);
16371}
16372
16373static void
16374bxe_pf_disable(struct bxe_softc *sc)
16375{
16376    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16377
16378    val &= ~IGU_PF_CONF_FUNC_EN;
16379
16380    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16381    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16382    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16383}
16384
16385static void
16386bxe_init_pxp(struct bxe_softc *sc)
16387{
16388    uint16_t devctl;
16389    int r_order, w_order;
16390
16391    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16392
16393    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16394
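    /*
     * In the PCIe Device Control register, Max_Payload_Size occupies bits 7:5
     * and Max_Read_Request_Size bits 14:12, hence the shifts by 5 and 12 below.
     */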
16395    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16396
16397    if (sc->mrrs == -1) {
16398        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16399    } else {
16400        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16401        r_order = sc->mrrs;
16402    }
16403
16404    ecore_init_pxp_arb(sc, r_order, w_order);
16405}
16406
16407static uint32_t
16408bxe_get_pretend_reg(struct bxe_softc *sc)
16409{
16410    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16411    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16412    return (base + (SC_ABS_FUNC(sc)) * stride);
16413}
16414
16415/*
16416 * Called only on E1H or E2.
16417 * When pretending to be a PF, the pretend value is the function number 0..7.
16418 * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16419 * combination.
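 *
 * For example, the E2 timers workaround in bxe_init_hw_common() calls
 * bxe_pretend_func(sc, SC_PATH(sc) + 6) so that the subsequent dummy-ILT
 * writes land in VNIC3's context, and then restores itself with
 * bxe_pretend_func(sc, SC_ABS_FUNC(sc)).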
16420 */
16421static int
16422bxe_pretend_func(struct bxe_softc *sc,
16423                 uint16_t         pretend_func_val)
16424{
16425    uint32_t pretend_reg;
16426
16427    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16428        return (-1);
16429    }
16430
16431    /* get my own pretend register */
16432    pretend_reg = bxe_get_pretend_reg(sc);
16433    REG_WR(sc, pretend_reg, pretend_func_val);
16434    REG_RD(sc, pretend_reg);
16435    return (0);
16436}
16437
16438static void
16439bxe_iov_init_dmae(struct bxe_softc *sc)
16440{
16441    return;
16442}
16443
16444static void
16445bxe_iov_init_dq(struct bxe_softc *sc)
16446{
16447    return;
16448}
16449
16450/* send a NIG loopback debug packet */
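/*
 * Each call injects one 16-byte debug frame: the first DMAE write carries
 * 8 bytes of data plus a start-of-packet control word (0x20), the second
 * carries the remaining 8 bytes plus end-of-packet (0x10). This 0x10-byte
 * frame size is what bxe_int_mem_test() later looks for in the NIG octet
 * counter.
 */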
16451static void
16452bxe_lb_pckt(struct bxe_softc *sc)
16453{
16454    uint32_t wb_write[3];
16455
16456    /* Ethernet source and destination addresses */
16457    wb_write[0] = 0x55555555;
16458    wb_write[1] = 0x55555555;
16459    wb_write[2] = 0x20;     /* SOP */
16460    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16461
16462    /* NON-IP protocol */
16463    wb_write[0] = 0x09000000;
16464    wb_write[1] = 0x55555555;
16465    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16466    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16467}
16468
16469/*
16470 * Some of the internal memories are not directly readable from the driver.
16471 * To test them we send debug packets.
16472 */
16473static int
16474bxe_int_mem_test(struct bxe_softc *sc)
16475{
16476    int factor;
16477    int count, i;
16478    uint32_t val = 0;
16479
16480    if (CHIP_REV_IS_FPGA(sc)) {
16481        factor = 120;
16482    } else if (CHIP_REV_IS_EMUL(sc)) {
16483        factor = 200;
16484    } else {
16485        factor = 1;
16486    }
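    /*
     * The poll loops below run up to (1000 * factor) iterations of 10ms, so
     * the larger FPGA/emulation factors simply stretch the timeouts on slow
     * platforms.
     */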
16487
16488    /* disable inputs of parser neighbor blocks */
16489    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16490    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16491    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16492    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16493
16494    /*  write 0 to parser credits for CFC search request */
16495    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16496
16497    /* send Ethernet packet */
16498    bxe_lb_pckt(sc);
16499
16500    /* TODO: should the NIG statistics be reset here? */
16501    /* Wait until NIG register shows 1 packet of size 0x10 */
16502    count = 1000 * factor;
16503    while (count) {
16504        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16505        val = *BXE_SP(sc, wb_data[0]);
16506        if (val == 0x10) {
16507            break;
16508        }
16509
16510        DELAY(10000);
16511        count--;
16512    }
16513
16514    if (val != 0x10) {
16515        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16516        return (-1);
16517    }
16518
16519    /* wait until PRS register shows 1 packet */
16520    count = (1000 * factor);
16521    while (count) {
16522        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16523        if (val == 1) {
16524            break;
16525        }
16526
16527        DELAY(10000);
16528        count--;
16529    }
16530
16531    if (val != 0x1) {
16532        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16533        return (-2);
16534    }
16535
16536    /* Reset and init BRB, PRS */
16537    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16538    DELAY(50000);
16539    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16540    DELAY(50000);
16541    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16542    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16543
16544    /* Disable inputs of parser neighbor blocks */
16545    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16546    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16547    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16548    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16549
16550    /* Write 0 to parser credits for CFC search request */
16551    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16552
16553    /* send 10 Ethernet packets */
16554    for (i = 0; i < 10; i++) {
16555        bxe_lb_pckt(sc);
16556    }
16557
16558    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16559    count = (1000 * factor);
16560    while (count) {
16561        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16562        val = *BXE_SP(sc, wb_data[0]);
16563        if (val == 0xb0) {
16564            break;
16565        }
16566
16567        DELAY(10000);
16568        count--;
16569    }
16570
16571    if (val != 0xb0) {
16572        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16573        return (-3);
16574    }
16575
16576    /* Wait until PRS register shows 2 packets */
16577    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16578    if (val != 2) {
16579        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16580    }
16581
16582    /* Write 1 to parser credits for CFC search request */
16583    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16584
16585    /* Wait until PRS register shows 3 packets */
16586    DELAY(10000 * factor);
16587
16588    /* check that the PRS register now shows 3 packets */
16589    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16590    if (val != 3) {
16591        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16592    }
16593
16594    /* clear NIG EOP FIFO */
16595    for (i = 0; i < 11; i++) {
16596        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16597    }
16598
16599    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16600    if (val != 1) {
16601        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16602        return (-4);
16603    }
16604
16605    /* Reset and init BRB, PRS, NIG */
16606    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16607    DELAY(50000);
16608    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16609    DELAY(50000);
16610    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16611    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16612    if (!CNIC_SUPPORT(sc)) {
16613        /* set NIC mode */
16614        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16615    }
16616
16617    /* Enable inputs of parser neighbor blocks */
16618    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16619    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16620    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16621    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16622
16623    return (0);
16624}
16625
16626static void
16627bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16628{
16629    int is_required;
16630    uint32_t val;
16631    int port;
16632
16633    is_required = 0;
16634    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16635           SHARED_HW_CFG_FAN_FAILURE_MASK);
16636
16637    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16638        is_required = 1;
16639    }
16640    /*
16641     * The fan failure mechanism is usually related to the PHY type since
16642     * the power consumption of the board is affected by the PHY. Currently,
16643     * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16644     */
16645    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16646        for (port = PORT_0; port < PORT_MAX; port++) {
16647            is_required |= elink_fan_failure_det_req(sc,
16648                                                     sc->devinfo.shmem_base,
16649                                                     sc->devinfo.shmem2_base,
16650                                                     port);
16651        }
16652    }
16653
16654    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16655
16656    if (is_required == 0) {
16657        return;
16658    }
16659
16660    /* Fan failure is indicated by SPIO 5 */
16661    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16662
16663    /* set to active low mode */
16664    val = REG_RD(sc, MISC_REG_SPIO_INT);
16665    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16666    REG_WR(sc, MISC_REG_SPIO_INT, val);
16667
16668    /* enable interrupt to signal the IGU */
16669    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16670    val |= MISC_SPIO_SPIO5;
16671    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16672}
16673
16674static void
16675bxe_enable_blocks_attention(struct bxe_softc *sc)
16676{
16677    uint32_t val;
16678
16679    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16680    if (!CHIP_IS_E1x(sc)) {
16681        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16682    } else {
16683        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16684    }
16685    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16686    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16687    /*
16688     * mask read length error interrupts in brb for parser
16689     * mask read length error interrupts in brb for parser
16690     * (parsing unit and 'checksum and crc' unit);
16691     * these errors are legal (the PU reads a fixed length and the CAC can
16692     * cause a read length error on truncated packets)
16693    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16694    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16695    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16696    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16697    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16698    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16699/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16700/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16701    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16702    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16703    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16704/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16705/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16706    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16707    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16708    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16709    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16710/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16711/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16712
16713    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16714           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16715           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16716    if (!CHIP_IS_E1x(sc)) {
16717        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16718                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16719    }
16720    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16721
16722    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16723    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16724    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16725/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16726
16727    if (!CHIP_IS_E1x(sc)) {
16728        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16729        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16730    }
16731
16732    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16733    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16734/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16735    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16736}
16737
16738/**
16739 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16740 *
16741 * @sc:     driver handle
16742 */
16743static int
16744bxe_init_hw_common(struct bxe_softc *sc)
16745{
16746    uint8_t abs_func_id;
16747    uint32_t val;
16748
16749    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16750          SC_ABS_FUNC(sc));
16751
16752    /*
16753     * take the RESET lock to protect undi_unload flow from accessing
16754     * registers while we are resetting the chip
16755     */
16756    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16757
16758    bxe_reset_common(sc);
16759
16760    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16761
16762    val = 0xfffc;
16763    if (CHIP_IS_E3(sc)) {
16764        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16765        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16766    }
16767
16768    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16769
16770    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16771
16772    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16773    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16774
16775    if (!CHIP_IS_E1x(sc)) {
16776        /*
16777         * In 4-port or 2-port mode we need to turn off master-enable for
16778         * everyone. After that we turn it back on for ourselves. So, we
16779         * disregard multi-function and always disable all functions on the
16780         * given path; this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
16781         */
16782        for (abs_func_id = SC_PATH(sc);
16783             abs_func_id < (E2_FUNC_MAX * 2);
16784             abs_func_id += 2) {
16785            if (abs_func_id == SC_ABS_FUNC(sc)) {
16786                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16787                continue;
16788            }
16789
16790            bxe_pretend_func(sc, abs_func_id);
16791
16792            /* clear pf enable */
16793            bxe_pf_disable(sc);
16794
16795            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16796        }
16797    }
16798
16799    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16800
16801    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16802
16803    if (CHIP_IS_E1(sc)) {
16804        /*
16805         * enable HW interrupt from PXP on USDM overflow
16806         * bit 16 on INT_MASK_0
16807         */
16808        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16809    }
16810
16811    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16812    bxe_init_pxp(sc);
16813
16814#ifdef __BIG_ENDIAN
16815    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16816    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16817    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16818    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16819    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16820    /* make sure this value is 0 */
16821    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16822
16823    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16824    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16825    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16826    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16827    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16828#endif
16829
16830    ecore_ilt_init_page_size(sc, INITOP_SET);
16831
16832    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16833        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16834    }
16835
16836    /* let the HW do its magic... */
16837    DELAY(100000);
16838
16839    /* finish PXP init */
16840    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16841    if (val != 1) {
16842        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16843            val);
16844        return (-1);
16845    }
16846    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16847    if (val != 1) {
16848        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16849        return (-1);
16850    }
16851
16852    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16853
16854    /*
16855     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16856     * entries with value "0" and valid bit on. This needs to be done by the
16857     * first PF that is loaded in a path (i.e. common phase)
16858     */
16859    if (!CHIP_IS_E1x(sc)) {
16860/*
16861 * In E2 there is a bug in the timers block that can cause function 6 / 7
16862 * (i.e. vnic3) to start even if it is marked as "scan-off".
16863 * This occurs when a different function (func2,3) is being marked
16864 * as "scan-off". A real-life example: a driver being repeatedly loaded and
16865 * unloaded while func6,7 are down. This will cause the timer to access
16866 * the ilt, translate to a logical address and send a request to read/write.
16867 * Since the ilt for the function that is down is not valid, this will cause
16868 * a translation error which is unrecoverable.
16869 * The Workaround is intended to make sure that when this happens nothing
16870 * fatal will occur. The workaround:
16871 *  1.  First PF driver which loads on a path will:
16872 *      a.  After taking the chip out of reset, by using pretend,
16873 *          it will write "0" to the following registers of
16874 *          the other vnics.
16875 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16876 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16877 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16878 *          And for itself it will write '1' to
16879 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16880 *          dmae-operations (writing to pram for example.)
16881 *          note: can be done for only function 6,7 but cleaner this
16882 *            way.
16883 *      b.  Write zero+valid to the entire ILT.
16884 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16885 *          VNIC3 (of that port). The range allocated will be the
16886 *          entire ILT. This is needed to prevent an ILT range error.
16887 *  2.  Any PF driver load flow:
16888 *      a.  ILT update with the physical addresses of the allocated
16889 *          logical pages.
16890 *      b.  Wait 20msec. - note that this timeout is needed to make
16891 *          sure there are no requests in one of the PXP internal
16892 *          queues with "old" ILT addresses.
16893 *      c.  PF enable in the PGLC.
16894 *      d.  Clear the was_error of the PF in the PGLC. (could have
16895 *          occurred while driver was down)
16896 *      e.  PF enable in the CFC (WEAK + STRONG)
16897 *      f.  Timers scan enable
16898 *  3.  PF driver unload flow:
16899 *      a.  Clear the Timers scan_en.
16900 *      b.  Polling for scan_on=0 for that PF.
16901 *      c.  Clear the PF enable bit in the PXP.
16902 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16903 *      e.  Write zero+valid to all ILT entries (The valid bit must
16904 *          stay set)
16905 *      f.  If this is VNIC 3 of a port then also init
16906 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16907 *          to the last entry in the ILT.
16908 *
16909 *      Notes:
16910 *      Currently the PF error in the PGLC is non-recoverable.
16911 *      In the future there will be a recovery routine for this error.
16912 *      Currently attention is masked.
16913 *      Having an MCP lock on the load/unload process does not guarantee that
16914 *      there is no Timer disable during Func6/7 enable. This is because the
16915 *      Timers scan is currently being cleared by the MCP on FLR.
16916 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16917 *      there is error before clearing it. But the flow above is simpler and
16918 *      more general.
16919 *      All ILT entries are written by zero+valid and not just PF6/7
16920 *      ILT entries since in the future the ILT entries allocation for
16921 *      PF-s might be dynamic.
16922 */
16923        struct ilt_client_info ilt_cli;
16924        struct ecore_ilt ilt;
16925
16926        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16927        memset(&ilt, 0, sizeof(struct ecore_ilt));
16928
16929        /* initialize dummy TM client */
16930        ilt_cli.start      = 0;
16931        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16932        ilt_cli.client_num = ILT_CLIENT_TM;
16933
16934        /*
16935         * Step 1: set zeroes to all ilt page entries with valid bit on
16936         * Step 2: set the timers first/last ilt entry to point
16937         * to the entire range to prevent ILT range error for 3rd/4th
16938         * vnic (this code assumes existence of the vnic)
16939         *
16940         * both steps performed by call to ecore_ilt_client_init_op()
16941         * with dummy TM client
16942         *
16943         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16944         * and its counterpart are split registers
16945         */
16946
16947        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16948        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16949        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16950
16951        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16952        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16953        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16954    }
16955
16956    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16957    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16958
16959    if (!CHIP_IS_E1x(sc)) {
16960        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16961                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16962
16963        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16964        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16965
16966        /* let the HW do its magic... */
16967        do {
16968            DELAY(200000);
16969            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16970        } while (factor-- && (val != 1));
16971
16972        if (val != 1) {
16973            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16974            return (-1);
16975        }
16976    }
16977
16978    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16979
16980    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16981
16982    bxe_iov_init_dmae(sc);
16983
16984    /* clean the DMAE memory */
16985    sc->dmae_ready = 1;
16986    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16987
16988    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16989
16990    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16991
16992    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16993
16994    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16995
16996    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16997    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16998    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16999    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17000
17001    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17002
17003    /* QM queues pointers table */
17004    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17005
17006    /* soft reset pulse */
17007    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17008    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17009
17010    if (CNIC_SUPPORT(sc))
17011        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17012
17013    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17014    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17015    if (!CHIP_REV_IS_SLOW(sc)) {
17016        /* enable hw interrupt from doorbell Q */
17017        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17018    }
17019
17020    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17021
17022    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17023    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17024
17025    if (!CHIP_IS_E1(sc)) {
17026        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17027    }
17028
17029    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17030        if (IS_MF_AFEX(sc)) {
17031            /*
17032             * configure that AFEX and VLAN headers must be
17033             * received in AFEX mode
17034             */
17035            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17036            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17037            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17038            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17039            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17040        } else {
17041            /*
17042             * Bit-map indicating which L2 hdrs may appear
17043             * after the basic Ethernet header
17044             */
17045            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17046                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17047        }
17048    }
17049
17050    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17051    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17052    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17053    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17054
17055    if (!CHIP_IS_E1x(sc)) {
17056        /* reset VFC memories */
17057        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17058               VFC_MEMORIES_RST_REG_CAM_RST |
17059               VFC_MEMORIES_RST_REG_RAM_RST);
17060        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17061               VFC_MEMORIES_RST_REG_CAM_RST |
17062               VFC_MEMORIES_RST_REG_RAM_RST);
17063
17064        DELAY(20000);
17065    }
17066
17067    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17068    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17069    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17070    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17071
17072    /* sync semi rtc */
17073    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17074           0x80000000);
17075    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17076           0x80000000);
17077
17078    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17079    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17080    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17081
17082    if (!CHIP_IS_E1x(sc)) {
17083        if (IS_MF_AFEX(sc)) {
17084            /*
17085             * configure that AFEX and VLAN headers must be
17086             * sent in AFEX mode
17087             */
17088            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17089            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17090            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17091            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17092            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17093        } else {
17094            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17095                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17096        }
17097    }
17098
17099    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17100
17101    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17102
17103    if (CNIC_SUPPORT(sc)) {
17104        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17105        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17106        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17107        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17108        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17109        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17110        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17111        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17112        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17113        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17114    }
17115    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17116
17117    if (sizeof(union cdu_context) != 1024) {
17118        /* we currently assume that a context is 1024 bytes */
17119        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17120              (long)sizeof(union cdu_context));
17121    }
17122
17123    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17124    val = (4 << 24) + (0 << 12) + 1024;
17125    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17126
17127    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17128
17129    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17130    /* enable context validation interrupt from CFC */
17131    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17132
17133    /* set the thresholds to prevent CFC/CDU race */
17134    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17135    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17136
17137    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17138        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17139    }
17140
17141    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17142    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17143
17144    /* Reset PCIE errors for debug */
17145    REG_WR(sc, 0x2814, 0xffffffff);
17146    REG_WR(sc, 0x3820, 0xffffffff);
17147
17148    if (!CHIP_IS_E1x(sc)) {
17149        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17150               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17151                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17152        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17153               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17154                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17155                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17156        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17157               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17158                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17159                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17160    }
17161
17162    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17163
17164    if (!CHIP_IS_E1(sc)) {
17165        /* in E3 this is done in the per-port section */
17166        if (!CHIP_IS_E3(sc))
17167            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17168    }
17169
17170    if (CHIP_IS_E1H(sc)) {
17171        /* not applicable for E2 (and above ...) */
17172        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17173    }
17174
17175    if (CHIP_REV_IS_SLOW(sc)) {
17176        DELAY(200000);
17177    }
17178
17179    /* finish CFC init */
17180    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17181    if (val != 1) {
17182        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17183        return (-1);
17184    }
17185    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17186    if (val != 1) {
17187        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17188        return (-1);
17189    }
17190    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17191    if (val != 1) {
17192        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17193        return (-1);
17194    }
17195    REG_WR(sc, CFC_REG_DEBUG0, 0);
17196
17197    if (CHIP_IS_E1(sc)) {
17198        /* read NIG statistic to see if this is our first up since powerup */
17199        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17200        val = *BXE_SP(sc, wb_data[0]);
17201
17202        /* do internal memory self test */
17203        if ((val == 0) && bxe_int_mem_test(sc)) {
17204            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17205            return (-1);
17206        }
17207    }
17208
17209    bxe_setup_fan_failure_detection(sc);
17210
17211    /* clear PXP2 attentions */
17212    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17213
17214    bxe_enable_blocks_attention(sc);
17215
17216    if (!CHIP_REV_IS_SLOW(sc)) {
17217        ecore_enable_blocks_parity(sc);
17218    }
17219
17220    if (!BXE_NOMCP(sc)) {
17221        if (CHIP_IS_E1x(sc)) {
17222            bxe_common_init_phy(sc);
17223        }
17224    }
17225
17226    return (0);
17227}
17228
17229/**
17230 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17231 *
17232 * @sc:     driver handle
17233 */
17234static int
17235bxe_init_hw_common_chip(struct bxe_softc *sc)
17236{
17237    int rc = bxe_init_hw_common(sc);
17238
17239    if (rc) {
17240        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17241        return (rc);
17242    }
17243
17244    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17245    if (!BXE_NOMCP(sc)) {
17246        bxe_common_init_phy(sc);
17247    }
17248
17249    return (0);
17250}
17251
17252static int
17253bxe_init_hw_port(struct bxe_softc *sc)
17254{
17255    int port = SC_PORT(sc);
17256    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17257    uint32_t low, high;
17258    uint32_t val;
17259
17260    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17261
17262    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17263
17264    ecore_init_block(sc, BLOCK_MISC, init_phase);
17265    ecore_init_block(sc, BLOCK_PXP, init_phase);
17266    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17267
17268    /*
17269     * Timers bug workaround: the pf_master bit in pglue is disabled during
17270     * the common phase, so we need to enable it here before any dmae access
17271     * is attempted. Therefore we manually added the enable-master to the
17272     * port phase (it also happens in the function phase).
17273     */
17274    if (!CHIP_IS_E1x(sc)) {
17275        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17276    }
17277
17278    ecore_init_block(sc, BLOCK_ATC, init_phase);
17279    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17280    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17281    ecore_init_block(sc, BLOCK_QM, init_phase);
17282
17283    ecore_init_block(sc, BLOCK_TCM, init_phase);
17284    ecore_init_block(sc, BLOCK_UCM, init_phase);
17285    ecore_init_block(sc, BLOCK_CCM, init_phase);
17286    ecore_init_block(sc, BLOCK_XCM, init_phase);
17287
17288    /* QM cid (connection) count */
17289    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17290
17291    if (CNIC_SUPPORT(sc)) {
17292        ecore_init_block(sc, BLOCK_TM, init_phase);
17293        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17294        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17295    }
17296
17297    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17298
17299    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17300
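    /*
     * Program the BRB pause thresholds. Judging by the inline arithmetic
     * below, the values are in 256-byte units: "low" depends on port count,
     * MF mode and MTU, while "high" sits 14KB (56 units) above it.
     */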
17301    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17302        if (IS_MF(sc)) {
17303            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17304        } else if (sc->mtu > 4096) {
17305            if (BXE_ONE_PORT(sc)) {
17306                low = 160;
17307            } else {
17308                val = sc->mtu;
17309                /* (24*1024 + val*4)/256 */
17310                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17311            }
17312        } else {
17313            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17314        }
17315        high = (low + 56); /* 14*1024/256 */
17316        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17317        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17318    }
17319
17320    if (CHIP_IS_MODE_4_PORT(sc)) {
17321        REG_WR(sc, SC_PORT(sc) ?
17322               BRB1_REG_MAC_GUARANTIED_1 :
17323               BRB1_REG_MAC_GUARANTIED_0, 40);
17324    }
17325
17326    ecore_init_block(sc, BLOCK_PRS, init_phase);
17327    if (CHIP_IS_E3B0(sc)) {
17328        if (IS_MF_AFEX(sc)) {
17329            /* configure headers for AFEX mode */
17330            REG_WR(sc, SC_PORT(sc) ?
17331                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17332                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17333            REG_WR(sc, SC_PORT(sc) ?
17334                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17335                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17336            REG_WR(sc, SC_PORT(sc) ?
17337                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17338                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17339        } else {
17340            /* Ovlan exists only if we are in multi-function +
17341             * switch-dependent mode; in switch-independent mode there
17342             * are no ovlan headers
17343             */
17344            REG_WR(sc, SC_PORT(sc) ?
17345                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17346                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17347                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17348        }
17349    }
17350
17351    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17352    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17353    ecore_init_block(sc, BLOCK_USDM, init_phase);
17354    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17355
17356    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17357    ecore_init_block(sc, BLOCK_USEM, init_phase);
17358    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17359    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17360
17361    ecore_init_block(sc, BLOCK_UPB, init_phase);
17362    ecore_init_block(sc, BLOCK_XPB, init_phase);
17363
17364    ecore_init_block(sc, BLOCK_PBF, init_phase);
17365
17366    if (CHIP_IS_E1x(sc)) {
17367        /* configure PBF to work without PAUSE mtu 9000 */
17368        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17369
17370        /* update threshold */
17371        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17372        /* update init credit */
17373        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17374
17375        /* probe changes */
17376        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17377        DELAY(50);
17378        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17379    }
17380
17381    if (CNIC_SUPPORT(sc)) {
17382        ecore_init_block(sc, BLOCK_SRC, init_phase);
17383    }
17384
17385    ecore_init_block(sc, BLOCK_CDU, init_phase);
17386    ecore_init_block(sc, BLOCK_CFC, init_phase);
17387
17388    if (CHIP_IS_E1(sc)) {
17389        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17390        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17391    }
17392    ecore_init_block(sc, BLOCK_HC, init_phase);
17393
17394    ecore_init_block(sc, BLOCK_IGU, init_phase);
17395
17396    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17397    /* init aeu_mask_attn_func_0/1:
17398     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17399     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17400     *             bits 4-7 are used for "per vn group attention" */
17401    val = IS_MF(sc) ? 0xF7 : 0x7;
17402    /* Enable DCBX attention for all but E1 */
17403    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
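    /*
     * For example, a single-function non-E1 device ends up with
     * val = 0x7 | 0x10 = 0x17, while a multi-function device already has
     * bit 4 set in 0xF7, so OR-ing in the DCBX bit is a no-op there.
     */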
17404    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17405
17406    ecore_init_block(sc, BLOCK_NIG, init_phase);
17407
17408    if (!CHIP_IS_E1x(sc)) {
17409        /* Bit-map indicating which L2 hdrs may appear after the
17410         * basic Ethernet header
17411         */
17412        if (IS_MF_AFEX(sc)) {
17413            REG_WR(sc, SC_PORT(sc) ?
17414                   NIG_REG_P1_HDRS_AFTER_BASIC :
17415                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17416        } else {
17417            REG_WR(sc, SC_PORT(sc) ?
17418                   NIG_REG_P1_HDRS_AFTER_BASIC :
17419                   NIG_REG_P0_HDRS_AFTER_BASIC,
17420                   IS_MF_SD(sc) ? 7 : 6);
17421        }
17422
17423        if (CHIP_IS_E3(sc)) {
17424            REG_WR(sc, SC_PORT(sc) ?
17425                   NIG_REG_LLH1_MF_MODE :
17426                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17427        }
17428    }
17429    if (!CHIP_IS_E3(sc)) {
17430        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17431    }
17432
17433    if (!CHIP_IS_E1(sc)) {
17434        /* 0x2 disable mf_ov, 0x1 enable */
17435        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17436               (IS_MF_SD(sc) ? 0x1 : 0x2));
17437
17438        if (!CHIP_IS_E1x(sc)) {
17439            val = 0;
17440            switch (sc->devinfo.mf_info.mf_mode) {
17441            case MULTI_FUNCTION_SD:
17442                val = 1;
17443                break;
17444            case MULTI_FUNCTION_SI:
17445            case MULTI_FUNCTION_AFEX:
17446                val = 2;
17447                break;
17448            }
17449
17450            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17451                        NIG_REG_LLH0_CLS_TYPE), val);
17452        }
17453        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17454        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17455        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17456    }
17457
17458    /* If SPIO5 is set to generate interrupts, enable it for this port */
17459    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17460    if (val & MISC_SPIO_SPIO5) {
17461        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17462                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17463        val = REG_RD(sc, reg_addr);
17464        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17465        REG_WR(sc, reg_addr, val);
17466    }
17467
17468    return (0);
17469}
17470
17471static uint32_t
17472bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17473                       uint32_t         reg,
17474                       uint32_t         expected,
17475                       uint32_t         poll_count)
17476{
17477    uint32_t cur_cnt = poll_count;
17478    uint32_t val;
17479
17480    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17481        DELAY(FLR_WAIT_INTERVAL);
17482    }
17483
17484    return (val);
17485}
17486
17487static int
17488bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17489                              uint32_t         reg,
17490                              char             *msg,
17491                              uint32_t         poll_cnt)
17492{
17493    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17494
17495    if (val != 0) {
17496        BLOGE(sc, "%s usage count=%d\n", msg, val);
17497        return (1);
17498    }
17499
17500    return (0);
17501}
17502
17503/* Common routines with VF FLR cleanup */
17504static uint32_t
17505bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17506{
17507    /* adjust polling timeout */
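    /*
     * Each poll iteration sleeps FLR_WAIT_INTERVAL usec, so FLR_POLL_CNT
     * iterations cover the nominal FLR wait time on real silicon; emulation
     * and FPGA platforms run far slower, hence the scaled counts below.
     */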
17508    if (CHIP_REV_IS_EMUL(sc)) {
17509        return (FLR_POLL_CNT * 2000);
17510    }
17511
17512    if (CHIP_REV_IS_FPGA(sc)) {
17513        return (FLR_POLL_CNT * 120);
17514    }
17515
17516    return (FLR_POLL_CNT);
17517}
17518
17519static int
17520bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17521                           uint32_t         poll_cnt)
17522{
17523    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17524    if (bxe_flr_clnup_poll_hw_counter(sc,
17525                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17526                                      "CFC PF usage counter timed out",
17527                                      poll_cnt)) {
17528        return (1);
17529    }
17530
17531    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17532    if (bxe_flr_clnup_poll_hw_counter(sc,
17533                                      DORQ_REG_PF_USAGE_CNT,
17534                                      "DQ PF usage counter timed out",
17535                                      poll_cnt)) {
17536        return (1);
17537    }
17538
17539    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17540    if (bxe_flr_clnup_poll_hw_counter(sc,
17541                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17542                                      "QM PF usage counter timed out",
17543                                      poll_cnt)) {
17544        return (1);
17545    }
17546
17547    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17548    if (bxe_flr_clnup_poll_hw_counter(sc,
17549                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17550                                      "Timers VNIC usage counter timed out",
17551                                      poll_cnt)) {
17552        return (1);
17553    }
17554
17555    if (bxe_flr_clnup_poll_hw_counter(sc,
17556                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17557                                      "Timers NUM_SCANS usage counter timed out",
17558                                      poll_cnt)) {
17559        return (1);
17560    }
17561
17562    /* Wait for the DMAE command register to clear */
17563    if (bxe_flr_clnup_poll_hw_counter(sc,
17564                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17565                                      "DMAE command register timed out",
17566                                      poll_cnt)) {
17567        return (1);
17568    }
17569
17570    return (0);
17571}
17572
17573#define OP_GEN_PARAM(param)                                            \
17574    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17575#define OP_GEN_TYPE(type)                                           \
17576    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17577#define OP_GEN_AGG_VECT(index)                                             \
17578    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
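/*
 * These macros shift a value into its SDM "operation generator" field and
 * mask it to the field width. bxe_send_final_clnup() below ORs together the
 * completion parameter, completion type, aggregated vector index and the
 * vector-valid bit to form the single 32-bit command written to
 * XSDM_REG_OPERATION_GEN.
 */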
17579
17580static int
17581bxe_send_final_clnup(struct bxe_softc *sc,
17582                     uint8_t          clnup_func,
17583                     uint32_t         poll_cnt)
17584{
17585    uint32_t op_gen_command = 0;
17586    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17587                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17588    int ret = 0;
17589
17590    if (REG_RD(sc, comp_addr)) {
17591        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17592        return (1);
17593    }
17594
17595    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17596    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17597    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17598    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17599
17600    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17601    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17602
17603    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17604        BLOGE(sc, "FW final cleanup did not succeed\n");
17605        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17606              (REG_RD(sc, comp_addr)));
17607        bxe_panic(sc, ("FLR cleanup failed\n"));
17608        return (1);
17609    }
17610
17611    /* Zero the completion for the next FLR */
17612    REG_WR(sc, comp_addr, 0);
17613
17614    return (ret);
17615}
17616
17617static void
17618bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17619                       struct pbf_pN_buf_regs *regs,
17620                       uint32_t               poll_count)
17621{
17622    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17623    uint32_t cur_cnt = poll_count;
17624
17625    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17626    crd = crd_start = REG_RD(sc, regs->crd);
17627    init_crd = REG_RD(sc, regs->init_crd);
17628
17629    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17630    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17631    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
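    /*
     * Poll until either all credits have been returned (crd == init_crd) or
     * the credits freed since the start cover the outstanding amount; the
     * signed difference cast back to unsigned tolerates counter wrap-around.
     */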
17632
17633    while ((crd != init_crd) &&
17634           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17635            (init_crd - crd_start))) {
17636        if (cur_cnt--) {
17637            DELAY(FLR_WAIT_INTERVAL);
17638            crd = REG_RD(sc, regs->crd);
17639            crd_freed = REG_RD(sc, regs->crd_freed);
17640        } else {
17641            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17642            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17643            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17644            break;
17645        }
17646    }
17647
17648    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17649          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17650}
17651
17652static void
17653bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17654                       struct pbf_pN_cmd_regs *regs,
17655                       uint32_t               poll_count)
17656{
17657    uint32_t occup, to_free, freed, freed_start;
17658    uint32_t cur_cnt = poll_count;
17659
17660    occup = to_free = REG_RD(sc, regs->lines_occup);
17661    freed = freed_start = REG_RD(sc, regs->lines_freed);
17662
17663    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17664    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17665
17666    while (occup &&
17667           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17668        if (cur_cnt--) {
17669            DELAY(FLR_WAIT_INTERVAL);
17670            occup = REG_RD(sc, regs->lines_occup);
17671            freed = REG_RD(sc, regs->lines_freed);
17672        } else {
17673            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17674            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : c:%x\n", regs->pN, occup);
17675            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : c:%x\n", regs->pN, freed);
17676            break;
17677        }
17678    }
17679
17680    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17681          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17682}
17683
17684static void
17685bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17686{
17687    struct pbf_pN_cmd_regs cmd_regs[] = {
17688        {0, (CHIP_IS_E3B0(sc)) ?
17689            PBF_REG_TQ_OCCUPANCY_Q0 :
17690            PBF_REG_P0_TQ_OCCUPANCY,
17691            (CHIP_IS_E3B0(sc)) ?
17692            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17693            PBF_REG_P0_TQ_LINES_FREED_CNT},
17694        {1, (CHIP_IS_E3B0(sc)) ?
17695            PBF_REG_TQ_OCCUPANCY_Q1 :
17696            PBF_REG_P1_TQ_OCCUPANCY,
17697            (CHIP_IS_E3B0(sc)) ?
17698            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17699            PBF_REG_P1_TQ_LINES_FREED_CNT},
17700        {4, (CHIP_IS_E3B0(sc)) ?
17701            PBF_REG_TQ_OCCUPANCY_LB_Q :
17702            PBF_REG_P4_TQ_OCCUPANCY,
17703            (CHIP_IS_E3B0(sc)) ?
17704            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17705            PBF_REG_P4_TQ_LINES_FREED_CNT}
17706    };
17707
17708    struct pbf_pN_buf_regs buf_regs[] = {
17709        {0, (CHIP_IS_E3B0(sc)) ?
17710            PBF_REG_INIT_CRD_Q0 :
17711            PBF_REG_P0_INIT_CRD ,
17712            (CHIP_IS_E3B0(sc)) ?
17713            PBF_REG_CREDIT_Q0 :
17714            PBF_REG_P0_CREDIT,
17715            (CHIP_IS_E3B0(sc)) ?
17716            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17717            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17718        {1, (CHIP_IS_E3B0(sc)) ?
17719            PBF_REG_INIT_CRD_Q1 :
17720            PBF_REG_P1_INIT_CRD,
17721            (CHIP_IS_E3B0(sc)) ?
17722            PBF_REG_CREDIT_Q1 :
17723            PBF_REG_P1_CREDIT,
17724            (CHIP_IS_E3B0(sc)) ?
17725            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17726            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17727        {4, (CHIP_IS_E3B0(sc)) ?
17728            PBF_REG_INIT_CRD_LB_Q :
17729            PBF_REG_P4_INIT_CRD,
17730            (CHIP_IS_E3B0(sc)) ?
17731            PBF_REG_CREDIT_LB_Q :
17732            PBF_REG_P4_CREDIT,
17733            (CHIP_IS_E3B0(sc)) ?
17734            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17735            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17736    };
17737
17738    int i;
17739
17740    /* Verify the command queues are flushed P0, P1, P4 */
17741    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17742        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17743    }
17744
17745    /* Verify the transmission buffers are flushed P0, P1, P4 */
17746    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17747        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17748    }
17749}
17750
17751static void
17752bxe_hw_enable_status(struct bxe_softc *sc)
17753{
17754    uint32_t val;
17755
17756    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17757    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17758
17759    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17760    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17761
17762    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17763    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17764
17765    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17766    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17767
17768    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17769    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17770
17771    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17772    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17773
17774    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17775    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17776
17777    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17778    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17779}
17780
17781static int
17782bxe_pf_flr_clnup(struct bxe_softc *sc)
17783{
17784    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17785
17786    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17787
17788    /* Re-enable PF target read access */
17789    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17790
17791    /* Poll HW usage counters */
17792    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17793    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17794        return (-1);
17795    }
17796
17797    /* Zero the igu 'trailing edge' and 'leading edge' */
17798
17799    /* Send the FW cleanup command */
17800    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17801        return (-1);
17802    }
17803
17804    /* ATC cleanup */
17805
17806    /* Verify TX hw is flushed */
17807    bxe_tx_hw_flushed(sc, poll_cnt);
17808
17809    /* Wait 100ms (not adjusted according to platform) */
17810    DELAY(100000);
17811
17812    /* Verify no pending pci transactions */
17813    if (bxe_is_pcie_pending(sc)) {
17814        BLOGE(sc, "PCIE Transactions still pending\n");
17815    }
17816
17817    /* Debug */
17818    bxe_hw_enable_status(sc);
17819
17820    /*
17821     * Master enable - Due to WB DMAE writes performed before this
17822     * register is re-initialized as part of the regular function init
17823     */
17824    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17825
17826    return (0);
17827}
17828
17829static int
17830bxe_init_hw_func(struct bxe_softc *sc)
17831{
17832    int port = SC_PORT(sc);
17833    int func = SC_FUNC(sc);
17834    int init_phase = PHASE_PF0 + func;
17835    struct ecore_ilt *ilt = sc->ilt;
17836    uint16_t cdu_ilt_start;
17837    uint32_t addr, val;
17838    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17839    int i, main_mem_width, rc;
17840
17841    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17842
17843    /* FLR cleanup */
17844    if (!CHIP_IS_E1x(sc)) {
17845        rc = bxe_pf_flr_clnup(sc);
17846        if (rc) {
17847            BLOGE(sc, "FLR cleanup failed!\n");
17848            // XXX bxe_fw_dump(sc);
17849            // XXX bxe_idle_chk(sc);
17850            return (rc);
17851        }
17852    }
17853
17854    /* set MSI reconfigure capability */
17855    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17856        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17857        val = REG_RD(sc, addr);
17858        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17859        REG_WR(sc, addr, val);
17860    }
17861
17862    ecore_init_block(sc, BLOCK_PXP, init_phase);
17863    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17864
17865    ilt = sc->ilt;
17866    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17867
17868    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17869        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17870        ilt->lines[cdu_ilt_start + i].page_mapping =
17871            sc->context[i].vcxt_dma.paddr;
17872        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17873    }
17874    ecore_ilt_init_op(sc, INITOP_SET);
17875
17876    /* Set NIC mode */
17877    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17878    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17879
17880    if (!CHIP_IS_E1x(sc)) {
17881        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17882
17883        /* Turn on a single ISR mode in IGU if driver is going to use
17884         * INT#x or MSI
17885         */
17886        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17887            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17888        }
17889
17890        /*
17891         * Timers workaround bug: function init part.
17892         * Wait 20 msec after initializing the ILT to make sure
17893         * there are no requests in one of the PXP internal queues
17894         * with "old" ILT addresses.
17895         */
17896        DELAY(20000);
17897
17898        /*
17899         * Master enable - Due to WB DMAE writes performed before this
17900         * register is re-initialized as part of the regular function
17901         * init
17902         */
17903        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17904        /* Enable the function in IGU */
17905        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17906    }
17907
17908    sc->dmae_ready = 1;
17909
17910    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17911
17912    if (!CHIP_IS_E1x(sc))
17913        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17914
17915    ecore_init_block(sc, BLOCK_ATC, init_phase);
17916    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17917    ecore_init_block(sc, BLOCK_NIG, init_phase);
17918    ecore_init_block(sc, BLOCK_SRC, init_phase);
17919    ecore_init_block(sc, BLOCK_MISC, init_phase);
17920    ecore_init_block(sc, BLOCK_TCM, init_phase);
17921    ecore_init_block(sc, BLOCK_UCM, init_phase);
17922    ecore_init_block(sc, BLOCK_CCM, init_phase);
17923    ecore_init_block(sc, BLOCK_XCM, init_phase);
17924    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17925    ecore_init_block(sc, BLOCK_USEM, init_phase);
17926    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17927    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17928
17929    if (!CHIP_IS_E1x(sc))
17930        REG_WR(sc, QM_REG_PF_EN, 1);
17931
17932    if (!CHIP_IS_E1x(sc)) {
17933        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17934        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17935        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17936        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17937    }
17938    ecore_init_block(sc, BLOCK_QM, init_phase);
17939
17940    ecore_init_block(sc, BLOCK_TM, init_phase);
17941    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17942
17943    bxe_iov_init_dq(sc);
17944
17945    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17946    ecore_init_block(sc, BLOCK_PRS, init_phase);
17947    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17948    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17949    ecore_init_block(sc, BLOCK_USDM, init_phase);
17950    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17951    ecore_init_block(sc, BLOCK_UPB, init_phase);
17952    ecore_init_block(sc, BLOCK_XPB, init_phase);
17953    ecore_init_block(sc, BLOCK_PBF, init_phase);
17954    if (!CHIP_IS_E1x(sc))
17955        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17956
17957    ecore_init_block(sc, BLOCK_CDU, init_phase);
17958
17959    ecore_init_block(sc, BLOCK_CFC, init_phase);
17960
17961    if (!CHIP_IS_E1x(sc))
17962        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17963
17964    if (IS_MF(sc)) {
17965        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17966        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17967    }
17968
17969    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17970
17971    /* HC init per function */
17972    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17973        if (CHIP_IS_E1H(sc)) {
17974            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17975
17976            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17977            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17978        }
17979        ecore_init_block(sc, BLOCK_HC, init_phase);
17980
17981    } else {
17982        int num_segs, sb_idx, prod_offset;
17983
17984        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17985
17986        if (!CHIP_IS_E1x(sc)) {
17987            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17988            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17989        }
17990
17991        ecore_init_block(sc, BLOCK_IGU, init_phase);
17992
17993        if (!CHIP_IS_E1x(sc)) {
17994            int dsb_idx = 0;
17995            /**
17996             * Producer memory:
17997             * E2 mode: address 0-135 match to the mapping memory;
17998             * 136 - PF0 default prod; 137 - PF1 default prod;
17999             * 138 - PF2 default prod; 139 - PF3 default prod;
18000             * 140 - PF0 attn prod;    141 - PF1 attn prod;
18001             * 142 - PF2 attn prod;    143 - PF3 attn prod;
18002             * 144-147 reserved.
18003             *
18004             * E1.5 mode - in backward compatible mode:
18005             * for a non-default SB, each even line in the memory
18006             * holds the U producer and each odd line holds
18007             * the C producer. The first 128 producers are for
18008             * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18009             * producers are for the DSB of each PF.
18010             * Each PF has five segments: (the order inside each
18011             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18012             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18013             * 144-147 attn prods;
18014             */
18015            /* non-default-status-blocks */
18016            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18017                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18018            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18019                prod_offset = (sc->igu_base_sb + sb_idx) *
18020                    num_segs;
18021
18022                for (i = 0; i < num_segs; i++) {
18023                    addr = IGU_REG_PROD_CONS_MEMORY +
18024                            (prod_offset + i) * 4;
18025                    REG_WR(sc, addr, 0);
18026                }
18027                /* send consumer update with value 0 */
18028                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18029                           USTORM_ID, 0, IGU_INT_NOP, 1);
18030                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18031            }
18032
18033            /* default-status-blocks */
18034            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18035                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18036
18037            if (CHIP_IS_MODE_4_PORT(sc))
18038                dsb_idx = SC_FUNC(sc);
18039            else
18040                dsb_idx = SC_VN(sc);
18041
18042            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18043                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18044                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18045
18046            /*
18047             * IGU producers come in chunks of E1HVN_MAX (4),
18048             * regardless of the current chip mode
18049             */
18050            for (i = 0; i < (num_segs * E1HVN_MAX);
18051                 i += E1HVN_MAX) {
18052                addr = IGU_REG_PROD_CONS_MEMORY +
18053                            (prod_offset + i)*4;
18054                REG_WR(sc, addr, 0);
18055            }
18056            /* send consumer update with 0 */
18057            if (CHIP_INT_MODE_IS_BC(sc)) {
18058                bxe_ack_sb(sc, sc->igu_dsb_id,
18059                           USTORM_ID, 0, IGU_INT_NOP, 1);
18060                bxe_ack_sb(sc, sc->igu_dsb_id,
18061                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18062                bxe_ack_sb(sc, sc->igu_dsb_id,
18063                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18064                bxe_ack_sb(sc, sc->igu_dsb_id,
18065                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18066                bxe_ack_sb(sc, sc->igu_dsb_id,
18067                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18068            } else {
18069                bxe_ack_sb(sc, sc->igu_dsb_id,
18070                           USTORM_ID, 0, IGU_INT_NOP, 1);
18071                bxe_ack_sb(sc, sc->igu_dsb_id,
18072                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18073            }
18074            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18075
18076            /* !!! these should become driver const once
18077               rf-tool supports split-68 const */
18078            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18079            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18080            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18081            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18082            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18083            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18084        }
18085    }
18086
18087    /* Reset PCIE errors for debug */
18088    REG_WR(sc, 0x2114, 0xffffffff);
18089    REG_WR(sc, 0x2120, 0xffffffff);
18090
18091    if (CHIP_IS_E1x(sc)) {
18092        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18093        main_mem_base = HC_REG_MAIN_MEMORY +
18094                SC_PORT(sc) * (main_mem_size * 4);
18095        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18096        main_mem_width = 8;
18097
18098        val = REG_RD(sc, main_mem_prty_clr);
18099        if (val) {
18100            BLOGD(sc, DBG_LOAD,
18101                  "Parity errors in HC block during function init (0x%x)!\n",
18102                  val);
18103        }
18104
18105        /* Clear "false" parity errors in MSI-X table */
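        /*
         * Each table row is main_mem_width (8) bytes: the loop reads the row
         * with a DMAE transaction and rewrites it from the slow-path wb_data
         * scratch area, clearing any stale parity state for that row.
         */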
18106        for (i = main_mem_base;
18107             i < main_mem_base + main_mem_size * 4;
18108             i += main_mem_width) {
18109            bxe_read_dmae(sc, i, main_mem_width / 4);
18110            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18111                           i, main_mem_width / 4);
18112        }
18113        /* Clear HC parity attention */
18114        REG_RD(sc, main_mem_prty_clr);
18115    }
18116
18117#if 1
18118    /* Enable STORMs SP logging */
18119    REG_WR8(sc, BAR_USTRORM_INTMEM +
18120           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18121    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18122           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18123    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18124           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18125    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18126           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18127#endif
18128
18129    elink_phy_probe(&sc->link_params);
18130
18131    return (0);
18132}
18133
18134static void
18135bxe_link_reset(struct bxe_softc *sc)
18136{
18137    if (!BXE_NOMCP(sc)) {
18138        bxe_acquire_phy_lock(sc);
18139        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18140        bxe_release_phy_lock(sc);
18141    } else {
18142        if (!CHIP_REV_IS_SLOW(sc)) {
18143            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18144        }
18145    }
18146}
18147
18148static void
18149bxe_reset_port(struct bxe_softc *sc)
18150{
18151    int port = SC_PORT(sc);
18152    uint32_t val;
18153
18154    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18155    /* reset physical Link */
18156    bxe_link_reset(sc);
18157
18158    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18159
18160    /* Do not rcv packets to BRB */
18161    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18162    /* Do not direct rcv packets that are not for MCP to the BRB */
18163    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18164               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18165
18166    /* Configure AEU */
18167    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18168
18169    DELAY(100000);
18170
18171    /* Check for BRB port occupancy */
18172    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18173    if (val) {
18174        BLOGD(sc, DBG_LOAD,
18175              "BRB1 is not empty, %d blocks are occupied\n", val);
18176    }
18177
18178    /* TODO: Close Doorbell port? */
18179}
18180
18181static void
18182bxe_ilt_wr(struct bxe_softc *sc,
18183           uint32_t         index,
18184           bus_addr_t       addr)
18185{
18186    int reg;
18187    uint32_t wb_write[2];
18188
18189    if (CHIP_IS_E1(sc)) {
18190        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18191    } else {
18192        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18193    }
18194
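    /*
     * The 64-bit page address is split into two 32-bit words and written to
     * the on-chip address table in a single wide-bus (DMAE) transaction.
     */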
18195    wb_write[0] = ONCHIP_ADDR1(addr);
18196    wb_write[1] = ONCHIP_ADDR2(addr);
18197    REG_WR_DMAE(sc, reg, wb_write, 2);
18198}
18199
18200static void
18201bxe_clear_func_ilt(struct bxe_softc *sc,
18202                   uint32_t         func)
18203{
18204    uint32_t i, base = FUNC_ILT_BASE(func);
18205    for (i = base; i < base + ILT_PER_FUNC; i++) {
18206        bxe_ilt_wr(sc, i, 0);
18207    }
18208}
18209
18210static void
18211bxe_reset_func(struct bxe_softc *sc)
18212{
18213    struct bxe_fastpath *fp;
18214    int port = SC_PORT(sc);
18215    int func = SC_FUNC(sc);
18216    int i;
18217
18218    /* Disable the function in the FW */
18219    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18220    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18221    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18222    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18223
18224    /* FP SBs */
18225    FOR_EACH_ETH_QUEUE(sc, i) {
18226        fp = &sc->fp[i];
18227        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18228                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18229                SB_DISABLED);
18230    }
18231
18232    /* SP SB */
18233    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18234            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18235            SB_DISABLED);
18236
18237    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18238        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18239    }
18240
18241    /* Configure IGU */
18242    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18243        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18244        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18245    } else {
18246        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18247        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18248    }
18249
18250    if (CNIC_LOADED(sc)) {
18251        /* Disable Timer scan */
18252        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18253        /*
18254         * Wait for at least 10ms and up to 2 seconds for the timers
18255         * scan to complete
18256         */
18257        for (i = 0; i < 200; i++) {
18258            DELAY(10000);
18259            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18260                break;
18261        }
18262    }
18263
18264    /* Clear ILT */
18265    bxe_clear_func_ilt(sc, func);
18266
18267    /*
18268     * Timers workaround bug for E2: if this is vnic-3,
18269     * we need to set the entire ILT range for these timers.
18270     */
18271    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18272        struct ilt_client_info ilt_cli;
18273        /* use dummy TM client */
18274        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18275        ilt_cli.start = 0;
18276        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18277        ilt_cli.client_num = ILT_CLIENT_TM;
18278
18279        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18280    }
18281
18282    /* this assumes that reset_port() was called before reset_func() */
18283    if (!CHIP_IS_E1x(sc)) {
18284        bxe_pf_disable(sc);
18285    }
18286
18287    sc->dmae_ready = 0;
18288}
18289
18290static int
18291bxe_gunzip_init(struct bxe_softc *sc)
18292{
18293    return (0);
18294}
18295
18296static void
18297bxe_gunzip_end(struct bxe_softc *sc)
18298{
18299    return;
18300}
18301
18302static int
18303bxe_init_firmware(struct bxe_softc *sc)
18304{
18305    if (CHIP_IS_E1(sc)) {
18306        ecore_init_e1_firmware(sc);
18307        sc->iro_array = e1_iro_arr;
18308    } else if (CHIP_IS_E1H(sc)) {
18309        ecore_init_e1h_firmware(sc);
18310        sc->iro_array = e1h_iro_arr;
18311    } else if (!CHIP_IS_E1x(sc)) {
18312        ecore_init_e2_firmware(sc);
18313        sc->iro_array = e2_iro_arr;
18314    } else {
18315        BLOGE(sc, "Unsupported chip revision\n");
18316        return (-1);
18317    }
18318
18319    return (0);
18320}
18321
18322static void
18323bxe_release_firmware(struct bxe_softc *sc)
18324{
18325    /* Do nothing */
18326    return;
18327}
18328
18329static int
18330ecore_gunzip(struct bxe_softc *sc,
18331             const uint8_t    *zbuf,
18332             int              len)
18333{
18334    /* XXX : Implement... */
18335    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18336    return (FALSE);
18337}
18338
18339static void
18340ecore_reg_wr_ind(struct bxe_softc *sc,
18341                 uint32_t         addr,
18342                 uint32_t         val)
18343{
18344    bxe_reg_wr_ind(sc, addr, val);
18345}
18346
18347static void
18348ecore_write_dmae_phys_len(struct bxe_softc *sc,
18349                          bus_addr_t       phys_addr,
18350                          uint32_t         addr,
18351                          uint32_t         len)
18352{
18353    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18354}
18355
18356void
18357ecore_storm_memset_struct(struct bxe_softc *sc,
18358                          uint32_t         addr,
18359                          size_t           size,
18360                          uint32_t         *data)
18361{
18362    uint8_t i;
18363    for (i = 0; i < size/4; i++) {
18364        REG_WR(sc, addr + (i * 4), data[i]);
18365    }
18366}
18367
18368
18369/*
18370 * character device - ioctl interface definitions
18371 */
18372
18373
18374#include "bxe_dump.h"
18375#include "bxe_ioctl.h"
18376#include <sys/conf.h>
18377
18378static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18379                struct thread *td);
18380
18381static struct cdevsw bxe_cdevsw = {
18382    .d_version = D_VERSION,
18383    .d_ioctl = bxe_eioctl,
18384    .d_name = "bxecnic",
18385};
18386
18387#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18388
18389
18390#define DUMP_ALL_PRESETS        0x1FFF
18391#define DUMP_MAX_PRESETS        13
18392#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18393#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18394#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18395#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18396#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18397
18398#define IS_REG_IN_PRESET(presets, idx)  \
18399                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
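/* Preset indices are 1-based: preset 1 maps to bit 0 of the mask, preset 13 to bit 12. */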
18400
18401
18402static int
18403bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18404{
18405    if (CHIP_IS_E1(sc))
18406        return dump_num_registers[0][preset-1];
18407    else if (CHIP_IS_E1H(sc))
18408        return dump_num_registers[1][preset-1];
18409    else if (CHIP_IS_E2(sc))
18410        return dump_num_registers[2][preset-1];
18411    else if (CHIP_IS_E3A0(sc))
18412        return dump_num_registers[3][preset-1];
18413    else if (CHIP_IS_E3B0(sc))
18414        return dump_num_registers[4][preset-1];
18415    else
18416        return 0;
18417}
18418
18419static int
18420bxe_get_total_regs_len32(struct bxe_softc *sc)
18421{
18422    uint32_t preset_idx;
18423    int regdump_len32 = 0;
18424
18425
18426    /* Calculate the total preset regs length */
18427    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18428        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18429    }
18430
18431    return regdump_len32;
18432}
18433
18434static const uint32_t *
18435__bxe_get_page_addr_ar(struct bxe_softc *sc)
18436{
18437    if (CHIP_IS_E2(sc))
18438        return page_vals_e2;
18439    else if (CHIP_IS_E3(sc))
18440        return page_vals_e3;
18441    else
18442        return NULL;
18443}
18444
18445static uint32_t
18446__bxe_get_page_reg_num(struct bxe_softc *sc)
18447{
18448    if (CHIP_IS_E2(sc))
18449        return PAGE_MODE_VALUES_E2;
18450    else if (CHIP_IS_E3(sc))
18451        return PAGE_MODE_VALUES_E3;
18452    else
18453        return 0;
18454}
18455
18456static const uint32_t *
18457__bxe_get_page_write_ar(struct bxe_softc *sc)
18458{
18459    if (CHIP_IS_E2(sc))
18460        return page_write_regs_e2;
18461    else if (CHIP_IS_E3(sc))
18462        return page_write_regs_e3;
18463    else
18464        return NULL;
18465}
18466
18467static uint32_t
18468__bxe_get_page_write_num(struct bxe_softc *sc)
18469{
18470    if (CHIP_IS_E2(sc))
18471        return PAGE_WRITE_REGS_E2;
18472    else if (CHIP_IS_E3(sc))
18473        return PAGE_WRITE_REGS_E3;
18474    else
18475        return 0;
18476}
18477
18478static const struct reg_addr *
18479__bxe_get_page_read_ar(struct bxe_softc *sc)
18480{
18481    if (CHIP_IS_E2(sc))
18482        return page_read_regs_e2;
18483    else if (CHIP_IS_E3(sc))
18484        return page_read_regs_e3;
18485    else
18486        return NULL;
18487}
18488
18489static uint32_t
18490__bxe_get_page_read_num(struct bxe_softc *sc)
18491{
18492    if (CHIP_IS_E2(sc))
18493        return PAGE_READ_REGS_E2;
18494    else if (CHIP_IS_E3(sc))
18495        return PAGE_READ_REGS_E3;
18496    else
18497        return 0;
18498}
18499
18500static bool
18501bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18502{
18503    if (CHIP_IS_E1(sc))
18504        return IS_E1_REG(reg_info->chips);
18505    else if (CHIP_IS_E1H(sc))
18506        return IS_E1H_REG(reg_info->chips);
18507    else if (CHIP_IS_E2(sc))
18508        return IS_E2_REG(reg_info->chips);
18509    else if (CHIP_IS_E3A0(sc))
18510        return IS_E3A0_REG(reg_info->chips);
18511    else if (CHIP_IS_E3B0(sc))
18512        return IS_E3B0_REG(reg_info->chips);
18513    else
18514        return 0;
18515}
18516
18517static bool
18518bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18519{
18520    if (CHIP_IS_E1(sc))
18521        return IS_E1_REG(wreg_info->chips);
18522    else if (CHIP_IS_E1H(sc))
18523        return IS_E1H_REG(wreg_info->chips);
18524    else if (CHIP_IS_E2(sc))
18525        return IS_E2_REG(wreg_info->chips);
18526    else if (CHIP_IS_E3A0(sc))
18527        return IS_E3A0_REG(wreg_info->chips);
18528    else if (CHIP_IS_E3B0(sc))
18529        return IS_E3B0_REG(wreg_info->chips);
18530    else
18531        return 0;
18532}
18533
18534/**
18535 * bxe_read_pages_regs - read "paged" registers
18536 *
18537 * @sc          device handle
18538 * @p           output buffer
18539 *
18540 * Reads "paged" memories: memories that may only be read by first writing to a
18541 * specific address ("write address") and then reading from a specific address
18542 * ("read address"). There may be more than one write address per "page" and
18543 * more than one read address per write address.
18544 */
18545static void
18546bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18547{
18548    uint32_t i, j, k, n;
18549
18550    /* addresses of the paged registers */
18551    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18552    /* number of paged registers */
18553    int num_pages = __bxe_get_page_reg_num(sc);
18554    /* write addresses */
18555    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18556    /* number of write addresses */
18557    int write_num = __bxe_get_page_write_num(sc);
18558    /* read addresses info */
18559    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18560    /* number of read addresses */
18561    int read_num = __bxe_get_page_read_num(sc);
18562    uint32_t addr, size;
18563
18564    for (i = 0; i < num_pages; i++) {
18565        for (j = 0; j < write_num; j++) {
18566            REG_WR(sc, write_addr[j], page_addr[i]);
18567
18568            for (k = 0; k < read_num; k++) {
18569                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18570                    size = read_addr[k].size;
18571                    for (n = 0; n < size; n++) {
18572                        addr = read_addr[k].addr + n*4;
18573                        *p++ = REG_RD(sc, addr);
18574                    }
18575                }
18576            }
18577        }
18578    }
18579    return;
18580}
18581
18582
18583static int
18584bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18585{
18586    uint32_t i, j, addr;
18587    const struct wreg_addr *wreg_addr_p = NULL;
18588
18589    if (CHIP_IS_E1(sc))
18590        wreg_addr_p = &wreg_addr_e1;
18591    else if (CHIP_IS_E1H(sc))
18592        wreg_addr_p = &wreg_addr_e1h;
18593    else if (CHIP_IS_E2(sc))
18594        wreg_addr_p = &wreg_addr_e2;
18595    else if (CHIP_IS_E3A0(sc))
18596        wreg_addr_p = &wreg_addr_e3;
18597    else if (CHIP_IS_E3B0(sc))
18598        wreg_addr_p = &wreg_addr_e3b0;
18599    else
18600        return (-1);
18601
18602    /* Read the idle_chk registers */
18603    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18604        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18605            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18606            for (j = 0; j < idle_reg_addrs[i].size; j++)
18607                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18608        }
18609    }
18610
18611    /* Read the regular registers */
18612    for (i = 0; i < REGS_COUNT; i++) {
18613        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18614            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18615            for (j = 0; j < reg_addrs[i].size; j++)
18616                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18617        }
18618    }
18619
18620    /* Read the CAM registers */
18621    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18622        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18623        for (i = 0; i < wreg_addr_p->size; i++) {
18624            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18625
18626            /* In case of wreg_addr register, read additional
18627               registers from read_regs array
18628             */
18629            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18630                addr = *(wreg_addr_p->read_regs);
18631                *p++ = REG_RD(sc, addr + j*4);
18632            }
18633        }
18634    }
18635
18636    /* Paged registers are supported in E2 & E3 only */
18637    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18638        /* Read "paged" registers */
18639        bxe_read_pages_regs(sc, p, preset);
18640    }
18641
18642    return 0;
18643}
18644
18645int
18646bxe_grc_dump(struct bxe_softc *sc)
18647{
18648    int rval = 0;
18649    uint32_t preset_idx;
18650    uint8_t *buf;
18651    uint32_t size;
18652    struct  dump_header *d_hdr;
18653    uint32_t i;
18654    uint32_t reg_val;
18655    uint32_t reg_addr;
18656    uint32_t cmd_offset;
18657    struct ecore_ilt *ilt = SC_ILT(sc);
18658    struct bxe_fastpath *fp;
18659    struct ilt_client_info *ilt_cli;
18660    int grc_dump_size;
18661
18662
18663    if (sc->grcdump_done || sc->grcdump_started)
18664	return (rval);
18665
18666    sc->grcdump_started = 1;
18667    BLOGI(sc, "Started collecting grcdump\n");
18668
18669    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18670                sizeof(struct  dump_header);
18671
18672    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18673
18674    if (sc->grc_dump == NULL) {
18675        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18676        return(ENOMEM);
18677    }
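    /*
     * Buffer layout: a dump_header followed by the raw register values of
     * each preset in ascending order; presets that contain IORs (2, 5, 8
     * and 11) are skipped below.
     */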
18678
18679
18680
18681    /* Disable parity attentions because the following dump may
18682     * cause false alarms by reading registers that were never written. We
18683     * will re-enable parity attentions right after the dump.
18684     */
18685
18686    /* Disable parity on path 0 */
18687    bxe_pretend_func(sc, 0);
18688
18689    ecore_disable_blocks_parity(sc);
18690
18691    /* Disable parity on path 1 */
18692    bxe_pretend_func(sc, 1);
18693    ecore_disable_blocks_parity(sc);
18694
18695    /* Return to current function */
18696    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18697
18698    buf = sc->grc_dump;
18699    d_hdr = sc->grc_dump;
18700
18701    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18702    d_hdr->version = BNX2X_DUMP_VERSION;
18703    d_hdr->preset = DUMP_ALL_PRESETS;
18704
18705    if (CHIP_IS_E1(sc)) {
18706        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18707    } else if (CHIP_IS_E1H(sc)) {
18708        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18709    } else if (CHIP_IS_E2(sc)) {
18710        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18711                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18712    } else if (CHIP_IS_E3A0(sc)) {
18713        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18714                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18715    } else if (CHIP_IS_E3B0(sc)) {
18716        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18717                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18718    }
18719
18720    buf += sizeof(struct  dump_header);
18721
18722    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18723
18724        /* Skip presets with IOR */
18725        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18726            (preset_idx == 11))
18727            continue;
18728
18729        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18730
18731	if (rval)
18732            break;
18733
18734        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18735
18736        buf += size;
18737    }
18738
18739    bxe_pretend_func(sc, 0);
18740    ecore_clear_blocks_parity(sc);
18741    ecore_enable_blocks_parity(sc);
18742
18743    bxe_pretend_func(sc, 1);
18744    ecore_clear_blocks_parity(sc);
18745    ecore_enable_blocks_parity(sc);
18746
18747    /* Return to current function */
18748    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18749
18750
18751
18752    if(sc->state == BXE_STATE_OPEN) {
18753        if(sc->fw_stats_req  != NULL) {
18754    		BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18755        			(uintmax_t)sc->fw_stats_req_mapping,
18756        			(uintmax_t)sc->fw_stats_data_mapping,
18757        			sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18758		}
18759		if(sc->def_sb != NULL) {
18760			BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18761        			(void *)sc->def_sb_dma.paddr, sc->def_sb,
18762        			sizeof(struct host_sp_status_block));
18763		}
18764		if(sc->eq_dma.vaddr != NULL) {
18765    		BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18766        			(uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18767		}
18768		if(sc->sp_dma.vaddr != NULL) {
18769    		BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18770        			(uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18771        			sizeof(struct bxe_slowpath));
18772		}
18773		if(sc->spq_dma.vaddr != NULL) {
18774    		BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18775        			(uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18776		}
18777		if(sc->gz_buf_dma.vaddr != NULL) {
18778    		BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18779        			(uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18780        			FW_BUF_SIZE);
18781		}
18782    	for (i = 0; i < sc->num_queues; i++) {
18783        	fp = &sc->fp[i];
18784			if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
18785                        fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
18786                        fp->rx_sge_dma.vaddr != NULL) {
18787
18788				BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18789            			(uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18790            			sizeof(union bxe_host_hc_status_block));
18791				BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18792            			(uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18793            			(BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18794        		BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18795            			(uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18796            			(BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18797        		BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18798            			(uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18799            			(BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18800        		BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18801            			(uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18802            			(BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18803    		}
18804		}
18805		if(ilt != NULL ) {
18806    		ilt_cli = &ilt->clients[1];
18807			if(ilt->lines != NULL) {
18808    		for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18809        		BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18810            			(uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18811            			((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18812    		}
18813			}
18814		}
18815
18816
18817    	cmd_offset = DMAE_REG_CMD_MEM;
18818    	for (i = 0; i < 224; i++) {
18819        	reg_addr = (cmd_offset +(i * 4));
18820        	reg_val = REG_RD(sc, reg_addr);
18821        	BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
18822            			reg_addr, reg_val);
18823    	}
18824	}
18825
18826    BLOGI(sc, "Collection of grcdump done\n");
18827    sc->grcdump_done = 1;
18828    return(rval);
18829}
18830
18831static int
18832bxe_add_cdev(struct bxe_softc *sc)
18833{
18834    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18835
18836    if (sc->eeprom == NULL) {
18837        BLOGW(sc, "Unable to allocate eeprom buffer\n");
18838        return (-1);
18839    }
18840
18841    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18842                            sc->ifp->if_dunit,
18843                            UID_ROOT,
18844                            GID_WHEEL,
18845                            0600,
18846                            "%s",
18847                            if_name(sc->ifp));
18848
18849    if (sc->ioctl_dev == NULL) {
18850        free(sc->eeprom, M_DEVBUF);
18851        sc->eeprom = NULL;
18852        return (-1);
18853    }
18854
18855    sc->ioctl_dev->si_drv1 = sc;
18856
18857    return (0);
18858}
18859
18860static void
18861bxe_del_cdev(struct bxe_softc *sc)
18862{
18863    if (sc->ioctl_dev != NULL)
18864        destroy_dev(sc->ioctl_dev);
18865
18866    if (sc->eeprom != NULL) {
18867        free(sc->eeprom, M_DEVBUF);
18868        sc->eeprom = NULL;
18869    }
18870    sc->ioctl_dev = NULL;
18871
18872    return;
18873}
18874
18875static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18876{
18877
18878    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
18879        return FALSE;
18880
18881    return TRUE;
18882}
18883
18884
18885static int
18886bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18887{
18888    int rval = 0;
18889
18890    if(!bxe_is_nvram_accessible(sc)) {
18891        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18892        return (-EAGAIN);
18893    }
18894    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18895
18896
18897   return (rval);
18898}
18899
18900static int
18901bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18902{
18903    int rval = 0;
18904
18905    if(!bxe_is_nvram_accessible(sc)) {
18906        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18907        return (-EAGAIN);
18908    }
18909    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18910
18911   return (rval);
18912}
18913
18914static int
18915bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18916{
18917    int rval = 0;
18918
18919    switch (eeprom->eeprom_cmd) {
18920
18921    case BXE_EEPROM_CMD_SET_EEPROM:
18922
18923        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18924                       eeprom->eeprom_data_len);
18925
18926        if (rval)
18927            break;
18928
18929        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18930                       eeprom->eeprom_data_len);
18931        break;
18932
18933    case BXE_EEPROM_CMD_GET_EEPROM:
18934
18935        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18936                       eeprom->eeprom_data_len);
18937
18938        if (rval) {
18939            break;
18940        }
18941
18942        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18943                       eeprom->eeprom_data_len);
18944        break;
18945
18946    default:
18947            rval = EINVAL;
18948            break;
18949    }
18950
18951    if (rval) {
18952        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
18953    }
18954
18955    return (rval);
18956}
18957
18958static int
18959bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18960{
18961    uint32_t ext_phy_config;
18962    int port = SC_PORT(sc);
18963    int cfg_idx = bxe_get_link_cfg_idx(sc);
18964
18965    dev_p->supported = sc->port.supported[cfg_idx] |
18966            (sc->port.supported[cfg_idx ^ 1] &
18967            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
18968    dev_p->advertising = sc->port.advertising[cfg_idx];
18969    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
18970        ELINK_ETH_PHY_SFP_1G_FIBER) {
18971        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
18972        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
18973    }
18974    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
18975        !(sc->flags & BXE_MF_FUNC_DIS)) {
18976        dev_p->duplex = sc->link_vars.duplex;
18977        if (IS_MF(sc) && !BXE_NOMCP(sc))
18978            dev_p->speed = bxe_get_mf_speed(sc);
18979        else
18980            dev_p->speed = sc->link_vars.line_speed;
18981    } else {
18982        dev_p->duplex = DUPLEX_UNKNOWN;
18983        dev_p->speed = SPEED_UNKNOWN;
18984    }
18985
18986    dev_p->port = bxe_media_detect(sc);
18987
18988    ext_phy_config = SHMEM_RD(sc,
18989                         dev_info.port_hw_config[port].external_phy_config);
18990    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
18991        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
18992        dev_p->phy_address = sc->port.phy_addr;
18993    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18994            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
18995        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18996            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
18997        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
18998    else
18999        dev_p->phy_address = 0;
19000
19001    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19002        dev_p->autoneg = AUTONEG_ENABLE;
19003    else
19004        dev_p->autoneg = AUTONEG_DISABLE;
19005
19006    return (0);
19008}
19009
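/*
 * ioctl entry point for the driver's control node (sc->ioctl_dev). Services
 * the management/debug commands: GRC dump sizing and retrieval, driver info,
 * link settings, raw register and PCI config access, the permanent MAC
 * address, and EEPROM read/write.
 */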
19010static int
19011bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19012        struct thread *td)
19013{
19014    struct bxe_softc    *sc;
19015    int                 rval = 0;
19016    device_t            pci_dev;
19017    bxe_grcdump_t       *dump = NULL;
19018    int                 grc_dump_size;
19019    bxe_drvinfo_t       *drv_infop = NULL;
19020    bxe_dev_setting_t   *dev_p;
19021    bxe_dev_setting_t   dev_set;
19022    bxe_get_regs_t      *reg_p;
19023    bxe_reg_rdw_t       *reg_rdw_p;
19024    bxe_pcicfg_rdw_t    *cfg_rdw_p;
19025    bxe_perm_mac_addr_t *mac_addr_p;
19026
19027    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19028        return (ENXIO);
19029
19030    pci_dev = sc->dev;
19032
19033    dump = (bxe_grcdump_t *)data;
19034
19035    switch (cmd) {
19036
19037        case BXE_GRC_DUMP_SIZE:
19038            dump->pci_func = sc->pcie_func;
19039            dump->grcdump_size =
19040                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19041                     sizeof(struct dump_header);
19042            break;
19043
19044        case BXE_GRC_DUMP:
19045
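            /*
             * The dump is generated here only if it has been armed
             * (sc->trigger_grcdump) and not collected yet; the driver's
             * copy (sc->grc_dump) is freed after the copyout attempt.
             */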
19046            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19047                                sizeof(struct dump_header);
19048            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19049                (dump->grcdump_size < grc_dump_size)) {
19050                rval = EINVAL;
19051                break;
19052            }
19053
19054            if ((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19055                (!sc->grcdump_started)) {
19056                rval = bxe_grc_dump(sc);
19057            }
19058
19059            if ((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19060                (sc->grc_dump != NULL)) {
19061                dump->grcdump_dwords = grc_dump_size >> 2;
19062                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19063                free(sc->grc_dump, M_DEVBUF);
19064                sc->grc_dump = NULL;
19065                sc->grcdump_started = 0;
19066                sc->grcdump_done = 0;
19067            }
19068
19069            break;
19070
19071        case BXE_DRV_INFO:
19072            drv_infop = (bxe_drvinfo_t *)data;
19073            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19074            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19075                BXE_DRIVER_VERSION);
19076            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19077                sc->devinfo.bc_ver_str);
19078            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19079                "%s", sc->fw_ver_str);
19080            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19081            drv_infop->reg_dump_len =
19082                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19083                    + sizeof(struct dump_header);
19084            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19085                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19086            break;
19087
19088        case BXE_DEV_SETTING:
19089            dev_p = (bxe_dev_setting_t *)data;
19090            bxe_get_settings(sc, &dev_set);
19091            dev_p->supported = dev_set.supported;
19092            dev_p->advertising = dev_set.advertising;
19093            dev_p->speed = dev_set.speed;
19094            dev_p->duplex = dev_set.duplex;
19095            dev_p->port = dev_set.port;
19096            dev_p->phy_address = dev_set.phy_address;
19097            dev_p->autoneg = dev_set.autoneg;
19098
19099            break;
19100
19101        case BXE_GET_REGS:
19102
19103            reg_p = (bxe_get_regs_t *)data;
19104            grc_dump_size = reg_p->reg_buf_len;
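            /*
             * The copyout length comes straight from the caller
             * (reg_buf_len) and is not re-validated against the size of
             * the collected dump here; callers are expected to use the
             * reg_dump_len value reported by BXE_DRV_INFO.
             */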
19105
19106            if ((!sc->grcdump_done) && (!sc->grcdump_started)) {
19107                bxe_grc_dump(sc);
19108            }
19109            if ((sc->grcdump_done) && (sc->grcdump_started) &&
19110                (sc->grc_dump != NULL)) {
19111                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19112                free(sc->grc_dump, M_DEVBUF);
19113                sc->grc_dump = NULL;
19114                sc->grcdump_started = 0;
19115                sc->grcdump_done = 0;
19116            }
19117
19118            break;
19119
19120        case BXE_RDW_REG:
19121            reg_rdw_p = (bxe_reg_rdw_t *)data;
19122            if ((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19123                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19124                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19125
19126            if ((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19127                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19128                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19129
19130            break;
19131
19132        case BXE_RDW_PCICFG:
19133            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19134            if (cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19135
19136                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19137                                         cfg_rdw_p->cfg_width);
19138
19139            } else if (cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19140                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19141                            cfg_rdw_p->cfg_width);
19142            } else {
19143                BLOGW(sc, "BXE_RDW_PCICFG ioctl: unknown cmd passed\n");
19144            }
19145            break;
19146
19147        case BXE_MAC_ADDR:
19148            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19149            snprintf(mac_addr_p->mac_addr_str, sizeof(mac_addr_p->mac_addr_str),
19150                "%s", sc->mac_addr_str);
19151            break;
19152
19153        case BXE_EEPROM:
19154            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19155            break;
19156
19158        default:
19159            break;
19160    }
19161
19162    return (rval);
19163}
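/*
 * Hypothetical userland sketch (not part of the driver): querying the GRC
 * dump size and driver info through the ioctl interface above. It assumes
 * the control node for the first adapter is /dev/bxe0 and that the
 * bxe_grcdump_t/bxe_drvinfo_t definitions and BXE_* ioctl numbers are
 * available from the driver's ioctl header; adjust both for the actual
 * environment.
 *
 *   #include <sys/ioctl.h>
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *   #include "bxe_ioctl.h"
 *
 *   int
 *   main(void)
 *   {
 *       bxe_grcdump_t dump = { 0 };
 *       bxe_drvinfo_t info = { 0 };
 *       int fd = open("/dev/bxe0", O_RDWR);
 *
 *       if (fd < 0)
 *           return (1);
 *       if (ioctl(fd, BXE_GRC_DUMP_SIZE, &dump) == 0)
 *           printf("pci func %u, grcdump size %u bytes\n",
 *               (unsigned)dump.pci_func, (unsigned)dump.grcdump_size);
 *       if (ioctl(fd, BXE_DRV_INFO, &info) == 0)
 *           printf("%s %s mfw %s bus %s\n", info.drv_name,
 *               info.drv_version, info.mfw_version, info.bus_info);
 *       close(fd);
 *       return (0);
 *   }
 */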
19164