bxe.c revision 336146
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/bxe/bxe.c 336146 2018-07-09 21:13:21Z davidcs $");
29
30#define BXE_DRIVER_VERSION "1.78.91"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero (0) for older kernels that don't include this changeset,
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define it as pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the
73 * per-function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
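/*
 * Illustrative sketch (assumption, not part of the original driver): how the
 * FLR constants above are typically consumed.  A caller polls a hardware
 * counter roughly every FLR_WAIT_INTERVAL usecs and gives up after
 * FLR_POLL_CNT iterations (~FLR_WAIT_USEC total).  The register and expected
 * value below are placeholders.
 */
#if 0
static int
example_flr_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected)
{
    uint32_t cnt;

    for (cnt = 0; cnt < FLR_POLL_CNT; cnt++) {
        if (REG_RD(sc, reg) == expected) {
            return (0);           /* counter reached the expected value */
        }
        DELAY(FLR_WAIT_INTERVAL); /* wait 50 usecs between polls */
    }

    return (-1);                  /* timed out after ~10 msecs */
}
#endif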
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        QLOGIC_VENDORID,
171        CHIP_NUM_57840_4_10,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 4x10GbE"
174    },
175    {
176        BRCM_VENDORID,
177        CHIP_NUM_57840_2_20,
178        PCI_ANY_ID, PCI_ANY_ID,
179        "QLogic NetXtreme II BCM57840 2x20GbE"
180    },
181    {
182        BRCM_VENDORID,
183        CHIP_NUM_57840_MF,
184        PCI_ANY_ID, PCI_ANY_ID,
185        "QLogic NetXtreme II BCM57840 MF 10GbE"
186    },
187    {
188        0, 0, 0, 0, NULL
189    }
190};
191
192MALLOC_DECLARE(M_BXE_ILT);
193MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
194
195/*
196 * FreeBSD device entry points.
197 */
198static int bxe_probe(device_t);
199static int bxe_attach(device_t);
200static int bxe_detach(device_t);
201static int bxe_shutdown(device_t);
202
203/*
204 * FreeBSD KLD module/device interface event handler method.
205 */
206static device_method_t bxe_methods[] = {
207    /* Device interface (device_if.h) */
208    DEVMETHOD(device_probe,     bxe_probe),
209    DEVMETHOD(device_attach,    bxe_attach),
210    DEVMETHOD(device_detach,    bxe_detach),
211    DEVMETHOD(device_shutdown,  bxe_shutdown),
212    /* Bus interface (bus_if.h) */
213    DEVMETHOD(bus_print_child,  bus_generic_print_child),
214    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
215    KOBJMETHOD_END
216};
217
218/*
219 * FreeBSD KLD Module data declaration
220 */
221static driver_t bxe_driver = {
222    "bxe",                   /* module name */
223    bxe_methods,             /* event handler */
224    sizeof(struct bxe_softc) /* extra data */
225};
226
227/*
228 * FreeBSD dev class is needed to manage dev instances and
229 * to associate with a bus type
230 */
231static devclass_t bxe_devclass;
232
233MODULE_DEPEND(bxe, pci, 1, 1, 1);
234MODULE_DEPEND(bxe, ether, 1, 1, 1);
235DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
236
237/* resources needed for unloading a previously loaded device */
238
239#define BXE_PREV_WAIT_NEEDED 1
240struct mtx bxe_prev_mtx;
241MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
242struct bxe_prev_list_node {
243    LIST_ENTRY(bxe_prev_list_node) node;
244    uint8_t bus;
245    uint8_t slot;
246    uint8_t path;
247    uint8_t aer; /* XXX automatic error recovery */
248    uint8_t undi;
249};
250static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
251
252static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
253
254/* Tunable device values... */
255
256SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
257
258/* Debug */
259unsigned long bxe_debug = 0;
260SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
261             &bxe_debug, 0, "Debug logging mode");
262
263/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
264static int bxe_interrupt_mode = INTR_MODE_MSIX;
265SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
266           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
267
268/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
269static int bxe_queue_count = 4;
270SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
271           &bxe_queue_count, 0, "Multi-Queue queue count");
272
273/* max number of buffers per queue (default RX_BD_USABLE) */
274static int bxe_max_rx_bufs = 0;
275SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
276           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
277
278/* Host interrupt coalescing RX tick timer (usecs) */
279static int bxe_hc_rx_ticks = 25;
280SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
281           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
282
283/* Host interrupt coalescing TX tick timer (usecs) */
284static int bxe_hc_tx_ticks = 50;
285SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
286           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
287
288/* Maximum number of Rx packets to process at a time */
289static int bxe_rx_budget = 0xffffffff;
290SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
291           &bxe_rx_budget, 0, "Rx processing budget");
292
293/* Maximum LRO aggregation size */
294static int bxe_max_aggregation_size = 0;
295SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
296           &bxe_max_aggregation_size, 0, "max aggregation size");
297
298/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
299static int bxe_mrrs = -1;
300SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
301           &bxe_mrrs, 0, "PCIe maximum read request size");
302
303/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
304static int bxe_autogreeen = 0;
305SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
306           &bxe_autogreeen, 0, "AutoGrEEEn support");
307
308/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
309static int bxe_udp_rss = 0;
310SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
311           &bxe_udp_rss, 0, "UDP RSS support");
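/*
 * Note (added for illustration, not part of the original source): because
 * the tunables above are declared under the hw.bxe sysctl node with
 * CTLFLAG_RDTUN/CTLFLAG_TUN, they are normally set at boot time from
 * /boot/loader.conf, for example:
 *
 *   hw.bxe.debug="0"            # debug logging mode
 *   hw.bxe.interrupt_mode="2"   # 0=INTx, 1=MSI, 2=MSI-X
 *   hw.bxe.queue_count="4"      # 0=auto, 1..16=fixed number of queues
 *   hw.bxe.hc_rx_ticks="25"     # RX interrupt coalescing ticks (usecs)
 *
 * The current values can be inspected at runtime with `sysctl hw.bxe`.
 */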
312
313
314#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
315
316#define STATS_OFFSET32(stat_name)                   \
317    (offsetof(struct bxe_eth_stats, stat_name) / 4)
318
319#define Q_STATS_OFFSET32(stat_name)                   \
320    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
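/*
 * Note (added for clarity): the offsets above are expressed in 32-bit words,
 * not bytes, hence the division by 4; the stats structures are read as
 * arrays of 32-bit words.  As a worked example, if a field sits at byte
 * offset 8 of struct bxe_eth_stats, STATS_OFFSET32() yields 2, and a table
 * entry below with size 8 covers that word and the next (the _hi/_lo pair).
 */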
321
322static const struct {
323    uint32_t offset;
324    uint32_t size;
325    uint32_t flags;
326#define STATS_FLAGS_PORT  1
327#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
328#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
329    char string[STAT_NAME_LEN];
330} bxe_eth_stats_arr[] = {
331    { STATS_OFFSET32(total_bytes_received_hi),
332                8, STATS_FLAGS_BOTH, "rx_bytes" },
333    { STATS_OFFSET32(error_bytes_received_hi),
334                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
335    { STATS_OFFSET32(total_unicast_packets_received_hi),
336                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
337    { STATS_OFFSET32(total_multicast_packets_received_hi),
338                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
339    { STATS_OFFSET32(total_broadcast_packets_received_hi),
340                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
341    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
342                8, STATS_FLAGS_PORT, "rx_crc_errors" },
343    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
344                8, STATS_FLAGS_PORT, "rx_align_errors" },
345    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
346                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
347    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
348                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
349    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
350                8, STATS_FLAGS_PORT, "rx_fragments" },
351    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
352                8, STATS_FLAGS_PORT, "rx_jabbers" },
353    { STATS_OFFSET32(no_buff_discard_hi),
354                8, STATS_FLAGS_BOTH, "rx_discards" },
355    { STATS_OFFSET32(mac_filter_discard),
356                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
357    { STATS_OFFSET32(mf_tag_discard),
358                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
359    { STATS_OFFSET32(pfc_frames_received_hi),
360                8, STATS_FLAGS_PORT, "pfc_frames_received" },
361    { STATS_OFFSET32(pfc_frames_sent_hi),
362                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
363    { STATS_OFFSET32(brb_drop_hi),
364                8, STATS_FLAGS_PORT, "rx_brb_discard" },
365    { STATS_OFFSET32(brb_truncate_hi),
366                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
367    { STATS_OFFSET32(pause_frames_received_hi),
368                8, STATS_FLAGS_PORT, "rx_pause_frames" },
369    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
370                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
371    { STATS_OFFSET32(nig_timer_max),
372                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
373    { STATS_OFFSET32(total_bytes_transmitted_hi),
374                8, STATS_FLAGS_BOTH, "tx_bytes" },
375    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
376                8, STATS_FLAGS_PORT, "tx_error_bytes" },
377    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
378                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
379    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
380                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
381    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
382                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
383    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
384                8, STATS_FLAGS_PORT, "tx_mac_errors" },
385    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
386                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
387    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
388                8, STATS_FLAGS_PORT, "tx_single_collisions" },
389    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
390                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
391    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
392                8, STATS_FLAGS_PORT, "tx_deferred" },
393    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
394                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
395    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
396                8, STATS_FLAGS_PORT, "tx_late_collisions" },
397    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
398                8, STATS_FLAGS_PORT, "tx_total_collisions" },
399    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
400                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
401    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
402                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
403    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
404                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
405    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
406                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
407    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
408                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
409    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
410                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
411    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
412                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
413    { STATS_OFFSET32(pause_frames_sent_hi),
414                8, STATS_FLAGS_PORT, "tx_pause_frames" },
415    { STATS_OFFSET32(total_tpa_aggregations_hi),
416                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
417    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
418                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
419    { STATS_OFFSET32(total_tpa_bytes_hi),
420                8, STATS_FLAGS_FUNC, "tpa_bytes"},
421    { STATS_OFFSET32(eee_tx_lpi),
422                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
423    { STATS_OFFSET32(rx_calls),
424                4, STATS_FLAGS_FUNC, "rx_calls"},
425    { STATS_OFFSET32(rx_pkts),
426                4, STATS_FLAGS_FUNC, "rx_pkts"},
427    { STATS_OFFSET32(rx_tpa_pkts),
428                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
429    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
430                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
431    { STATS_OFFSET32(rx_bxe_service_rxsgl),
432                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
433    { STATS_OFFSET32(rx_jumbo_sge_pkts),
434                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
435    { STATS_OFFSET32(rx_soft_errors),
436                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
437    { STATS_OFFSET32(rx_hw_csum_errors),
438                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
439    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
440                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
441    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
442                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
443    { STATS_OFFSET32(rx_budget_reached),
444                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
445    { STATS_OFFSET32(tx_pkts),
446                4, STATS_FLAGS_FUNC, "tx_pkts"},
447    { STATS_OFFSET32(tx_soft_errors),
448                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
449    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
450                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
451    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
452                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
453    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
454                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
455    { STATS_OFFSET32(tx_ofld_frames_lso),
456                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
457    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
458                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
459    { STATS_OFFSET32(tx_encap_failures),
460                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
461    { STATS_OFFSET32(tx_hw_queue_full),
462                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
463    { STATS_OFFSET32(tx_hw_max_queue_depth),
464                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
465    { STATS_OFFSET32(tx_dma_mapping_failure),
466                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
467    { STATS_OFFSET32(tx_max_drbr_queue_depth),
468                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
469    { STATS_OFFSET32(tx_window_violation_std),
470                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
471    { STATS_OFFSET32(tx_window_violation_tso),
472                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
473    { STATS_OFFSET32(tx_chain_lost_mbuf),
474                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
475    { STATS_OFFSET32(tx_frames_deferred),
476                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
477    { STATS_OFFSET32(tx_queue_xoff),
478                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
479    { STATS_OFFSET32(mbuf_defrag_attempts),
480                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
481    { STATS_OFFSET32(mbuf_defrag_failures),
482                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
483    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
485    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
486                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
487    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
488                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
489    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
490                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
491    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
492                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
493    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
494                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
495    { STATS_OFFSET32(mbuf_alloc_tx),
496                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
497    { STATS_OFFSET32(mbuf_alloc_rx),
498                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
499    { STATS_OFFSET32(mbuf_alloc_sge),
500                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
501    { STATS_OFFSET32(mbuf_alloc_tpa),
502                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
503    { STATS_OFFSET32(tx_queue_full_return),
504                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
505    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
506                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
507    { STATS_OFFSET32(tx_request_link_down_failures),
508                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
509    { STATS_OFFSET32(bd_avail_too_less_failures),
510                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
511    { STATS_OFFSET32(tx_mq_not_empty),
512                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
513    { STATS_OFFSET32(nsegs_path1_errors),
514                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
515    { STATS_OFFSET32(nsegs_path2_errors),
516                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
517
518
519};
520
521static const struct {
522    uint32_t offset;
523    uint32_t size;
524    char string[STAT_NAME_LEN];
525} bxe_eth_q_stats_arr[] = {
526    { Q_STATS_OFFSET32(total_bytes_received_hi),
527                8, "rx_bytes" },
528    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
529                8, "rx_ucast_packets" },
530    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
531                8, "rx_mcast_packets" },
532    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
533                8, "rx_bcast_packets" },
534    { Q_STATS_OFFSET32(no_buff_discard_hi),
535                8, "rx_discards" },
536    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
537                8, "tx_bytes" },
538    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
539                8, "tx_ucast_packets" },
540    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
541                8, "tx_mcast_packets" },
542    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
543                8, "tx_bcast_packets" },
544    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
545                8, "tpa_aggregations" },
546    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
547                8, "tpa_aggregated_frames"},
548    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
549                8, "tpa_bytes"},
550    { Q_STATS_OFFSET32(rx_calls),
551                4, "rx_calls"},
552    { Q_STATS_OFFSET32(rx_pkts),
553                4, "rx_pkts"},
554    { Q_STATS_OFFSET32(rx_tpa_pkts),
555                4, "rx_tpa_pkts"},
556    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
557                4, "rx_erroneous_jumbo_sge_pkts"},
558    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
559                4, "rx_bxe_service_rxsgl"},
560    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
561                4, "rx_jumbo_sge_pkts"},
562    { Q_STATS_OFFSET32(rx_soft_errors),
563                4, "rx_soft_errors"},
564    { Q_STATS_OFFSET32(rx_hw_csum_errors),
565                4, "rx_hw_csum_errors"},
566    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
567                4, "rx_ofld_frames_csum_ip"},
568    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
569                4, "rx_ofld_frames_csum_tcp_udp"},
570    { Q_STATS_OFFSET32(rx_budget_reached),
571                4, "rx_budget_reached"},
572    { Q_STATS_OFFSET32(tx_pkts),
573                4, "tx_pkts"},
574    { Q_STATS_OFFSET32(tx_soft_errors),
575                4, "tx_soft_errors"},
576    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
577                4, "tx_ofld_frames_csum_ip"},
578    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
579                4, "tx_ofld_frames_csum_tcp"},
580    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
581                4, "tx_ofld_frames_csum_udp"},
582    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
583                4, "tx_ofld_frames_lso"},
584    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
585                4, "tx_ofld_frames_lso_hdr_splits"},
586    { Q_STATS_OFFSET32(tx_encap_failures),
587                4, "tx_encap_failures"},
588    { Q_STATS_OFFSET32(tx_hw_queue_full),
589                4, "tx_hw_queue_full"},
590    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
591                4, "tx_hw_max_queue_depth"},
592    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
593                4, "tx_dma_mapping_failure"},
594    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
595                4, "tx_max_drbr_queue_depth"},
596    { Q_STATS_OFFSET32(tx_window_violation_std),
597                4, "tx_window_violation_std"},
598    { Q_STATS_OFFSET32(tx_window_violation_tso),
599                4, "tx_window_violation_tso"},
600    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
601                4, "tx_chain_lost_mbuf"},
602    { Q_STATS_OFFSET32(tx_frames_deferred),
603                4, "tx_frames_deferred"},
604    { Q_STATS_OFFSET32(tx_queue_xoff),
605                4, "tx_queue_xoff"},
606    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
607                4, "mbuf_defrag_attempts"},
608    { Q_STATS_OFFSET32(mbuf_defrag_failures),
609                4, "mbuf_defrag_failures"},
610    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
611                4, "mbuf_rx_bd_alloc_failed"},
612    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
613                4, "mbuf_rx_bd_mapping_failed"},
614    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
615                4, "mbuf_rx_tpa_alloc_failed"},
616    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
617                4, "mbuf_rx_tpa_mapping_failed"},
618    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
619                4, "mbuf_rx_sge_alloc_failed"},
620    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
621                4, "mbuf_rx_sge_mapping_failed"},
622    { Q_STATS_OFFSET32(mbuf_alloc_tx),
623                4, "mbuf_alloc_tx"},
624    { Q_STATS_OFFSET32(mbuf_alloc_rx),
625                4, "mbuf_alloc_rx"},
626    { Q_STATS_OFFSET32(mbuf_alloc_sge),
627                4, "mbuf_alloc_sge"},
628    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
629                4, "mbuf_alloc_tpa"},
630    { Q_STATS_OFFSET32(tx_queue_full_return),
631                4, "tx_queue_full_return"},
632    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
633                4, "bxe_tx_mq_sc_state_failures"},
634    { Q_STATS_OFFSET32(tx_request_link_down_failures),
635                4, "tx_request_link_down_failures"},
636    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
637                4, "bd_avail_too_less_failures"},
638    { Q_STATS_OFFSET32(tx_mq_not_empty),
639                4, "tx_mq_not_empty"},
640    { Q_STATS_OFFSET32(nsegs_path1_errors),
641                4, "nsegs_path1_errors"},
642    { Q_STATS_OFFSET32(nsegs_path2_errors),
643                4, "nsegs_path2_errors"}
644
645
646};
647
648#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
649#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
650
651
652static void    bxe_cmng_fns_init(struct bxe_softc *sc,
653                                 uint8_t          read_cfg,
654                                 uint8_t          cmng_type);
655static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
656static void    storm_memset_cmng(struct bxe_softc *sc,
657                                 struct cmng_init *cmng,
658                                 uint8_t          port);
659static void    bxe_set_reset_global(struct bxe_softc *sc);
660static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
661static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
662                                 int              engine);
663static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
664static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
665                                   uint8_t          *global,
666                                   uint8_t          print);
667static void    bxe_int_disable(struct bxe_softc *sc);
668static int     bxe_release_leader_lock(struct bxe_softc *sc);
669static void    bxe_pf_disable(struct bxe_softc *sc);
670static void    bxe_free_fp_buffers(struct bxe_softc *sc);
671static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
672                                      struct bxe_fastpath *fp,
673                                      uint16_t            rx_bd_prod,
674                                      uint16_t            rx_cq_prod,
675                                      uint16_t            rx_sge_prod);
676static void    bxe_link_report_locked(struct bxe_softc *sc);
677static void    bxe_link_report(struct bxe_softc *sc);
678static void    bxe_link_status_update(struct bxe_softc *sc);
679static void    bxe_periodic_callout_func(void *xsc);
680static void    bxe_periodic_start(struct bxe_softc *sc);
681static void    bxe_periodic_stop(struct bxe_softc *sc);
682static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
683                                    uint16_t prev_index,
684                                    uint16_t index);
685static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
686                                     int                 queue);
687static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
688                                     uint16_t            index);
689static uint8_t bxe_txeof(struct bxe_softc *sc,
690                         struct bxe_fastpath *fp);
691static void    bxe_task_fp(struct bxe_fastpath *fp);
692static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
693                                     struct mbuf      *m,
694                                     uint8_t          contents);
695static int     bxe_alloc_mem(struct bxe_softc *sc);
696static void    bxe_free_mem(struct bxe_softc *sc);
697static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
698static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
699static int     bxe_interrupt_attach(struct bxe_softc *sc);
700static void    bxe_interrupt_detach(struct bxe_softc *sc);
701static void    bxe_set_rx_mode(struct bxe_softc *sc);
702static int     bxe_init_locked(struct bxe_softc *sc);
703static int     bxe_stop_locked(struct bxe_softc *sc);
704static __noinline int bxe_nic_load(struct bxe_softc *sc,
705                                   int              load_mode);
706static __noinline int bxe_nic_unload(struct bxe_softc *sc,
707                                     uint32_t         unload_mode,
708                                     uint8_t          keep_link);
709
710static void bxe_handle_sp_tq(void *context, int pending);
711static void bxe_handle_fp_tq(void *context, int pending);
712
713static int bxe_add_cdev(struct bxe_softc *sc);
714static void bxe_del_cdev(struct bxe_softc *sc);
715int bxe_grc_dump(struct bxe_softc *sc);
716static int bxe_alloc_buf_rings(struct bxe_softc *sc);
717static void bxe_free_buf_rings(struct bxe_softc *sc);
718
719/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
720uint32_t
721calc_crc32(uint8_t  *crc32_packet,
722           uint32_t crc32_length,
723           uint32_t crc32_seed,
724           uint8_t  complement)
725{
726   uint32_t byte         = 0;
727   uint32_t bit          = 0;
728   uint8_t  msb          = 0;
729   uint32_t temp         = 0;
730   uint32_t shft         = 0;
731   uint8_t  current_byte = 0;
732   uint32_t crc32_result = crc32_seed;
733   const uint32_t CRC32_POLY = 0x1edc6f41;
734
735   if ((crc32_packet == NULL) ||
736       (crc32_length == 0) ||
737       ((crc32_length % 8) != 0))
738    {
739        return (crc32_result);
740    }
741
742    for (byte = 0; byte < crc32_length; byte = byte + 1)
743    {
744        current_byte = crc32_packet[byte];
745        for (bit = 0; bit < 8; bit = bit + 1)
746        {
747            /* msb = crc32_result[31]; */
748            msb = (uint8_t)(crc32_result >> 31);
749
750            crc32_result = crc32_result << 1;
751
752            /* if (msb != current_byte[bit]) */
753            if (msb != (0x1 & (current_byte >> bit)))
754            {
755                crc32_result = crc32_result ^ CRC32_POLY;
756                /* crc32_result[0] = 1 */
757                crc32_result |= 1;
758            }
759        }
760    }
761
762    /* Last step is to:
763     * 1. "mirror" every bit
764     * 2. swap the 4 bytes
765     * 3. complement each bit
766     */
767
768    /* Mirror */
769    temp = crc32_result;
770    shft = sizeof(crc32_result) * 8 - 1;
771
772    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
773    {
774        temp <<= 1;
775        temp |= crc32_result & 1;
776        shft-- ;
777    }
778
779    /* temp[31-bit] = crc32_result[bit] */
780    temp <<= shft;
781
782    /* Swap */
783    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
784    {
785        uint32_t t0, t1, t2, t3;
786        t0 = (0x000000ff & (temp >> 24));
787        t1 = (0x0000ff00 & (temp >> 8));
788        t2 = (0x00ff0000 & (temp << 8));
789        t3 = (0xff000000 & (temp << 24));
790        crc32_result = t0 | t1 | t2 | t3;
791    }
792
793    /* Complement */
794    if (complement)
795    {
796        crc32_result = ~crc32_result;
797    }
798
799    return (crc32_result);
800}
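/*
 * Illustrative usage (assumption, not part of the original driver): computing
 * a CRC over an 8-byte-aligned buffer with the helper above.  The seed of
 * 0xffffffff and the final complement are choices made for this example only.
 */
#if 0
static uint32_t
example_calc_crc32(uint8_t *buf, uint32_t len)
{
    /* len must be non-zero and a multiple of 8, per the helper's contract */
    return (calc_crc32(buf, len, 0xffffffff, 1));
}
#endif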
801
802int
803bxe_test_bit(int                    nr,
804             volatile unsigned long *addr)
805{
806    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
807}
808
809void
810bxe_set_bit(unsigned int           nr,
811            volatile unsigned long *addr)
812{
813    atomic_set_acq_long(addr, (1 << nr));
814}
815
816void
817bxe_clear_bit(int                    nr,
818              volatile unsigned long *addr)
819{
820    atomic_clear_acq_long(addr, (1 << nr));
821}
822
823int
824bxe_test_and_set_bit(int                    nr,
825                       volatile unsigned long *addr)
826{
827    unsigned long x;
828    nr = (1 << nr);
829    do {
830        x = *addr;
831    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
832    // if (x & nr) bit_was_set; else bit_was_not_set;
833    return (x & nr);
834}
835
836int
837bxe_test_and_clear_bit(int                    nr,
838                       volatile unsigned long *addr)
839{
840    unsigned long x;
841    nr = (1 << nr);
842    do {
843        x = *addr;
844    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
845    // if (x & nr) bit_was_set; else bit_was_not_set;
846    return (x & nr);
847}
848
849int
850bxe_cmpxchg(volatile int *addr,
851            int          old,
852            int          new)
853{
854    int x;
855    do {
856        x = *addr;
857    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
858    return (x);
859}
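/*
 * Illustrative usage (assumption, not from the original source): the atomic
 * bit helpers above are typically applied to driver state/flag words.  The
 * "pending" word and "event" bit index below are hypothetical.
 */
#if 0
static void
example_bit_helpers(volatile unsigned long *pending, int event)
{
    bxe_set_bit(event, pending);                  /* mark the event pending */

    if (bxe_test_and_clear_bit(event, pending)) {
        /* the bit was set; handle the event exactly once here */
    }
}
#endif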
860
861/*
862 * Get DMA memory from the OS.
863 *
864 * Validates that the OS has provided DMA buffers in response to a
865 * bus_dmamap_load call and saves the physical address of those buffers.
866 * When the callback is used the OS returns 0 from the mapping function
867 * (bus_dmamap_load), so the callback's error argument and the zeroed
868 * paddr/nseg fields are used to report any failure back to the caller.
869 *
870 * Returns:
871 *   Nothing.
872 */
873static void
874bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
875{
876    struct bxe_dma *dma = arg;
877
878    if (error) {
879        dma->paddr = 0;
880        dma->nseg  = 0;
881        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
882    } else {
883        dma->paddr = segs->ds_addr;
884        dma->nseg  = nseg;
885    }
886}
887
888/*
889 * Allocate a block of memory and map it for DMA. No partial completions
890 * allowed and release any resources acquired if we can't acquire all
891 * resources.
892 *
893 * Returns:
894 *   0 = Success, !0 = Failure
895 */
896int
897bxe_dma_alloc(struct bxe_softc *sc,
898              bus_size_t       size,
899              struct bxe_dma   *dma,
900              const char       *msg)
901{
902    int rc;
903
904    if (dma->size > 0) {
905        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
906              (unsigned long)dma->size);
907        return (1);
908    }
909
910    memset(dma, 0, sizeof(*dma)); /* sanity */
911    dma->sc   = sc;
912    dma->size = size;
913    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
914
915    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
916                            BCM_PAGE_SIZE,      /* alignment */
917                            0,                  /* boundary limit */
918                            BUS_SPACE_MAXADDR,  /* restricted low */
919                            BUS_SPACE_MAXADDR,  /* restricted hi */
920                            NULL,               /* addr filter() */
921                            NULL,               /* addr filter() arg */
922                            size,               /* max map size */
923                            1,                  /* num discontinuous */
924                            size,               /* max seg size */
925                            BUS_DMA_ALLOCNOW,   /* flags */
926                            NULL,               /* lock() */
927                            NULL,               /* lock() arg */
928                            &dma->tag);         /* returned dma tag */
929    if (rc != 0) {
930        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
931        memset(dma, 0, sizeof(*dma));
932        return (1);
933    }
934
935    rc = bus_dmamem_alloc(dma->tag,
936                          (void **)&dma->vaddr,
937                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
938                          &dma->map);
939    if (rc != 0) {
940        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
941        bus_dma_tag_destroy(dma->tag);
942        memset(dma, 0, sizeof(*dma));
943        return (1);
944    }
945
946    rc = bus_dmamap_load(dma->tag,
947                         dma->map,
948                         dma->vaddr,
949                         size,
950                         bxe_dma_map_addr, /* BLOGD in here */
951                         dma,
952                         BUS_DMA_NOWAIT);
953    if (rc != 0) {
954        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
955        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
956        bus_dma_tag_destroy(dma->tag);
957        memset(dma, 0, sizeof(*dma));
958        return (1);
959    }
960
961    return (0);
962}
963
964void
965bxe_dma_free(struct bxe_softc *sc,
966             struct bxe_dma   *dma)
967{
968    if (dma->size > 0) {
969        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
970
971        bus_dmamap_sync(dma->tag, dma->map,
972                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
973        bus_dmamap_unload(dma->tag, dma->map);
974        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
975        bus_dma_tag_destroy(dma->tag);
976    }
977
978    memset(dma, 0, sizeof(*dma));
979}
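/*
 * Illustrative usage (assumption, not from the original source): a caller
 * allocates a DMA-able block with bxe_dma_alloc(), uses dma.vaddr for CPU
 * access and dma.paddr to program the hardware, and releases everything
 * with bxe_dma_free().  The "example block" tag is hypothetical.
 */
#if 0
static int
example_dma_usage(struct bxe_softc *sc)
{
    struct bxe_dma dma;

    memset(&dma, 0, sizeof(dma)); /* size must be 0 before the first alloc */

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
        return (-1); /* tag create, alloc, or map load failed */
    }

    /* dma.vaddr is zeroed host memory; dma.paddr is its bus address */

    bxe_dma_free(sc, &dma); /* sync, unload, free, and destroy the tag */

    return (0);
}
#endif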
980
981/*
982 * These indirect read and write routines are only used during init.
983 * The locking is handled by the MCP.
984 */
985
986void
987bxe_reg_wr_ind(struct bxe_softc *sc,
988               uint32_t         addr,
989               uint32_t         val)
990{
991    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
992    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
993    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
994}
995
996uint32_t
997bxe_reg_rd_ind(struct bxe_softc *sc,
998               uint32_t         addr)
999{
1000    uint32_t val;
1001
1002    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1003    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1004    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1005
1006    return (val);
1007}
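/*
 * Illustrative usage (assumption, not from the original source): an indirect
 * GRC register read-modify-write through PCI config space using the helpers
 * above.  The register offset and bit are placeholders.
 */
#if 0
static void
example_indirect_access(struct bxe_softc *sc, uint32_t grc_offset)
{
    uint32_t val;

    val = bxe_reg_rd_ind(sc, grc_offset);      /* read via PCICFG_GRC_* */
    bxe_reg_wr_ind(sc, grc_offset, val | 0x1); /* modify and write back */
}
#endif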
1008
1009static int
1010bxe_acquire_hw_lock(struct bxe_softc *sc,
1011                    uint32_t         resource)
1012{
1013    uint32_t lock_status;
1014    uint32_t resource_bit = (1 << resource);
1015    int func = SC_FUNC(sc);
1016    uint32_t hw_lock_control_reg;
1017    int cnt;
1018
1019    /* validate the resource is within range */
1020    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1021        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1022            " resource_bit 0x%x\n", resource, resource_bit);
1023        return (-1);
1024    }
1025
1026    if (func <= 5) {
1027        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1028    } else {
1029        hw_lock_control_reg =
1030                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1031    }
1032
1033    /* validate the resource is not already taken */
1034    lock_status = REG_RD(sc, hw_lock_control_reg);
1035    if (lock_status & resource_bit) {
1036        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1037              resource, lock_status, resource_bit);
1038        return (-1);
1039    }
1040
1041    /* try every 5ms for 5 seconds */
1042    for (cnt = 0; cnt < 1000; cnt++) {
1043        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1044        lock_status = REG_RD(sc, hw_lock_control_reg);
1045        if (lock_status & resource_bit) {
1046            return (0);
1047        }
1048        DELAY(5000);
1049    }
1050
1051    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1052        resource, resource_bit);
1053    return (-1);
1054}
1055
1056static int
1057bxe_release_hw_lock(struct bxe_softc *sc,
1058                    uint32_t         resource)
1059{
1060    uint32_t lock_status;
1061    uint32_t resource_bit = (1 << resource);
1062    int func = SC_FUNC(sc);
1063    uint32_t hw_lock_control_reg;
1064
1065    /* validate the resource is within range */
1066    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1067        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1068            " resource_bit 0x%x\n", resource, resource_bit);
1069        return (-1);
1070    }
1071
1072    if (func <= 5) {
1073        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1074    } else {
1075        hw_lock_control_reg =
1076                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1077    }
1078
1079    /* validate the resource is currently taken */
1080    lock_status = REG_RD(sc, hw_lock_control_reg);
1081    if (!(lock_status & resource_bit)) {
1082        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1083              resource, lock_status, resource_bit);
1084        return (-1);
1085    }
1086
1087    REG_WR(sc, hw_lock_control_reg, resource_bit);
1088    return (0);
1089}
1090static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1091{
1092	BXE_PHY_LOCK(sc);
1093	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1094}
1095
1096static void bxe_release_phy_lock(struct bxe_softc *sc)
1097{
1098	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1099	BXE_PHY_UNLOCK(sc);
1100}
1101/*
1102 * The per-pf misc lock must be acquired before the per-port mcp lock.
1103 * Otherwise, had we done things the other way around, if two pfs from the
1104 * same port were to attempt to access nvram at the same time, we could run
1105 * into a scenario such as:
1106 * pf A takes the port lock.
1107 * pf B succeeds in taking the same lock since they are from the same port.
1108 * pf A takes the per-pf misc lock. Performs eeprom access.
1109 * pf A finishes. Unlocks the per-pf misc lock.
1110 * pf B takes the lock and proceeds to perform its own access.
1111 * pf A unlocks the per-port lock, while pf B is still working (!).
1112 * mcp takes the per-port lock and corrupts pf B's access (and/or has its
1113 * own access corrupted by pf B).
1114 */
1115static int
1116bxe_acquire_nvram_lock(struct bxe_softc *sc)
1117{
1118    int port = SC_PORT(sc);
1119    int count, i;
1120    uint32_t val = 0;
1121
1122    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1123    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1124
1125    /* adjust timeout for emulation/FPGA */
1126    count = NVRAM_TIMEOUT_COUNT;
1127    if (CHIP_REV_IS_SLOW(sc)) {
1128        count *= 100;
1129    }
1130
1131    /* request access to nvram interface */
1132    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1133           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1134
1135    for (i = 0; i < count*10; i++) {
1136        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1137        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1138            break;
1139        }
1140
1141        DELAY(5);
1142    }
1143
1144    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1145        BLOGE(sc, "Cannot get access to nvram interface "
1146            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1147            port, val);
1148        return (-1);
1149    }
1150
1151    return (0);
1152}
1153
1154static int
1155bxe_release_nvram_lock(struct bxe_softc *sc)
1156{
1157    int port = SC_PORT(sc);
1158    int count, i;
1159    uint32_t val = 0;
1160
1161    /* adjust timeout for emulation/FPGA */
1162    count = NVRAM_TIMEOUT_COUNT;
1163    if (CHIP_REV_IS_SLOW(sc)) {
1164        count *= 100;
1165    }
1166
1167    /* relinquish nvram interface */
1168    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1169           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1170
1171    for (i = 0; i < count*10; i++) {
1172        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1173        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1174            break;
1175        }
1176
1177        DELAY(5);
1178    }
1179
1180    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1181        BLOGE(sc, "Cannot free access to nvram interface "
1182            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1183            port, val);
1184        return (-1);
1185    }
1186
1187    /* release HW lock: protect against other PFs in PF Direct Assignment */
1188    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1189
1190    return (0);
1191}
1192
1193static void
1194bxe_enable_nvram_access(struct bxe_softc *sc)
1195{
1196    uint32_t val;
1197
1198    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1199
1200    /* enable both bits, even on read */
1201    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1202           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1203}
1204
1205static void
1206bxe_disable_nvram_access(struct bxe_softc *sc)
1207{
1208    uint32_t val;
1209
1210    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1211
1212    /* disable both bits, even after read */
1213    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1214           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1215                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1216}
1217
1218static int
1219bxe_nvram_read_dword(struct bxe_softc *sc,
1220                     uint32_t         offset,
1221                     uint32_t         *ret_val,
1222                     uint32_t         cmd_flags)
1223{
1224    int count, i, rc;
1225    uint32_t val;
1226
1227    /* build the command word */
1228    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1229
1230    /* need to clear DONE bit separately */
1231    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1232
1233    /* address of the NVRAM to read from */
1234    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1235           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1236
1237    /* issue a read command */
1238    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1239
1240    /* adjust timeout for emulation/FPGA */
1241    count = NVRAM_TIMEOUT_COUNT;
1242    if (CHIP_REV_IS_SLOW(sc)) {
1243        count *= 100;
1244    }
1245
1246    /* wait for completion */
1247    *ret_val = 0;
1248    rc = -1;
1249    for (i = 0; i < count; i++) {
1250        DELAY(5);
1251        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1252
1253        if (val & MCPR_NVM_COMMAND_DONE) {
1254            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1255            /* we read nvram data in cpu order
1256             * but ethtool sees it as an array of bytes
1257             * converting to big-endian will do the work
1258             */
1259            *ret_val = htobe32(val);
1260            rc = 0;
1261            break;
1262        }
1263    }
1264
1265    if (rc == -1) {
1266        BLOGE(sc, "nvram read timeout expired "
1267            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1268            offset, cmd_flags, val);
1269    }
1270
1271    return (rc);
1272}
1273
1274static int
1275bxe_nvram_read(struct bxe_softc *sc,
1276               uint32_t         offset,
1277               uint8_t          *ret_buf,
1278               int              buf_size)
1279{
1280    uint32_t cmd_flags;
1281    uint32_t val;
1282    int rc;
1283
1284    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1285        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1286              offset, buf_size);
1287        return (-1);
1288    }
1289
1290    if ((offset + buf_size) > sc->devinfo.flash_size) {
1291        BLOGE(sc, "Invalid parameter, "
1292                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1293              offset, buf_size, sc->devinfo.flash_size);
1294        return (-1);
1295    }
1296
1297    /* request access to nvram interface */
1298    rc = bxe_acquire_nvram_lock(sc);
1299    if (rc) {
1300        return (rc);
1301    }
1302
1303    /* enable access to nvram interface */
1304    bxe_enable_nvram_access(sc);
1305
1306    /* read the first word(s) */
1307    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1308    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1309        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1310        memcpy(ret_buf, &val, 4);
1311
1312        /* advance to the next dword */
1313        offset += sizeof(uint32_t);
1314        ret_buf += sizeof(uint32_t);
1315        buf_size -= sizeof(uint32_t);
1316        cmd_flags = 0;
1317    }
1318
1319    if (rc == 0) {
1320        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1321        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1322        memcpy(ret_buf, &val, 4);
1323    }
1324
1325    /* disable access to nvram interface */
1326    bxe_disable_nvram_access(sc);
1327    bxe_release_nvram_lock(sc);
1328
1329    return (rc);
1330}
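/*
 * Illustrative usage (assumption, not from the original source): reading a
 * small dword-aligned region of the flash with the helper above.  Offset and
 * length are placeholders; both must be 4-byte aligned and the range must
 * fall within sc->devinfo.flash_size.
 */
#if 0
static int
example_nvram_read(struct bxe_softc *sc)
{
    uint8_t buf[16];

    /* read 16 bytes starting at flash offset 0 */
    return (bxe_nvram_read(sc, 0, buf, sizeof(buf)));
}
#endif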
1331
1332static int
1333bxe_nvram_write_dword(struct bxe_softc *sc,
1334                      uint32_t         offset,
1335                      uint32_t         val,
1336                      uint32_t         cmd_flags)
1337{
1338    int count, i, rc;
1339
1340    /* build the command word */
1341    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1342
1343    /* need to clear DONE bit separately */
1344    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1345
1346    /* write the data */
1347    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1348
1349    /* address of the NVRAM to write to */
1350    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1351           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1352
1353    /* issue the write command */
1354    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1355
1356    /* adjust timeout for emulation/FPGA */
1357    count = NVRAM_TIMEOUT_COUNT;
1358    if (CHIP_REV_IS_SLOW(sc)) {
1359        count *= 100;
1360    }
1361
1362    /* wait for completion */
1363    rc = -1;
1364    for (i = 0; i < count; i++) {
1365        DELAY(5);
1366        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1367        if (val & MCPR_NVM_COMMAND_DONE) {
1368            rc = 0;
1369            break;
1370        }
1371    }
1372
1373    if (rc == -1) {
1374        BLOGE(sc, "nvram write timeout expired "
1375            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1376            offset, cmd_flags, val);
1377    }
1378
1379    return (rc);
1380}
1381
1382#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
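/*
 * Worked example (added for clarity): BYTE_OFFSET() maps a byte address to a
 * bit shift within its aligned dword, e.g. offset 0x6 -> byte lane 2 ->
 * shift 16.  bxe_nvram_write1() below uses it to splice a single byte into
 * the dword read back from flash.
 */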
1383
1384static int
1385bxe_nvram_write1(struct bxe_softc *sc,
1386                 uint32_t         offset,
1387                 uint8_t          *data_buf,
1388                 int              buf_size)
1389{
1390    uint32_t cmd_flags;
1391    uint32_t align_offset;
1392    uint32_t val;
1393    int rc;
1394
1395    if ((offset + buf_size) > sc->devinfo.flash_size) {
1396        BLOGE(sc, "Invalid parameter, "
1397                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1398              offset, buf_size, sc->devinfo.flash_size);
1399        return (-1);
1400    }
1401
1402    /* request access to nvram interface */
1403    rc = bxe_acquire_nvram_lock(sc);
1404    if (rc) {
1405        return (rc);
1406    }
1407
1408    /* enable access to nvram interface */
1409    bxe_enable_nvram_access(sc);
1410
1411    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1412    align_offset = (offset & ~0x03);
1413    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1414
1415    if (rc == 0) {
1416        val &= ~(0xff << BYTE_OFFSET(offset));
1417        val |= (*data_buf << BYTE_OFFSET(offset));
1418
1419        /* nvram data is returned as an array of bytes
1420         * convert it back to cpu order
1421         */
1422        val = be32toh(val);
1423
1424        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1425    }
1426
1427    /* disable access to nvram interface */
1428    bxe_disable_nvram_access(sc);
1429    bxe_release_nvram_lock(sc);
1430
1431    return (rc);
1432}
1433
1434static int
1435bxe_nvram_write(struct bxe_softc *sc,
1436                uint32_t         offset,
1437                uint8_t          *data_buf,
1438                int              buf_size)
1439{
1440    uint32_t cmd_flags;
1441    uint32_t val;
1442    uint32_t written_so_far;
1443    int rc;
1444
1445    if (buf_size == 1) {
1446        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1447    }
1448
1449    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1450        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1451              offset, buf_size);
1452        return (-1);
1453    }
1454
1455    if (buf_size == 0) {
1456        return (0); /* nothing to do */
1457    }
1458
1459    if ((offset + buf_size) > sc->devinfo.flash_size) {
1460        BLOGE(sc, "Invalid parameter, "
1461                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1462              offset, buf_size, sc->devinfo.flash_size);
1463        return (-1);
1464    }
1465
1466    /* request access to nvram interface */
1467    rc = bxe_acquire_nvram_lock(sc);
1468    if (rc) {
1469        return (rc);
1470    }
1471
1472    /* enable access to nvram interface */
1473    bxe_enable_nvram_access(sc);
1474
1475    written_so_far = 0;
1476    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1477    while ((written_so_far < buf_size) && (rc == 0)) {
1478        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1479            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1480        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1481            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1482        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1483            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1484        }
1485
1486        memcpy(&val, data_buf, 4);
1487
1488        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1489
1490        /* advance to the next dword */
1491        offset += sizeof(uint32_t);
1492        data_buf += sizeof(uint32_t);
1493        written_so_far += sizeof(uint32_t);
1494        cmd_flags = 0;
1495    }
1496
1497    /* disable access to nvram interface */
1498    bxe_disable_nvram_access(sc);
1499    bxe_release_nvram_lock(sc);
1500
1501    return (rc);
1502}
1503
1504/* copy command into DMAE command memory and set DMAE command Go */
1505void
1506bxe_post_dmae(struct bxe_softc    *sc,
1507              struct dmae_cmd *dmae,
1508              int                 idx)
1509{
1510    uint32_t cmd_offset;
1511    int i;
1512
1513    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1514    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1515        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1516    }
1517
1518    REG_WR(sc, dmae_reg_go_c[idx], 1);
1519}
1520
1521uint32_t
1522bxe_dmae_opcode_add_comp(uint32_t opcode,
1523                         uint8_t  comp_type)
1524{
1525    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1526                      DMAE_CMD_C_TYPE_ENABLE));
1527}
1528
1529uint32_t
1530bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1531{
1532    return (opcode & ~DMAE_CMD_SRC_RESET);
1533}
1534
1535uint32_t
1536bxe_dmae_opcode(struct bxe_softc *sc,
1537                uint8_t          src_type,
1538                uint8_t          dst_type,
1539                uint8_t          with_comp,
1540                uint8_t          comp_type)
1541{
1542    uint32_t opcode = 0;
1543
1544    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1545               (dst_type << DMAE_CMD_DST_SHIFT));
1546
1547    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1548
1549    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1550
1551    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1552               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1553
1554    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1555
1556#ifdef __BIG_ENDIAN
1557    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1558#else
1559    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1560#endif
1561
1562    if (with_comp) {
1563        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1564    }
1565
1566    return (opcode);
1567}
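
/*
 * For example, bxe_prep_dmae_with_comp() below calls this as
 * bxe_dmae_opcode(sc, src_type, dst_type, TRUE, DMAE_COMP_PCI), producing
 * an opcode that carries the source/destination types, both reset bits,
 * the port and VN of this function, the "set error" policy, the
 * appropriate dword-swap endianness, and a PCI completion enabled via
 * bxe_dmae_opcode_add_comp().
 */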
1568
1569static void
1570bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1571                        struct dmae_cmd *dmae,
1572                        uint8_t             src_type,
1573                        uint8_t             dst_type)
1574{
1575    memset(dmae, 0, sizeof(struct dmae_cmd));
1576
1577    /* set the opcode */
1578    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1579                                   TRUE, DMAE_COMP_PCI);
1580
1581    /* fill in the completion parameters */
1582    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1583    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1584    dmae->comp_val     = DMAE_COMP_VAL;
1585}
1586
1587/* issue a DMAE command over the init channel and wait for completion */
1588static int
1589bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1590                         struct dmae_cmd *dmae)
1591{
1592    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1593    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1594
1595    BXE_DMAE_LOCK(sc);
1596
1597    /* reset completion */
1598    *wb_comp = 0;
1599
1600    /* post the command on the channel used for initializations */
1601    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1602
1603    /* wait for completion */
1604    DELAY(5);
1605
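    /*
     * Poll every 50 usec: with a budget of 4000 iterations this is roughly
     * 200 msec on real silicon (400000 iterations, ~20 sec, when
     * CHIP_REV_IS_SLOW()). The wait is also aborted early if a recovery
     * sequence is in progress.
     */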
1606    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1607        if (!timeout ||
1608            (sc->recovery_state != BXE_RECOVERY_DONE &&
1609             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1610            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1611                *wb_comp, sc->recovery_state);
1612            BXE_DMAE_UNLOCK(sc);
1613            return (DMAE_TIMEOUT);
1614        }
1615
1616        timeout--;
1617        DELAY(50);
1618    }
1619
1620    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1621        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1622                *wb_comp, sc->recovery_state);
1623        BXE_DMAE_UNLOCK(sc);
1624        return (DMAE_PCI_ERROR);
1625    }
1626
1627    BXE_DMAE_UNLOCK(sc);
1628    return (0);
1629}
1630
1631void
1632bxe_read_dmae(struct bxe_softc *sc,
1633              uint32_t         src_addr,
1634              uint32_t         len32)
1635{
1636    struct dmae_cmd dmae;
1637    uint32_t *data;
1638    int i, rc;
1639
1640    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1641
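    /*
     * Before the DMAE block has been initialized (dmae_ready not yet set),
     * fall back to reading the GRC registers directly, one dword at a time
     * (indirectly on E1 devices).
     */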
1642    if (!sc->dmae_ready) {
1643        data = BXE_SP(sc, wb_data[0]);
1644
1645        for (i = 0; i < len32; i++) {
1646            data[i] = (CHIP_IS_E1(sc)) ?
1647                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1648                          REG_RD(sc, (src_addr + (i * 4)));
1649        }
1650
1651        return;
1652    }
1653
1654    /* set opcode and fixed command fields */
1655    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1656
1657    /* fill in addresses and len */
1658    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1659    dmae.src_addr_hi = 0;
1660    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1661    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1662    dmae.len         = len32;
1663
1664    /* issue the command and wait for completion */
1665    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1666        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1667    }
1668}
1669
1670void
1671bxe_write_dmae(struct bxe_softc *sc,
1672               bus_addr_t       dma_addr,
1673               uint32_t         dst_addr,
1674               uint32_t         len32)
1675{
1676    struct dmae_cmd dmae;
1677    int rc;
1678
1679    if (!sc->dmae_ready) {
1680        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1681
1682        if (CHIP_IS_E1(sc)) {
1683            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1684        } else {
1685            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1686        }
1687
1688        return;
1689    }
1690
1691    /* set opcode and fixed command fields */
1692    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1693
1694    /* fill in addresses and len */
1695    dmae.src_addr_lo = U64_LO(dma_addr);
1696    dmae.src_addr_hi = U64_HI(dma_addr);
1697    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1698    dmae.dst_addr_hi = 0;
1699    dmae.len         = len32;
1700
1701    /* issue the command and wait for completion */
1702    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1703        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1704    }
1705}
1706
1707void
1708bxe_write_dmae_phys_len(struct bxe_softc *sc,
1709                        bus_addr_t       phys_addr,
1710                        uint32_t         addr,
1711                        uint32_t         len)
1712{
1713    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1714    int offset = 0;
1715
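    /*
     * 'len' and 'dmae_wr_max' are counted in dwords while 'offset' is a
     * byte offset, hence the "* 4" when advancing through the source
     * buffer and the GRC address range.
     */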
1716    while (len > dmae_wr_max) {
1717        bxe_write_dmae(sc,
1718                       (phys_addr + offset), /* src DMA address */
1719                       (addr + offset),      /* dst GRC address */
1720                       dmae_wr_max);
1721        offset += (dmae_wr_max * 4);
1722        len -= dmae_wr_max;
1723    }
1724
1725    bxe_write_dmae(sc,
1726                   (phys_addr + offset), /* src DMA address */
1727                   (addr + offset),      /* dst GRC address */
1728                   len);
1729}
1730
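/*
 * Stamp the CDU validation values into the ustorm and xstorm sections of an
 * eth connection context. Each value is derived from the HW CID, the CDU
 * region number and the ETH connection type via CDU_RSRVD_VALUE_TYPE_A().
 */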
1731void
1732bxe_set_ctx_validation(struct bxe_softc   *sc,
1733                       struct eth_context *cxt,
1734                       uint32_t           cid)
1735{
1736    /* ustorm cxt validation */
1737    cxt->ustorm_ag_context.cdu_usage =
1738        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1739            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1740    /* xcontext validation */
1741    cxt->xstorm_ag_context.cdu_reserved =
1742        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1743            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1744}
1745
1746static void
1747bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1748                            uint8_t          port,
1749                            uint8_t          fw_sb_id,
1750                            uint8_t          sb_index,
1751                            uint8_t          ticks)
1752{
1753    uint32_t addr =
1754        (BAR_CSTRORM_INTMEM +
1755         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1756
1757    REG_WR8(sc, addr, ticks);
1758
1759    BLOGD(sc, DBG_LOAD,
1760          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1761          port, fw_sb_id, sb_index, ticks);
1762}
1763
1764static void
1765bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1766                            uint8_t          port,
1767                            uint16_t         fw_sb_id,
1768                            uint8_t          sb_index,
1769                            uint8_t          disable)
1770{
1771    uint32_t enable_flag =
1772        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1773    uint32_t addr =
1774        (BAR_CSTRORM_INTMEM +
1775         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1776    uint8_t flags;
1777
1778    /* clear and set */
1779    flags = REG_RD8(sc, addr);
1780    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1781    flags |= enable_flag;
1782    REG_WR8(sc, addr, flags);
1783
1784    BLOGD(sc, DBG_LOAD,
1785          "port %d fw_sb_id %d sb_index %d disable %d\n",
1786          port, fw_sb_id, sb_index, disable);
1787}
1788
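/*
 * Program the interrupt coalescing parameters for a single status block
 * index: the timeout is written as usec / 4 (the hardware field appears to
 * count in ~4 usec ticks), and the index is disabled either when explicitly
 * requested or when usec is 0.
 */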
1789void
1790bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1791                             uint8_t          fw_sb_id,
1792                             uint8_t          sb_index,
1793                             uint8_t          disable,
1794                             uint16_t         usec)
1795{
1796    int port = SC_PORT(sc);
1797    uint8_t ticks = (usec / 4); /* XXX ??? */
1798
1799    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1800
1801    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1802    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1803}
1804
1805void
1806elink_cb_udelay(struct bxe_softc *sc,
1807                uint32_t         usecs)
1808{
1809    DELAY(usecs);
1810}
1811
1812uint32_t
1813elink_cb_reg_read(struct bxe_softc *sc,
1814                  uint32_t         reg_addr)
1815{
1816    return (REG_RD(sc, reg_addr));
1817}
1818
1819void
1820elink_cb_reg_write(struct bxe_softc *sc,
1821                   uint32_t         reg_addr,
1822                   uint32_t         val)
1823{
1824    REG_WR(sc, reg_addr, val);
1825}
1826
1827void
1828elink_cb_reg_wb_write(struct bxe_softc *sc,
1829                      uint32_t         offset,
1830                      uint32_t         *wb_write,
1831                      uint16_t         len)
1832{
1833    REG_WR_DMAE(sc, offset, wb_write, len);
1834}
1835
1836void
1837elink_cb_reg_wb_read(struct bxe_softc *sc,
1838                     uint32_t         offset,
1839                     uint32_t         *wb_write,
1840                     uint16_t         len)
1841{
1842    REG_RD_DMAE(sc, offset, wb_write, len);
1843}
1844
1845uint8_t
1846elink_cb_path_id(struct bxe_softc *sc)
1847{
1848    return (SC_PATH(sc));
1849}
1850
1851void
1852elink_cb_event_log(struct bxe_softc     *sc,
1853                   const elink_log_id_t elink_log_id,
1854                   ...)
1855{
1856    /* XXX */
1857    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1858}
1859
1860static int
1861bxe_set_spio(struct bxe_softc *sc,
1862             int              spio,
1863             uint32_t         mode)
1864{
1865    uint32_t spio_reg;
1866
1867    /* Only 2 SPIOs are configurable */
1868    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1869        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1870        return (-1);
1871    }
1872
1873    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1874
1875    /* read SPIO and mask except the float bits */
1876    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1877
1878    switch (mode) {
1879    case MISC_SPIO_OUTPUT_LOW:
1880        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1881        /* clear FLOAT and set CLR */
1882        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1883        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1884        break;
1885
1886    case MISC_SPIO_OUTPUT_HIGH:
1887        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1888        /* clear FLOAT and set SET */
1889        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1890        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1891        break;
1892
1893    case MISC_SPIO_INPUT_HI_Z:
1894        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1895        /* set FLOAT */
1896        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1897        break;
1898
1899    default:
1900        break;
1901    }
1902
1903    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1904    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1905
1906    return (0);
1907}
1908
1909static int
1910bxe_gpio_read(struct bxe_softc *sc,
1911              int              gpio_num,
1912              uint8_t          port)
1913{
1914    /* The GPIO should be swapped if swap register is set and active */
1915    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1916                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1917    int gpio_shift = (gpio_num +
1918                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1919    uint32_t gpio_mask = (1 << gpio_shift);
1920    uint32_t gpio_reg;
1921
1922    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1923        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1924            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1925            gpio_mask);
1926        return (-1);
1927    }
1928
1929    /* read GPIO value */
1930    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1931
1932    /* get the requested pin value */
1933    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1934}
1935
1936static int
1937bxe_gpio_write(struct bxe_softc *sc,
1938               int              gpio_num,
1939               uint32_t         mode,
1940               uint8_t          port)
1941{
1942    /* The GPIO should be swapped if swap register is set and active */
1943    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1944                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1945    int gpio_shift = (gpio_num +
1946                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1947    uint32_t gpio_mask = (1 << gpio_shift);
1948    uint32_t gpio_reg;
1949
1950    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1951        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1952            " gpio_shift %d gpio_mask 0x%x\n",
1953            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1954        return (-1);
1955    }
1956
1957    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1958
1959    /* read GPIO and mask except the float bits */
1960    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1961
1962    switch (mode) {
1963    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1964        BLOGD(sc, DBG_PHY,
1965              "Set GPIO %d (shift %d) -> output low\n",
1966              gpio_num, gpio_shift);
1967        /* clear FLOAT and set CLR */
1968        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1969        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1970        break;
1971
1972    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1973        BLOGD(sc, DBG_PHY,
1974              "Set GPIO %d (shift %d) -> output high\n",
1975              gpio_num, gpio_shift);
1976        /* clear FLOAT and set SET */
1977        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1978        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1979        break;
1980
1981    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1982        BLOGD(sc, DBG_PHY,
1983              "Set GPIO %d (shift %d) -> input\n",
1984              gpio_num, gpio_shift);
1985        /* set FLOAT */
1986        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1987        break;
1988
1989    default:
1990        break;
1991    }
1992
1993    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1994    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1995
1996    return (0);
1997}
1998
1999static int
2000bxe_gpio_mult_write(struct bxe_softc *sc,
2001                    uint8_t          pins,
2002                    uint32_t         mode)
2003{
2004    uint32_t gpio_reg;
2005
2006    /* any port swapping should be handled by caller */
2007
2008    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2009
2010    /* read GPIO and mask except the float bits */
2011    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2012    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2013    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2014    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2015
2016    switch (mode) {
2017    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2018        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2019        /* set CLR */
2020        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2021        break;
2022
2023    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2024        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2025        /* set SET */
2026        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2027        break;
2028
2029    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2030        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2031        /* set FLOAT */
2032        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2033        break;
2034
2035    default:
2036        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2037            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2038        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2039        return (-1);
2040    }
2041
2042    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2043    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2044
2045    return (0);
2046}
2047
2048static int
2049bxe_gpio_int_write(struct bxe_softc *sc,
2050                   int              gpio_num,
2051                   uint32_t         mode,
2052                   uint8_t          port)
2053{
2054    /* The GPIO should be swapped if swap register is set and active */
2055    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2056                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2057    int gpio_shift = (gpio_num +
2058                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2059    uint32_t gpio_mask = (1 << gpio_shift);
2060    uint32_t gpio_reg;
2061
2062    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2063        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2064            " gpio_shift %d gpio_mask 0x%x\n",
2065            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2066        return (-1);
2067    }
2068
2069    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2070
2071    /* read GPIO int */
2072    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2073
2074    switch (mode) {
2075    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2076        BLOGD(sc, DBG_PHY,
2077              "Clear GPIO INT %d (shift %d) -> output low\n",
2078              gpio_num, gpio_shift);
2079        /* clear SET and set CLR */
2080        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2081        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2082        break;
2083
2084    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2085        BLOGD(sc, DBG_PHY,
2086              "Set GPIO INT %d (shift %d) -> output high\n",
2087              gpio_num, gpio_shift);
2088        /* clear CLR and set SET */
2089        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2090        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2091        break;
2092
2093    default:
2094        break;
2095    }
2096
2097    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2098    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2099
2100    return (0);
2101}
2102
2103uint32_t
2104elink_cb_gpio_read(struct bxe_softc *sc,
2105                   uint16_t         gpio_num,
2106                   uint8_t          port)
2107{
2108    return (bxe_gpio_read(sc, gpio_num, port));
2109}
2110
2111uint8_t
2112elink_cb_gpio_write(struct bxe_softc *sc,
2113                    uint16_t         gpio_num,
2114                    uint8_t          mode, /* 0=low 1=high */
2115                    uint8_t          port)
2116{
2117    return (bxe_gpio_write(sc, gpio_num, mode, port));
2118}
2119
2120uint8_t
2121elink_cb_gpio_mult_write(struct bxe_softc *sc,
2122                         uint8_t          pins,
2123                         uint8_t          mode) /* 0=low 1=high */
2124{
2125    return (bxe_gpio_mult_write(sc, pins, mode));
2126}
2127
2128uint8_t
2129elink_cb_gpio_int_write(struct bxe_softc *sc,
2130                        uint16_t         gpio_num,
2131                        uint8_t          mode, /* 0=low 1=high */
2132                        uint8_t          port)
2133{
2134    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2135}
2136
2137void
2138elink_cb_notify_link_changed(struct bxe_softc *sc)
2139{
2140    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2141                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2142}
2143
2144/* send the MCP a request, block until there is a reply */
2145uint32_t
2146elink_cb_fw_command(struct bxe_softc *sc,
2147                    uint32_t         command,
2148                    uint32_t         param)
2149{
2150    int mb_idx = SC_FW_MB_IDX(sc);
2151    uint32_t seq;
2152    uint32_t rc = 0;
2153    uint32_t cnt = 1;
2154    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2155
2156    BXE_FWMB_LOCK(sc);
2157
2158    seq = ++sc->fw_seq;
2159    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2160    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2161
2162    BLOGD(sc, DBG_PHY,
2163          "wrote command 0x%08x to FW MB param 0x%08x\n",
2164          (command | seq), param);
2165
2166    /* Let the FW do its magic. Give it up to 5 seconds (500 polls of 'delay' msec each)... */
2167    do {
2168        DELAY(delay * 1000);
2169        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2170    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2171
2172    BLOGD(sc, DBG_PHY,
2173          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2174          cnt*delay, rc, seq);
2175
2176    /* is this a reply to our command? */
2177    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2178        rc &= FW_MSG_CODE_MASK;
2179    } else {
2180        /* Ruh-roh! */
2181        BLOGE(sc, "FW failed to respond!\n");
2182        // XXX bxe_fw_dump(sc);
2183        rc = 0;
2184    }
2185
2186    BXE_FWMB_UNLOCK(sc);
2187    return (rc);
2188}
2189
2190static uint32_t
2191bxe_fw_command(struct bxe_softc *sc,
2192               uint32_t         command,
2193               uint32_t         param)
2194{
2195    return (elink_cb_fw_command(sc, command, param));
2196}
2197
2198static void
2199__storm_memset_dma_mapping(struct bxe_softc *sc,
2200                           uint32_t         addr,
2201                           bus_addr_t       mapping)
2202{
2203    REG_WR(sc, addr, U64_LO(mapping));
2204    REG_WR(sc, (addr + 4), U64_HI(mapping));
2205}
2206
2207static void
2208storm_memset_spq_addr(struct bxe_softc *sc,
2209                      bus_addr_t       mapping,
2210                      uint16_t         abs_fid)
2211{
2212    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2213                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2214    __storm_memset_dma_mapping(sc, addr, mapping);
2215}
2216
2217static void
2218storm_memset_vf_to_pf(struct bxe_softc *sc,
2219                      uint16_t         abs_fid,
2220                      uint16_t         pf_id)
2221{
2222    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2223    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2224    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2225    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2226}
2227
2228static void
2229storm_memset_func_en(struct bxe_softc *sc,
2230                     uint16_t         abs_fid,
2231                     uint8_t          enable)
2232{
2233    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2234    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2235    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2236    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2237}
2238
2239static void
2240storm_memset_eq_data(struct bxe_softc       *sc,
2241                     struct event_ring_data *eq_data,
2242                     uint16_t               pfid)
2243{
2244    uint32_t addr;
2245    size_t size;
2246
2247    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2248    size = sizeof(struct event_ring_data);
2249    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2250}
2251
2252static void
2253storm_memset_eq_prod(struct bxe_softc *sc,
2254                     uint16_t         eq_prod,
2255                     uint16_t         pfid)
2256{
2257    uint32_t addr = (BAR_CSTRORM_INTMEM +
2258                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2259    REG_WR16(sc, addr, eq_prod);
2260}
2261
2262/*
2263 * Post a slowpath command.
2264 *
2265 * A slowpath command is used to propagate a configuration change through
2266 * the controller in a controlled manner, allowing each STORM processor and
2267 * other H/W blocks to phase in the change.  The commands sent on the
2268 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2269 * completion of the ramrod will occur in different ways.  Here's a
2270 * breakdown of ramrods and how they complete:
2271 *
2272 * RAMROD_CMD_ID_ETH_PORT_SETUP
2273 *   Used to setup the leading connection on a port.  Completes on the
2274 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2275 *
2276 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2277 *   Used to setup an additional connection on a port.  Completes on the
2278 *   RCQ of the multi-queue/RSS connection being initialized.
2279 *
2280 * RAMROD_CMD_ID_ETH_STAT_QUERY
2281 *   Used to force the storm processors to update the statistics database
2282 *   in host memory.  This ramrod is sent on the leading connection CID and
2283 *   completes as an index increment of the CSTORM on the default status
2284 *   block.
2285 *
2286 * RAMROD_CMD_ID_ETH_UPDATE
2287 *   Used to update the state of the leading connection, usually to update
2288 *   the RSS indirection table.  Completes on the RCQ of the leading
2289 *   connection. (Not currently used under FreeBSD until OS support becomes
2290 *   available.)
2291 *
2292 * RAMROD_CMD_ID_ETH_HALT
2293 *   Used when tearing down a connection prior to driver unload.  Completes
2294 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2295 *   use this on the leading connection.
2296 *
2297 * RAMROD_CMD_ID_ETH_SET_MAC
2298 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2299 *   the RCQ of the leading connection.
2300 *
2301 * RAMROD_CMD_ID_ETH_CFC_DEL
2302 *   Used when tearing down a connection prior to driver unload.  Completes
2303 *   on the RCQ of the leading connection (since the current connection
2304 *   has been completely removed from controller memory).
2305 *
2306 * RAMROD_CMD_ID_ETH_PORT_DEL
2307 *   Used to tear down the leading connection prior to driver unload,
2308 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2309 *   default status block.
2310 *
2311 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2312 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2313 *   RSS connection that is being offloaded.  (Not currently used under
2314 *   FreeBSD.)
2315 *
2316 * There can only be one command pending per function.
2317 *
2318 * Returns:
2319 *   0 = Success, !0 = Failure.
2320 */
2321
2322/* must be called under the spq lock */
2323static inline
2324struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2325{
2326    struct eth_spe *next_spe = sc->spq_prod_bd;
2327
2328    if (sc->spq_prod_bd == sc->spq_last_bd) {
2329        /* wrap back to the first eth_spq */
2330        sc->spq_prod_bd = sc->spq;
2331        sc->spq_prod_idx = 0;
2332    } else {
2333        sc->spq_prod_bd++;
2334        sc->spq_prod_idx++;
2335    }
2336
2337    return (next_spe);
2338}
2339
2340/* must be called under the spq lock */
2341static inline
2342void bxe_sp_prod_update(struct bxe_softc *sc)
2343{
2344    int func = SC_FUNC(sc);
2345
2346    /*
2347     * Make sure that BD data is updated before writing the producer.
2348     * BD data is written to the memory, the producer is read from the
2349     * memory, thus we need a full memory barrier to ensure the ordering.
2350     */
2351    mb();
2352
2353    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2354             sc->spq_prod_idx);
2355
2356    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2357                      BUS_SPACE_BARRIER_WRITE);
2358}
2359
2360/**
2361 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2362 *
2363 * @cmd:      command to check
2364 * @cmd_type: command type
2365 */
2366static inline
2367int bxe_is_contextless_ramrod(int cmd,
2368                              int cmd_type)
2369{
2370    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2371        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2372        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2373        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2374        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2375        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2376        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2377        return (TRUE);
2378    } else {
2379        return (FALSE);
2380    }
2381}
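
/*
 * Contextless ("common") ramrods complete on the event queue rather than a
 * connection's RCQ, which is why bxe_sp_post() below accounts for them
 * against eq_spq_left instead of cq_spq_left.
 */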
2382
2383/**
2384 * bxe_sp_post - place a single command on an SP ring
2385 *
2386 * @sc:         driver handle
2387 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2388 * @cid:        SW CID the command is related to
2389 * @data_hi:    command private data address (high 32 bits)
2390 * @data_lo:    command private data address (low 32 bits)
2391 * @cmd_type:   command type (e.g. NONE, ETH)
2392 *
2393 * SP data is handled as if it's always an address pair, thus data fields are
2394 * not swapped to little endian in upper functions. Instead this function swaps
2395 * data as if it's two uint32 fields.
2396 */
2397int
2398bxe_sp_post(struct bxe_softc *sc,
2399            int              command,
2400            int              cid,
2401            uint32_t         data_hi,
2402            uint32_t         data_lo,
2403            int              cmd_type)
2404{
2405    struct eth_spe *spe;
2406    uint16_t type;
2407    int common;
2408
2409    common = bxe_is_contextless_ramrod(command, cmd_type);
2410
2411    BXE_SP_LOCK(sc);
2412
2413    if (common) {
2414        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2415            BLOGE(sc, "EQ ring is full!\n");
2416            BXE_SP_UNLOCK(sc);
2417            return (-1);
2418        }
2419    } else {
2420        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2421            BLOGE(sc, "SPQ ring is full!\n");
2422            BXE_SP_UNLOCK(sc);
2423            return (-1);
2424        }
2425    }
2426
2427    spe = bxe_sp_get_next(sc);
2428
2429    /* CID needs the port number to be encoded in it */
2430    spe->hdr.conn_and_cmd_data =
2431        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2432
2433    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2434
2435    /* TBD: Check if it works for VFs */
2436    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2437             SPE_HDR_T_FUNCTION_ID);
2438
2439    spe->hdr.type = htole16(type);
2440
2441    spe->data.update_data_addr.hi = htole32(data_hi);
2442    spe->data.update_data_addr.lo = htole32(data_lo);
2443
2444    /*
2445     * It's OK if the actual decrement is issued to memory somewhere
2446     * between the lock and unlock. Thus no further explicit memory
2447     * barrier is needed.
2448     */
2449    if (common) {
2450        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2451    } else {
2452        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2453    }
2454
2455    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2456    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2457          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2458    BLOGD(sc, DBG_SP,
2459          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2460          sc->spq_prod_idx,
2461          (uint32_t)U64_HI(sc->spq_dma.paddr),
2462          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2463          command,
2464          common,
2465          HW_CID(sc, cid),
2466          data_hi,
2467          data_lo,
2468          type,
2469          atomic_load_acq_long(&sc->cq_spq_left),
2470          atomic_load_acq_long(&sc->eq_spq_left));
2471
2472    bxe_sp_prod_update(sc);
2473
2474    BXE_SP_UNLOCK(sc);
2475    return (0);
2476}
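
/*
 * Illustrative usage only ('rdata_mapping' is a hypothetical bus address of
 * a DMA-coherent buffer holding the prepared ramrod data): queueing a
 * client-setup ramrod for connection 'cid' would look like
 *
 *     rc = bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
 *                      U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *                      ETH_CONNECTION_TYPE);
 *
 * Completion is then reported on the RCQ of the queue being set up, as
 * described in the ramrod overview above.
 */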
2477
2478/**
2479 * bxe_debug_print_ind_table - prints the indirection table configuration.
2480 *
2481 * @sc: driver handle
2482 * @p:  pointer to rss configuration
2483 */
2484
2485/*
2486 * FreeBSD Device probe function.
2487 *
2488 * Compares the device found to the driver's list of supported devices and
2489 * reports back to the bus framework whether this is the right driver for the device.
2490 * This is the driver probe entry point invoked when the module is loaded (e.g. via "kldload").
2491 *
2492 * Returns:
2493 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2494 */
2495static int
2496bxe_probe(device_t dev)
2497{
2498    struct bxe_device_type *t;
2499    char *descbuf;
2500    uint16_t did, sdid, svid, vid;
2501
2502    /* Find our device structure */
2503    t = bxe_devs;
2504
2505    /* Get the data for the device to be probed. */
2506    vid  = pci_get_vendor(dev);
2507    did  = pci_get_device(dev);
2508    svid = pci_get_subvendor(dev);
2509    sdid = pci_get_subdevice(dev);
2510
2511    /* Look through the list of known devices for a match. */
2512    while (t->bxe_name != NULL) {
2513        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2514            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2515            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2516            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2517            if (descbuf == NULL)
2518                return (ENOMEM);
2519
2520            /* Print out the device identity. */
2521            snprintf(descbuf, BXE_DEVDESC_MAX,
2522                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2523                     (((pci_read_config(dev, PCIR_REVID, 4) &
2524                        0xf0) >> 4) + 'A'),
2525                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2526                     BXE_DRIVER_VERSION);
2527
2528            device_set_desc_copy(dev, descbuf);
2529            free(descbuf, M_TEMP);
2530            return (BUS_PROBE_DEFAULT);
2531        }
2532        t++;
2533    }
2534
2535    return (ENXIO);
2536}
2537
2538static void
2539bxe_init_mutexes(struct bxe_softc *sc)
2540{
2541#ifdef BXE_CORE_LOCK_SX
2542    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2543             "bxe%d_core_lock", sc->unit);
2544    sx_init(&sc->core_sx, sc->core_sx_name);
2545#else
2546    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2547             "bxe%d_core_lock", sc->unit);
2548    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2549#endif
2550
2551    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2552             "bxe%d_sp_lock", sc->unit);
2553    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2554
2555    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2556             "bxe%d_dmae_lock", sc->unit);
2557    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2558
2559    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2560             "bxe%d_phy_lock", sc->unit);
2561    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2562
2563    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2564             "bxe%d_fwmb_lock", sc->unit);
2565    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2566
2567    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2568             "bxe%d_print_lock", sc->unit);
2569    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2570
2571    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2572             "bxe%d_stats_lock", sc->unit);
2573    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2574
2575    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2576             "bxe%d_mcast_lock", sc->unit);
2577    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2578}
2579
2580static void
2581bxe_release_mutexes(struct bxe_softc *sc)
2582{
2583#ifdef BXE_CORE_LOCK_SX
2584    sx_destroy(&sc->core_sx);
2585#else
2586    if (mtx_initialized(&sc->core_mtx)) {
2587        mtx_destroy(&sc->core_mtx);
2588    }
2589#endif
2590
2591    if (mtx_initialized(&sc->sp_mtx)) {
2592        mtx_destroy(&sc->sp_mtx);
2593    }
2594
2595    if (mtx_initialized(&sc->dmae_mtx)) {
2596        mtx_destroy(&sc->dmae_mtx);
2597    }
2598
2599    if (mtx_initialized(&sc->port.phy_mtx)) {
2600        mtx_destroy(&sc->port.phy_mtx);
2601    }
2602
2603    if (mtx_initialized(&sc->fwmb_mtx)) {
2604        mtx_destroy(&sc->fwmb_mtx);
2605    }
2606
2607    if (mtx_initialized(&sc->print_mtx)) {
2608        mtx_destroy(&sc->print_mtx);
2609    }
2610
2611    if (mtx_initialized(&sc->stats_mtx)) {
2612        mtx_destroy(&sc->stats_mtx);
2613    }
2614
2615    if (mtx_initialized(&sc->mcast_mtx)) {
2616        mtx_destroy(&sc->mcast_mtx);
2617    }
2618}
2619
2620static void
2621bxe_tx_disable(struct bxe_softc* sc)
2622{
2623    if_t ifp = sc->ifp;
2624
2625    /* tell the stack the driver is stopped and TX queue is full */
2626    if (ifp !=  NULL) {
2627        if_setdrvflags(ifp, 0);
2628    }
2629}
2630
2631static void
2632bxe_drv_pulse(struct bxe_softc *sc)
2633{
2634    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2635             sc->fw_drv_pulse_wr_seq);
2636}
2637
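/*
 * Number of TX buffer descriptors still available on a fastpath ring:
 * used = SUB_S16(prod, cons) counts descriptors currently in flight, e.g.
 * with prod=5 and cons=2 three are in use and (tx_ring_size - 3) remain.
 */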
2638static inline uint16_t
2639bxe_tx_avail(struct bxe_softc *sc,
2640             struct bxe_fastpath *fp)
2641{
2642    int16_t  used;
2643    uint16_t prod;
2644    uint16_t cons;
2645
2646    prod = fp->tx_bd_prod;
2647    cons = fp->tx_bd_cons;
2648
2649    used = SUB_S16(prod, cons);
2650
2651    return (int16_t)(sc->tx_ring_size) - used;
2652}
2653
2654static inline int
2655bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2656{
2657    uint16_t hw_cons;
2658
2659    mb(); /* status block fields can change */
2660    hw_cons = le16toh(*fp->tx_cons_sb);
2661    return (hw_cons != fp->tx_pkt_cons);
2662}
2663
2664static inline uint8_t
2665bxe_has_tx_work(struct bxe_fastpath *fp)
2666{
2667    /* expand this for multi-cos if ever supported */
2668    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2669}
2670
2671static inline int
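/*
 * Check whether new receive completions are pending by comparing the RCQ
 * consumer index reported in the status block against the driver's copy.
 * If the reported index sits on a ring boundary entry it is first advanced
 * by one, mirroring the "next element" adjustment done in bxe_rxeof().
 */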
2672bxe_has_rx_work(struct bxe_fastpath *fp)
2673{
2674    uint16_t rx_cq_cons_sb;
2675
2676    mb(); /* status block fields can change */
2677    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2678    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2679        rx_cq_cons_sb++;
2680    return (fp->rx_cq_cons != rx_cq_cons_sb);
2681}
2682
2683static void
2684bxe_sp_event(struct bxe_softc    *sc,
2685             struct bxe_fastpath *fp,
2686             union eth_rx_cqe    *rr_cqe)
2687{
2688    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2689    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2690    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2691    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2692
2693    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2694          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2695
2696    switch (command) {
2697    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2698        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2699        drv_cmd = ECORE_Q_CMD_UPDATE;
2700        break;
2701
2702    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2703        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2704        drv_cmd = ECORE_Q_CMD_SETUP;
2705        break;
2706
2707    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2708        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2709        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2710        break;
2711
2712    case (RAMROD_CMD_ID_ETH_HALT):
2713        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2714        drv_cmd = ECORE_Q_CMD_HALT;
2715        break;
2716
2717    case (RAMROD_CMD_ID_ETH_TERMINATE):
2718        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2719        drv_cmd = ECORE_Q_CMD_TERMINATE;
2720        break;
2721
2722    case (RAMROD_CMD_ID_ETH_EMPTY):
2723        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2724        drv_cmd = ECORE_Q_CMD_EMPTY;
2725        break;
2726
2727    default:
2728        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2729              command, fp->index);
2730        return;
2731    }
2732
2733    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2734        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2735        /*
2736         * q_obj->complete_cmd() failure means that this was
2737         * an unexpected completion.
2738         *
2739         * In this case we don't want to increase the sc->spq_left
2740         * because apparently we haven't sent this command the first
2741         * place.
2742         */
2743        // bxe_panic(sc, ("Unexpected SP completion\n"));
2744        return;
2745    }
2746
2747    atomic_add_acq_long(&sc->cq_spq_left, 1);
2748
2749    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2750          atomic_load_acq_long(&sc->cq_spq_left));
2751}
2752
2753/*
2754 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2755 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2756 * the current aggregation queue as in-progress.
2757 */
2758static void
2759bxe_tpa_start(struct bxe_softc            *sc,
2760              struct bxe_fastpath         *fp,
2761              uint16_t                    queue,
2762              uint16_t                    cons,
2763              uint16_t                    prod,
2764              struct eth_fast_path_rx_cqe *cqe)
2765{
2766    struct bxe_sw_rx_bd tmp_bd;
2767    struct bxe_sw_rx_bd *rx_buf;
2768    struct eth_rx_bd *rx_bd;
2769    int max_agg_queues;
2770    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2771    uint16_t index;
2772
2773    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2774                       "cons=%d prod=%d\n",
2775          fp->index, queue, cons, prod);
2776
2777    max_agg_queues = MAX_AGG_QS(sc);
2778
2779    KASSERT((queue < max_agg_queues),
2780            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2781             fp->index, queue, max_agg_queues));
2782
2783    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2784            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2785             fp->index, queue));
2786
2787    /* copy the existing mbuf and mapping from the TPA pool */
2788    tmp_bd = tpa_info->bd;
2789
2790    if (tmp_bd.m == NULL) {
2791        uint32_t *tmp;
2792
2793        tmp = (uint32_t *)cqe;
2794
2795        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2796              fp->index, queue, cons, prod);
2797        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2798            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2799
2800        /* XXX Error handling? */
2801        return;
2802    }
2803
2804    /* change the TPA queue to the start state */
2805    tpa_info->state            = BXE_TPA_STATE_START;
2806    tpa_info->placement_offset = cqe->placement_offset;
2807    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2808    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2809    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2810
2811    fp->rx_tpa_queue_used |= (1 << queue);
2812
2813    /*
2814     * If all the buffer descriptors are filled with mbufs then fill in
2815     * the current consumer index with a new BD. Else if a maximum Rx
2816     * buffer limit is imposed then fill in the next producer index.
2817     */
2818    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2819                prod : cons;
2820
2821    /* move the received mbuf and mapping to TPA pool */
2822    tpa_info->bd = fp->rx_mbuf_chain[cons];
2823
2824    /* release any existing RX BD mbuf mappings */
2825    if (cons != index) {
2826        rx_buf = &fp->rx_mbuf_chain[cons];
2827
2828        if (rx_buf->m_map != NULL) {
2829            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2830                            BUS_DMASYNC_POSTREAD);
2831            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2832        }
2833
2834        /*
2835         * We get here when the maximum number of rx buffers is less than
2836         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2837         * it out here without concern of a memory leak.
2838         */
2839        fp->rx_mbuf_chain[cons].m = NULL;
2840    }
2841
2842    /* update the Rx SW BD with the mbuf info from the TPA pool */
2843    fp->rx_mbuf_chain[index] = tmp_bd;
2844
2845    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2846    rx_bd = &fp->rx_chain[index];
2847    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2848    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2849}
2850
2851/*
2852 * When a TPA aggregation is completed, loop through the individual mbufs
2853 * of the aggregation, combining them into a single mbuf which will be sent
2854 * up the stack. Refill all freed SGEs with mbufs as we go along.
2855 */
2856static int
2857bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2858                   struct bxe_fastpath       *fp,
2859                   struct bxe_sw_tpa_info    *tpa_info,
2860                   uint16_t                  queue,
2861                   uint16_t                  pages,
2862                   struct mbuf               *m,
2863                   struct eth_end_agg_rx_cqe *cqe,
2864                   uint16_t                  cqe_idx)
2865{
2866    struct mbuf *m_frag;
2867    uint32_t frag_len, frag_size, i;
2868    uint16_t sge_idx;
2869    int rc = 0;
2870    int j;
2871
2872    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2873
2874    BLOGD(sc, DBG_LRO,
2875          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2876          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2877
2878    /* make sure the aggregated frame is not too big to handle */
2879    if (pages > 8 * PAGES_PER_SGE) {
2880
2881        uint32_t *tmp = (uint32_t *)cqe;
2882
2883        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2884                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2885              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2886              tpa_info->len_on_bd, frag_size);
2887
2888        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2889            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2890
2891        bxe_panic(sc, ("sge page count error\n"));
2892        return (EINVAL);
2893    }
2894
2895    /*
2896     * Scan through the scatter gather list pulling individual mbufs into a
2897     * single mbuf for the host stack.
2898     */
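    /*
     * Each SGL entry refers to one SGE buffer of PAGES_PER_SGE pages
     * (SGE_PAGES bytes), so 'j' walks the SGL one entry at a time while
     * 'i' advances in steps of PAGES_PER_SGE until 'pages' is covered.
     */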
2899    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2900        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2901
2902        /*
2903         * Firmware gives the indices of the SGE as if the ring is an array
2904         * (meaning that the "next" element will consume 2 indices).
2905         */
2906        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2907
2908        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2909                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2910              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2911
2912        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2913
2914        /* allocate a new mbuf for the SGE */
2915        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2916        if (rc) {
2917            /* Leave all remaining SGEs in the ring! */
2918            return (rc);
2919        }
2920
2921        /* update the fragment length */
2922        m_frag->m_len = frag_len;
2923
2924        /* concatenate the fragment to the head mbuf */
2925        m_cat(m, m_frag);
2926        fp->eth_q_stats.mbuf_alloc_sge--;
2927
2928        /* update the TPA mbuf size and remaining fragment size */
2929        m->m_pkthdr.len += frag_len;
2930        frag_size -= frag_len;
2931    }
2932
2933    BLOGD(sc, DBG_LRO,
2934          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2935          fp->index, queue, frag_size);
2936
2937    return (rc);
2938}
2939
2940static inline void
2941bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2942{
2943    int i, j;
2944
2945    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2946        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2947
2948        for (j = 0; j < 2; j++) {
2949            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2950            idx--;
2951        }
2952    }
2953}
2954
2955static inline void
2956bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2957{
2958    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2959    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2960
2961    /*
2962     * Clear the last two indices in each page. These are the indices that
2963     * correspond to the "next" element, hence will never be indicated and
2964     * should be removed from the calculations.
2965     */
2966    bxe_clear_sge_mask_next_elems(fp);
2967}
2968
2969static inline void
2970bxe_update_last_max_sge(struct bxe_fastpath *fp,
2971                        uint16_t            idx)
2972{
2973    uint16_t last_max = fp->last_max_sge;
2974
2975    if (SUB_S16(idx, last_max) > 0) {
2976        fp->last_max_sge = idx;
2977    }
2978}
2979
2980static inline void
2981bxe_update_sge_prod(struct bxe_softc          *sc,
2982                    struct bxe_fastpath       *fp,
2983                    uint16_t                  sge_len,
2984                    union eth_sgl_or_raw_data *cqe)
2985{
2986    uint16_t last_max, last_elem, first_elem;
2987    uint16_t delta = 0;
2988    uint16_t i;
2989
2990    if (!sge_len) {
2991        return;
2992    }
2993
2994    /* first mark all used pages */
2995    for (i = 0; i < sge_len; i++) {
2996        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2997                            RX_SGE(le16toh(cqe->sgl[i])));
2998    }
2999
3000    BLOGD(sc, DBG_LRO,
3001          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3002          fp->index, sge_len - 1,
3003          le16toh(cqe->sgl[sge_len - 1]));
3004
3005    /* assume that the last SGE index is the biggest */
3006    bxe_update_last_max_sge(fp,
3007                            le16toh(cqe->sgl[sge_len - 1]));
3008
3009    last_max = RX_SGE(fp->last_max_sge);
3010    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3011    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3012
3013    /* if ring is not full */
3014    if (last_elem + 1 != first_elem) {
3015        last_elem++;
3016    }
3017
3018    /* now update the prod */
3019    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3020        if (__predict_true(fp->sge_mask[i])) {
3021            break;
3022        }
3023
3024        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3025        delta += BIT_VEC64_ELEM_SZ;
3026    }
3027
3028    if (delta > 0) {
3029        fp->rx_sge_prod += delta;
3030        /* clear page-end entries */
3031        bxe_clear_sge_mask_next_elems(fp);
3032    }
3033
3034    BLOGD(sc, DBG_LRO,
3035          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3036          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3037}
3038
3039/*
3040 * The aggregation on the current TPA queue has completed. Pull the individual
3041 * mbuf fragments together into a single mbuf, perform all necessary checksum
3042 * calculations, and send the resulting mbuf to the stack.
3043 */
3044static void
3045bxe_tpa_stop(struct bxe_softc          *sc,
3046             struct bxe_fastpath       *fp,
3047             struct bxe_sw_tpa_info    *tpa_info,
3048             uint16_t                  queue,
3049             uint16_t                  pages,
3050             struct eth_end_agg_rx_cqe *cqe,
3051             uint16_t                  cqe_idx)
3052{
3053    if_t ifp = sc->ifp;
3054    struct mbuf *m;
3055    int rc = 0;
3056
3057    BLOGD(sc, DBG_LRO,
3058          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3059          fp->index, queue, tpa_info->placement_offset,
3060          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3061
3062    m = tpa_info->bd.m;
3063
3064    /* allocate a replacement before modifying existing mbuf */
3065    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3066    if (rc) {
3067        /* drop the frame and log an error */
3068        fp->eth_q_stats.rx_soft_errors++;
3069        goto bxe_tpa_stop_exit;
3070    }
3071
3072    /* we have a replacement, fixup the current mbuf */
3073    m_adj(m, tpa_info->placement_offset);
3074    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3075
3076    /* mark the checksums valid (taken care of by the firmware) */
3077    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3078    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3079    m->m_pkthdr.csum_data = 0xffff;
3080    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3081                               CSUM_IP_VALID   |
3082                               CSUM_DATA_VALID |
3083                               CSUM_PSEUDO_HDR);
3084
3085    /* aggregate all of the SGEs into a single mbuf */
3086    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3087    if (rc) {
3088        /* drop the packet and log an error */
3089        fp->eth_q_stats.rx_soft_errors++;
3090        m_freem(m);
3091    } else {
3092        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3093            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3094            m->m_flags |= M_VLANTAG;
3095        }
3096
3097        /* assign the packet to this interface */
3098        if_setrcvif(m, ifp);
3099
3100#if __FreeBSD_version >= 800000
3101        /* specify what RSS queue was used for this flow */
3102        m->m_pkthdr.flowid = fp->index;
3103        BXE_SET_FLOWID(m);
3104#endif
3105
3106        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3107        fp->eth_q_stats.rx_tpa_pkts++;
3108
3109        /* pass the frame to the stack */
3110        if_input(ifp, m);
3111    }
3112
3113    /* we passed an mbuf up the stack or dropped the frame */
3114    fp->eth_q_stats.mbuf_alloc_tpa--;
3115
3116bxe_tpa_stop_exit:
3117
3118    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3119    fp->rx_tpa_queue_used &= ~(1 << queue);
3120}
3121
3122static uint8_t
3123bxe_service_rxsgl(
3124                 struct bxe_fastpath *fp,
3125                 uint16_t len,
3126                 uint16_t lenonbd,
3127                 struct mbuf *m,
3128                 struct eth_fast_path_rx_cqe *cqe_fp)
3129{
3130    struct mbuf *m_frag;
3131    uint16_t frags, frag_len;
3132    uint16_t sge_idx = 0;
3133    uint16_t j;
3134    uint8_t i, rc = 0;
3135    uint32_t frag_size;
3136
3137    /* adjust the mbuf */
3138    m->m_len = lenonbd;
3139
3140    frag_size =  len - lenonbd;
3141    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3142
3143    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3144        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3145
3146        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3147        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3148        m_frag->m_len = frag_len;
3149
3150       /* allocate a new mbuf for the SGE */
3151        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3152        if (rc) {
3153            /* Leave all remaining SGEs in the ring! */
3154            return (rc);
3155        }
3156        fp->eth_q_stats.mbuf_alloc_sge--;
3157
3158        /* concatenate the fragment to the head mbuf */
3159        m_cat(m, m_frag);
3160
3161        frag_size -= frag_len;
3162    }
3163
3164    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3165
3166    return rc;
3167}
3168
3169static uint8_t
3170bxe_rxeof(struct bxe_softc    *sc,
3171          struct bxe_fastpath *fp)
3172{
3173    if_t ifp = sc->ifp;
3174    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3175    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3176    int rx_pkts = 0;
3177    int rc = 0;
3178
3179    BXE_FP_RX_LOCK(fp);
3180
3181    /* the CQ "next element" (next-page pointer) is the size of a regular CQE, so skip past it at a page boundary */
3182    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3183    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3184        hw_cq_cons++;
3185    }
3186
3187    bd_cons = fp->rx_bd_cons;
3188    bd_prod = fp->rx_bd_prod;
3189    bd_prod_fw = bd_prod;
3190    sw_cq_cons = fp->rx_cq_cons;
3191    sw_cq_prod = fp->rx_cq_prod;
3192
3193    /*
3194     * Memory barrier necessary as speculative reads of the rx
3195     * buffer can be ahead of the index in the status block
3196     */
3197    rmb();
3198
3199    BLOGD(sc, DBG_RX,
3200          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3201          fp->index, hw_cq_cons, sw_cq_cons);
3202
3203    while (sw_cq_cons != hw_cq_cons) {
3204        struct bxe_sw_rx_bd *rx_buf = NULL;
3205        union eth_rx_cqe *cqe;
3206        struct eth_fast_path_rx_cqe *cqe_fp;
3207        uint8_t cqe_fp_flags;
3208        enum eth_rx_cqe_type cqe_fp_type;
3209        uint16_t len, lenonbd, pad;
3210        struct mbuf *m = NULL;
3211
3212        comp_ring_cons = RCQ(sw_cq_cons);
3213        bd_prod = RX_BD(bd_prod);
3214        bd_cons = RX_BD(bd_cons);
3215
3216        cqe          = &fp->rcq_chain[comp_ring_cons];
3217        cqe_fp       = &cqe->fast_path_cqe;
3218        cqe_fp_flags = cqe_fp->type_error_flags;
3219        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3220
3221        BLOGD(sc, DBG_RX,
3222              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3223              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3224              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3225              fp->index,
3226              hw_cq_cons,
3227              sw_cq_cons,
3228              bd_prod,
3229              bd_cons,
3230              CQE_TYPE(cqe_fp_flags),
3231              cqe_fp_flags,
3232              cqe_fp->status_flags,
3233              le32toh(cqe_fp->rss_hash_result),
3234              le16toh(cqe_fp->vlan_tag),
3235              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3236              le16toh(cqe_fp->len_on_bd));
3237
3238        /* is this a slowpath msg? */
3239        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3240            bxe_sp_event(sc, fp, cqe);
3241            goto next_cqe;
3242        }
3243
3244        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3245
3246        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3247            struct bxe_sw_tpa_info *tpa_info;
3248            uint16_t frag_size, pages;
3249            uint8_t queue;
3250
3251            if (CQE_TYPE_START(cqe_fp_type)) {
3252                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3253                              bd_cons, bd_prod, cqe_fp);
3254                m = NULL; /* packet not ready yet */
3255                goto next_rx;
3256            }
3257
3258            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3259                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3260
3261            queue = cqe->end_agg_cqe.queue_index;
3262            tpa_info = &fp->rx_tpa_info[queue];
3263
3264            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3265                  fp->index, queue);
3266
3267            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3268                         tpa_info->len_on_bd);
3269            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3270
3271            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3272                         &cqe->end_agg_cqe, comp_ring_cons);
3273
3274            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3275
3276            goto next_cqe;
3277        }
3278
3279        /* non TPA */
3280
3281        /* is this an error packet? */
3282        if (__predict_false(cqe_fp_flags &
3283                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3284            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3285            fp->eth_q_stats.rx_soft_errors++;
3286            goto next_rx;
3287        }
3288
3289        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3290        lenonbd = le16toh(cqe_fp->len_on_bd);
3291        pad = cqe_fp->placement_offset;
3292
3293        m = rx_buf->m;
3294
3295        if (__predict_false(m == NULL)) {
3296            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3297                  bd_cons, fp->index);
3298            goto next_rx;
3299        }
3300
3301        /* XXX double copy if packet length under a threshold */
3302
3303        /*
3304         * If all the buffer descriptors are filled with mbufs then fill in
3305         * the current consumer index with a new BD. Else if a maximum Rx
3306         * buffer limit is imposed then fill in the next producer index.
3307         */
3308        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3309                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3310                                      bd_prod : bd_cons);
3311        if (rc != 0) {
3312
3313            /* we simply reuse the received mbuf and don't post it to the stack */
3314            m = NULL;
3315
3316            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3317                  fp->index, rc);
3318            fp->eth_q_stats.rx_soft_errors++;
3319
3320            if (sc->max_rx_bufs != RX_BD_USABLE) {
3321                /* copy this consumer index to the producer index */
3322                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3323                       sizeof(struct bxe_sw_rx_bd));
3324                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3325            }
3326
3327            goto next_rx;
3328        }
3329
3330        /* current mbuf was detached from the bd */
3331        fp->eth_q_stats.mbuf_alloc_rx--;
3332
3333        /* we allocated a replacement mbuf, fixup the current one */
3334        m_adj(m, pad);
3335        m->m_pkthdr.len = m->m_len = len;
3336
3337        if ((len > 60) && (len > lenonbd)) {
3338            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3339            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3340            if (rc)
3341                break;
3342            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3343        } else if (lenonbd < len) {
3344            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3345        }
3346
3347        /* assign the packet to this interface */
3348        if_setrcvif(m, ifp);
3349
3350        /* assume no hardware checksum has completed */
3351        m->m_pkthdr.csum_flags = 0;
3352
3353        /* validate checksum if offload enabled */
3354        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3355            /* check for a valid IP frame */
3356            if (!(cqe->fast_path_cqe.status_flags &
3357                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3358                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3359                if (__predict_false(cqe_fp_flags &
3360                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3361                    fp->eth_q_stats.rx_hw_csum_errors++;
3362                } else {
3363                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3364                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3365                }
3366            }
3367
3368            /* check for a valid TCP/UDP frame */
3369            if (!(cqe->fast_path_cqe.status_flags &
3370                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3371                if (__predict_false(cqe_fp_flags &
3372                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3373                    fp->eth_q_stats.rx_hw_csum_errors++;
3374                } else {
3375                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3376                    m->m_pkthdr.csum_data = 0xFFFF;
3377                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3378                                               CSUM_PSEUDO_HDR);
3379                }
3380            }
3381        }
3382
3383        /* if there is a VLAN tag then flag that info */
3384        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3385            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3386            m->m_flags |= M_VLANTAG;
3387        }
3388
3389#if __FreeBSD_version >= 800000
3390        /* specify what RSS queue was used for this flow */
3391        m->m_pkthdr.flowid = fp->index;
3392        BXE_SET_FLOWID(m);
3393#endif
3394
3395next_rx:
3396
3397        bd_cons    = RX_BD_NEXT(bd_cons);
3398        bd_prod    = RX_BD_NEXT(bd_prod);
3399        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3400
3401        /* pass the frame to the stack */
3402        if (__predict_true(m != NULL)) {
3403            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3404            rx_pkts++;
3405            if_input(ifp, m);
3406        }
3407
3408next_cqe:
3409
3410        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3411        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3412
3413        /* limit spinning on the queue */
3414        if (rc != 0)
3415            break;
3416
3417        if (rx_pkts == sc->rx_budget) {
3418            fp->eth_q_stats.rx_budget_reached++;
3419            break;
3420        }
3421    } /* while work to do */
3422
3423    fp->rx_bd_cons = bd_cons;
3424    fp->rx_bd_prod = bd_prod_fw;
3425    fp->rx_cq_cons = sw_cq_cons;
3426    fp->rx_cq_prod = sw_cq_prod;
3427
3428    /* Update producers */
3429    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3430
3431    fp->eth_q_stats.rx_pkts += rx_pkts;
3432    fp->eth_q_stats.rx_calls++;
3433
3434    BXE_FP_RX_UNLOCK(fp);
3435
3436    return (sw_cq_cons != hw_cq_cons);
3437}
3438
3439static uint16_t
3440bxe_free_tx_pkt(struct bxe_softc    *sc,
3441                struct bxe_fastpath *fp,
3442                uint16_t            idx)
3443{
3444    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3445    struct eth_tx_start_bd *tx_start_bd;
3446    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3447    uint16_t new_cons;
3448    int nbd;
3449
3450    /* unmap the mbuf from non-paged memory */
3451    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3452
3453    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3454    nbd = le16toh(tx_start_bd->nbd) - 1;
3455
3456    new_cons = (tx_buf->first_bd + nbd);
3457
3458    /* free the mbuf */
3459    if (__predict_true(tx_buf->m != NULL)) {
3460        m_freem(tx_buf->m);
3461        fp->eth_q_stats.mbuf_alloc_tx--;
3462    } else {
3463        fp->eth_q_stats.tx_chain_lost_mbuf++;
3464    }
3465
3466    tx_buf->m = NULL;
3467    tx_buf->first_bd = 0;
3468
3469    return (new_cons);
3470}
3471
3472/* transmit timeout watchdog */
3473static int
3474bxe_watchdog(struct bxe_softc    *sc,
3475             struct bxe_fastpath *fp)
3476{
3477    BXE_FP_TX_LOCK(fp);
3478
3479    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3480        BXE_FP_TX_UNLOCK(fp);
3481        return (0);
3482    }
3483
3484    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3485    if (sc->trigger_grcdump) {
3486         /* taking grcdump */
3487         bxe_grc_dump(sc);
3488    }
3489
3490    BXE_FP_TX_UNLOCK(fp);
3491
3492    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3493    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3494
3495    return (-1);
3496}
3497
3498/* processes transmit completions */
3499static uint8_t
3500bxe_txeof(struct bxe_softc    *sc,
3501          struct bxe_fastpath *fp)
3502{
3503    if_t ifp = sc->ifp;
3504    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3505    uint16_t tx_bd_avail;
3506
3507    BXE_FP_TX_LOCK_ASSERT(fp);
3508
3509    bd_cons = fp->tx_bd_cons;
3510    hw_cons = le16toh(*fp->tx_cons_sb);
3511    sw_cons = fp->tx_pkt_cons;
3512
3513    while (sw_cons != hw_cons) {
3514        pkt_cons = TX_BD(sw_cons);
3515
3516        BLOGD(sc, DBG_TX,
3517              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3518              fp->index, hw_cons, sw_cons, pkt_cons);
3519
3520        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3521
3522        sw_cons++;
3523    }
3524
3525    fp->tx_pkt_cons = sw_cons;
3526    fp->tx_bd_cons  = bd_cons;
3527
3528    BLOGD(sc, DBG_TX,
3529          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3530          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3531
3532    mb();
3533
3534    tx_bd_avail = bxe_tx_avail(sc, fp);
3535
3536    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3537        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3538    } else {
3539        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3540    }
3541
3542    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3543        /* reset the watchdog timer if there are pending transmits */
3544        fp->watchdog_timer = BXE_TX_TIMEOUT;
3545        return (TRUE);
3546    } else {
3547        /* clear watchdog when there are no pending transmits */
3548        fp->watchdog_timer = 0;
3549        return (FALSE);
3550    }
3551}
3552
3553static void
3554bxe_drain_tx_queues(struct bxe_softc *sc)
3555{
3556    struct bxe_fastpath *fp;
3557    int i, count;
3558
3559    /* wait until all TX fastpath tasks have completed */
3560    for (i = 0; i < sc->num_queues; i++) {
3561        fp = &sc->fp[i];
3562
3563        count = 1000;
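        /* poll below for up to 1000 iterations * 1ms = ~1 second per queue */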
3564
3565        while (bxe_has_tx_work(fp)) {
3566
3567            BXE_FP_TX_LOCK(fp);
3568            bxe_txeof(sc, fp);
3569            BXE_FP_TX_UNLOCK(fp);
3570
3571            if (count == 0) {
3572                BLOGE(sc, "Timeout waiting for fp[%d] "
3573                          "transmits to complete!\n", i);
3574                bxe_panic(sc, ("tx drain failure\n"));
3575                return;
3576            }
3577
3578            count--;
3579            DELAY(1000);
3580            rmb();
3581        }
3582    }
3583
3584    return;
3585}
3586
3587static int
3588bxe_del_all_macs(struct bxe_softc          *sc,
3589                 struct ecore_vlan_mac_obj *mac_obj,
3590                 int                       mac_type,
3591                 uint8_t                   wait_for_comp)
3592{
3593    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3594    int rc;
3595
3596    /* wait for completion of requested */
3597    if (wait_for_comp) {
3598        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3599    }
3600
3601    /* Set the mac type of addresses we want to clear */
3602    bxe_set_bit(mac_type, &vlan_mac_flags);
3603
3604    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3605    if (rc < 0) {
3606        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3607            rc, mac_type, wait_for_comp);
3608    }
3609
3610    return (rc);
3611}
3612
3613static int
3614bxe_fill_accept_flags(struct bxe_softc *sc,
3615                      uint32_t         rx_mode,
3616                      unsigned long    *rx_accept_flags,
3617                      unsigned long    *tx_accept_flags)
3618{
3619    /* Clear the flags first */
3620    *rx_accept_flags = 0;
3621    *tx_accept_flags = 0;
3622
3623    switch (rx_mode) {
3624    case BXE_RX_MODE_NONE:
3625        /*
3626         * 'drop all' supersedes any accept flags that may have been
3627         * passed to the function.
3628         */
3629        break;
3630
3631    case BXE_RX_MODE_NORMAL:
3632        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3633        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3634        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3635
3636        /* internal switching mode */
3637        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3638        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3640
3641        break;
3642
3643    case BXE_RX_MODE_ALLMULTI:
3644        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3646        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3647
3648        /* internal switching mode */
3649        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3650        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3651        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3652
3653        break;
3654
3655    case BXE_RX_MODE_PROMISC:
3656        /*
3657         * According to the definition of SI mode, an interface in promiscuous
3658         * mode should receive both matched and unmatched (in terms of the port)
3659         * unicast packets.
3660         */
3661        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3662        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3663        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3664        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3665
3666        /* internal switching mode */
3667        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3668        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3669
3670        if (IS_MF_SI(sc)) {
3671            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3672        } else {
3673            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3674        }
3675
3676        break;
3677
3678    default:
3679        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3680        return (-1);
3681    }
3682
3683    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3684    if (rx_mode != BXE_RX_MODE_NONE) {
3685        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3686        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3687    }
3688
3689    return (0);
3690}
3691
3692static int
3693bxe_set_q_rx_mode(struct bxe_softc *sc,
3694                  uint8_t          cl_id,
3695                  unsigned long    rx_mode_flags,
3696                  unsigned long    rx_accept_flags,
3697                  unsigned long    tx_accept_flags,
3698                  unsigned long    ramrod_flags)
3699{
3700    struct ecore_rx_mode_ramrod_params ramrod_param;
3701    int rc;
3702
3703    memset(&ramrod_param, 0, sizeof(ramrod_param));
3704
3705    /* Prepare ramrod parameters */
3706    ramrod_param.cid = 0;
3707    ramrod_param.cl_id = cl_id;
3708    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3709    ramrod_param.func_id = SC_FUNC(sc);
3710
3711    ramrod_param.pstate = &sc->sp_state;
3712    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3713
3714    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3715    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3716
3717    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3718
3719    ramrod_param.ramrod_flags = ramrod_flags;
3720    ramrod_param.rx_mode_flags = rx_mode_flags;
3721
3722    ramrod_param.rx_accept_flags = rx_accept_flags;
3723    ramrod_param.tx_accept_flags = tx_accept_flags;
3724
3725    rc = ecore_config_rx_mode(sc, &ramrod_param);
3726    if (rc < 0) {
3727        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3728            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3729            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3730            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3731            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3732        return (rc);
3733    }
3734
3735    return (0);
3736}
3737
3738static int
3739bxe_set_storm_rx_mode(struct bxe_softc *sc)
3740{
3741    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3742    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3743    int rc;
3744
3745    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3746                               &tx_accept_flags);
3747    if (rc) {
3748        return (rc);
3749    }
3750
3751    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3752    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3753
3754    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3755    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3756                              rx_accept_flags, tx_accept_flags,
3757                              ramrod_flags));
3758}
3759
3760/* returns the "mcp load_code" according to global load_count array */
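/*
 * e.g. the first function to load on a path performs COMMON init, the
 * first function on each port performs PORT init, and any additional
 * function performs FUNCTION-only init.
 */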
3761static int
3762bxe_nic_load_no_mcp(struct bxe_softc *sc)
3763{
3764    int path = SC_PATH(sc);
3765    int port = SC_PORT(sc);
3766
3767    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3768          path, load_count[path][0], load_count[path][1],
3769          load_count[path][2]);
3770    load_count[path][0]++;
3771    load_count[path][1 + port]++;
3772    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3773          path, load_count[path][0], load_count[path][1],
3774          load_count[path][2]);
3775    if (load_count[path][0] == 1) {
3776        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3777    } else if (load_count[path][1 + port] == 1) {
3778        return (FW_MSG_CODE_DRV_LOAD_PORT);
3779    } else {
3780        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3781    }
3782}
3783
3784/* returns the "mcp load_code" according to global load_count array */
3785static int
3786bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3787{
3788    int port = SC_PORT(sc);
3789    int path = SC_PATH(sc);
3790
3791    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3792          path, load_count[path][0], load_count[path][1],
3793          load_count[path][2]);
3794    load_count[path][0]--;
3795    load_count[path][1 + port]--;
3796    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3797          path, load_count[path][0], load_count[path][1],
3798          load_count[path][2]);
3799    if (load_count[path][0] == 0) {
3800        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3801    } else if (load_count[path][1 + port] == 0) {
3802        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3803    } else {
3804        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3805    }
3806}
3807
3808/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3809static uint32_t
3810bxe_send_unload_req(struct bxe_softc *sc,
3811                    int              unload_mode)
3812{
3813    uint32_t reset_code = 0;
3814
3815    /* Select the UNLOAD request mode */
3816    if (unload_mode == UNLOAD_NORMAL) {
3817        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3818    } else {
3819        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3820    }
3821
3822    /* Send the request to the MCP */
3823    if (!BXE_NOMCP(sc)) {
3824        reset_code = bxe_fw_command(sc, reset_code, 0);
3825    } else {
3826        reset_code = bxe_nic_unload_no_mcp(sc);
3827    }
3828
3829    return (reset_code);
3830}
3831
3832/* send UNLOAD_DONE command to the MCP */
3833static void
3834bxe_send_unload_done(struct bxe_softc *sc,
3835                     uint8_t          keep_link)
3836{
3837    uint32_t reset_param =
3838        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3839
3840    /* Report UNLOAD_DONE to MCP */
3841    if (!BXE_NOMCP(sc)) {
3842        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3843    }
3844}
3845
3846static int
3847bxe_func_wait_started(struct bxe_softc *sc)
3848{
3849    int tout = 50;
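    /* up to 50 iterations * 20ms delay below = ~1 second of polling */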
3850
3851    if (!sc->port.pmf) {
3852        return (0);
3853    }
3854
3855    /*
3856     * (assumption: No Attention from MCP at this stage)
3857     * The PMF is probably in the middle of a TX disable/enable transaction:
3858     * 1. Sync the ISR for the default SB
3859     * 2. Sync the SP queue - this guarantees that attention handling started
3860     * 3. Wait until the TX disable/enable transaction completes
3861     *
3862     * Steps 1+2 guarantee that if a DCBX attention was scheduled it has
3863     * already changed the pending bit of the transaction from
3864     * STARTED-->TX_STOPPED; if we already received the completion for the
3865     * transaction, the state is TX_STOPPED. The state returns to STARTED
3866     * after the TX_STOPPED-->STARTED transaction completes.
3867     */
3868
3869    /* XXX make sure default SB ISR is done */
3870    /* need a way to synchronize an irq (intr_mtx?) */
3871
3872    /* XXX flush any work queues */
3873
3874    while (ecore_func_get_state(sc, &sc->func_obj) !=
3875           ECORE_F_STATE_STARTED && tout--) {
3876        DELAY(20000);
3877    }
3878
3879    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3880        /*
3881         * Failed to complete the transaction in a "good way"
3882         * Force both transactions with CLR bit.
3883         */
3884        struct ecore_func_state_params func_params = { NULL };
3885
3886        BLOGE(sc, "Unexpected function state! "
3887                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3888
3889        func_params.f_obj = &sc->func_obj;
3890        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3891
3892        /* STARTED-->TX_STOPPED */
3893        func_params.cmd = ECORE_F_CMD_TX_STOP;
3894        ecore_func_state_change(sc, &func_params);
3895
3896        /* TX_STOPPED-->STARTED */
3897        func_params.cmd = ECORE_F_CMD_TX_START;
3898        return (ecore_func_state_change(sc, &func_params));
3899    }
3900
3901    return (0);
3902}
3903
3904static int
3905bxe_stop_queue(struct bxe_softc *sc,
3906               int              index)
3907{
3908    struct bxe_fastpath *fp = &sc->fp[index];
3909    struct ecore_queue_state_params q_params = { NULL };
3910    int rc;
3911
3912    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3913
3914    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3915    /* We want to wait for completion in this context */
3916    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3917
3918    /* Stop the primary connection: */
3919
3920    /* ...halt the connection */
3921    q_params.cmd = ECORE_Q_CMD_HALT;
3922    rc = ecore_queue_state_change(sc, &q_params);
3923    if (rc) {
3924        return (rc);
3925    }
3926
3927    /* ...terminate the connection */
3928    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3929    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3930    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3931    rc = ecore_queue_state_change(sc, &q_params);
3932    if (rc) {
3933        return (rc);
3934    }
3935
3936    /* ...delete cfc entry */
3937    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3938    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3939    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3940    return (ecore_queue_state_change(sc, &q_params));
3941}
3942
3943/* wait for the outstanding SP commands */
3944static inline uint8_t
3945bxe_wait_sp_comp(struct bxe_softc *sc,
3946                 unsigned long    mask)
3947{
3948    unsigned long tmp;
3949    int tout = 5000; /* wait for 5 secs tops */
3950
3951    while (tout--) {
3952        mb();
3953        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3954            return (TRUE);
3955        }
3956
3957        DELAY(1000);
3958    }
3959
3960    mb();
3961
3962    tmp = atomic_load_acq_long(&sc->sp_state);
3963    if (tmp & mask) {
3964        BLOGE(sc, "Filtering completion timed out: "
3965                  "sp_state 0x%lx, mask 0x%lx\n",
3966              tmp, mask);
3967        return (FALSE);
3968    }
3969
3970    return (FALSE);
3971}
3972
3973static int
3974bxe_func_stop(struct bxe_softc *sc)
3975{
3976    struct ecore_func_state_params func_params = { NULL };
3977    int rc;
3978
3979    /* prepare parameters for function state transitions */
3980    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3981    func_params.f_obj = &sc->func_obj;
3982    func_params.cmd = ECORE_F_CMD_STOP;
3983
3984    /*
3985     * Try to stop the function the 'good way'. If it fails (in case
3986     * of a parity error during bxe_chip_cleanup()) and we are
3987     * not in a debug mode, perform a state transaction in order to
3988     * enable further HW_RESET transaction.
3989     */
3990    rc = ecore_func_state_change(sc, &func_params);
3991    if (rc) {
3992        BLOGE(sc, "FUNC_STOP ramrod failed. "
3993                  "Running a dry transaction (%d)\n", rc);
3994        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3995        return (ecore_func_state_change(sc, &func_params));
3996    }
3997
3998    return (0);
3999}
4000
4001static int
4002bxe_reset_hw(struct bxe_softc *sc,
4003             uint32_t         load_code)
4004{
4005    struct ecore_func_state_params func_params = { NULL };
4006
4007    /* Prepare parameters for function state transitions */
4008    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4009
4010    func_params.f_obj = &sc->func_obj;
4011    func_params.cmd = ECORE_F_CMD_HW_RESET;
4012
4013    func_params.params.hw_init.load_phase = load_code;
4014
4015    return (ecore_func_state_change(sc, &func_params));
4016}
4017
4018static void
4019bxe_int_disable_sync(struct bxe_softc *sc,
4020                     int              disable_hw)
4021{
4022    if (disable_hw) {
4023        /* prevent the HW from sending interrupts */
4024        bxe_int_disable(sc);
4025    }
4026
4027    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4028    /* make sure all ISRs are done */
4029
4030    /* XXX make sure sp_task is not running */
4031    /* cancel and flush work queues */
4032}
4033
4034static void
4035bxe_chip_cleanup(struct bxe_softc *sc,
4036                 uint32_t         unload_mode,
4037                 uint8_t          keep_link)
4038{
4039    int port = SC_PORT(sc);
4040    struct ecore_mcast_ramrod_params rparam = { NULL };
4041    uint32_t reset_code;
4042    int i, rc = 0;
4043
4044    bxe_drain_tx_queues(sc);
4045
4046    /* give HW time to discard old tx messages */
4047    DELAY(1000);
4048
4049    /* Clean all ETH MACs */
4050    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4051    if (rc < 0) {
4052        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4053    }
4054
4055    /* Clean up UC list  */
4056    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4057    if (rc < 0) {
4058        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4059    }
4060
4061    /* Disable LLH */
4062    if (!CHIP_IS_E1(sc)) {
4063        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4064    }
4065
4066    /* Set "drop all" to stop Rx */
4067
4068    /*
4069     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4070     * a race between the completion code and this code.
4071     */
4072    BXE_MCAST_LOCK(sc);
4073
4074    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4075        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4076    } else {
4077        bxe_set_storm_rx_mode(sc);
4078    }
4079
4080    /* Clean up multicast configuration */
4081    rparam.mcast_obj = &sc->mcast_obj;
4082    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4083    if (rc < 0) {
4084        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4085    }
4086
4087    BXE_MCAST_UNLOCK(sc);
4088
4089    // XXX bxe_iov_chip_cleanup(sc);
4090
4091    /*
4092     * Send the UNLOAD_REQUEST to the MCP. This will return whether
4093     * this function should perform a FUNCTION, PORT, or COMMON HW
4094     * reset.
4095     */
4096    reset_code = bxe_send_unload_req(sc, unload_mode);
4097
4098    /*
4099     * (assumption: No Attention from MCP at this stage)
4100     * PMF probably in the middle of TX disable/enable transaction
4101     */
4102    rc = bxe_func_wait_started(sc);
4103    if (rc) {
4104        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4105    }
4106
4107    /*
4108     * Close multi and leading connections
4109     * Completions for ramrods are collected in a synchronous way
4110     */
4111    for (i = 0; i < sc->num_queues; i++) {
4112        if (bxe_stop_queue(sc, i)) {
4113            goto unload_error;
4114        }
4115    }
4116
4117    /*
4118     * If the SP settings have not completed by now then something has
4119     * gone very wrong.
4120     */
4121    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4122        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4123    }
4124
4125unload_error:
4126
4127    rc = bxe_func_stop(sc);
4128    if (rc) {
4129        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4130    }
4131
4132    /* disable HW interrupts */
4133    bxe_int_disable_sync(sc, TRUE);
4134
4135    /* detach interrupts */
4136    bxe_interrupt_detach(sc);
4137
4138    /* Reset the chip */
4139    rc = bxe_reset_hw(sc, reset_code);
4140    if (rc) {
4141        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4142    }
4143
4144    /* Report UNLOAD_DONE to MCP */
4145    bxe_send_unload_done(sc, keep_link);
4146}
4147
4148static void
4149bxe_disable_close_the_gate(struct bxe_softc *sc)
4150{
4151    uint32_t val;
4152    int port = SC_PORT(sc);
4153
4154    BLOGD(sc, DBG_LOAD,
4155          "Disabling 'close the gates'\n");
4156
4157    if (CHIP_IS_E1(sc)) {
4158        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4159                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4160        val = REG_RD(sc, addr);
4161        val &= ~(0x300);
4162        REG_WR(sc, addr, val);
4163    } else {
4164        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4165        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4166                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4167        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4168    }
4169}
4170
4171/*
4172 * Cleans the objects that have internal lists without sending
4173 * ramrods. Should be run when interrupts are disabled.
4174 */
4175static void
4176bxe_squeeze_objects(struct bxe_softc *sc)
4177{
4178    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4179    struct ecore_mcast_ramrod_params rparam = { NULL };
4180    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4181    int rc;
4182
4183    /* Cleanup MACs' object first... */
4184
4185    /* Wait for completion of the request */
4186    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4187    /* Perform a dry cleanup */
4188    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4189
4190    /* Clean ETH primary MAC */
4191    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4192    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4193                             &ramrod_flags);
4194    if (rc != 0) {
4195        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4196    }
4197
4198    /* Cleanup UC list */
4199    vlan_mac_flags = 0;
4200    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4201    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4202                             &ramrod_flags);
4203    if (rc != 0) {
4204        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4205    }
4206
4207    /* Now clean mcast object... */
4208
4209    rparam.mcast_obj = &sc->mcast_obj;
4210    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4211
4212    /* Add a DEL command... */
4213    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4214    if (rc < 0) {
4215        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4216    }
4217
4218    /* now wait until all pending commands are cleared */
4219
4220    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221    while (rc != 0) {
4222        if (rc < 0) {
4223            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4224            return;
4225        }
4226
4227        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4228    }
4229}
4230
4231/* stop the controller */
4232static __noinline int
4233bxe_nic_unload(struct bxe_softc *sc,
4234               uint32_t         unload_mode,
4235               uint8_t          keep_link)
4236{
4237    uint8_t global = FALSE;
4238    uint32_t val;
4239    int i;
4240
4241    BXE_CORE_LOCK_ASSERT(sc);
4242
4243    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4244
4245    for (i = 0; i < sc->num_queues; i++) {
4246        struct bxe_fastpath *fp;
4247
4248        fp = &sc->fp[i];
4249        BXE_FP_TX_LOCK(fp);
4250        BXE_FP_TX_UNLOCK(fp);
4251    }
4252
4253    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4254
4255    /* mark driver as unloaded in shmem2 */
4256    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4257        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4258        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4259                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4260    }
4261
4262    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4263        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4264        /*
4265         * We can get here if the driver has been unloaded
4266         * during parity error recovery and is either waiting for a
4267         * leader to complete or for other functions to unload and
4268         * then ifconfig down has been issued. In this case we want to
4269         * unload and let the other functions complete the recovery
4270         * process.
4271         */
4272        sc->recovery_state = BXE_RECOVERY_DONE;
4273        sc->is_leader = 0;
4274        bxe_release_leader_lock(sc);
4275        mb();
4276
4277        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4278        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4279            " state = 0x%x\n", sc->recovery_state, sc->state);
4280        return (-1);
4281    }
4282
4283    /*
4284     * Nothing to do during unload if the previous bxe_nic_load()
4285     * did not complete successfully - all resources are released.
4286     */
4287    if ((sc->state == BXE_STATE_CLOSED) ||
4288        (sc->state == BXE_STATE_ERROR)) {
4289        return (0);
4290    }
4291
4292    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4293    mb();
4294
4295    /* stop tx */
4296    bxe_tx_disable(sc);
4297
4298    sc->rx_mode = BXE_RX_MODE_NONE;
4299    /* XXX set rx mode ??? */
4300
4301    if (IS_PF(sc) && !sc->grcdump_done) {
4302        /* set ALWAYS_ALIVE bit in shmem */
4303        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4304
4305        bxe_drv_pulse(sc);
4306
4307        bxe_stats_handle(sc, STATS_EVENT_STOP);
4308        bxe_save_statistics(sc);
4309    }
4310
4311    /* wait till consumers catch up with producers in all queues */
4312    bxe_drain_tx_queues(sc);
4313
4314    /* If this is a VF, indicate to the PF that this function is going down
4315     * (the PF will delete the SP elements and clear the initializations).
4316     */
4317    if (IS_VF(sc)) {
4318        ; /* bxe_vfpf_close_vf(sc); */
4319    } else if (unload_mode != UNLOAD_RECOVERY) {
4320        /* if this is a normal/close unload need to clean up chip */
4321        if (!sc->grcdump_done)
4322            bxe_chip_cleanup(sc, unload_mode, keep_link);
4323    } else {
4324        /* Send the UNLOAD_REQUEST to the MCP */
4325        bxe_send_unload_req(sc, unload_mode);
4326
4327        /*
4328         * Prevent transactions to the host from the functions on the
4329         * engine that doesn't reset global blocks in case of a global
4330         * attention, once the global blocks are reset and the gates are
4331         * opened (the engine whose leader will perform the recovery
4332         * last).
4333         */
4334        if (!CHIP_IS_E1x(sc)) {
4335            bxe_pf_disable(sc);
4336        }
4337
4338        /* disable HW interrupts */
4339        bxe_int_disable_sync(sc, TRUE);
4340
4341        /* detach interrupts */
4342        bxe_interrupt_detach(sc);
4343
4344        /* Report UNLOAD_DONE to MCP */
4345        bxe_send_unload_done(sc, FALSE);
4346    }
4347
4348    /*
4349     * At this stage no more interrupts will arrive so we may safely clean
4350     * the queueable objects here in case they failed to get cleaned so far.
4351     */
4352    if (IS_PF(sc)) {
4353        bxe_squeeze_objects(sc);
4354    }
4355
4356    /* There should be no more pending SP commands at this stage */
4357    sc->sp_state = 0;
4358
4359    sc->port.pmf = 0;
4360
4361    bxe_free_fp_buffers(sc);
4362
4363    if (IS_PF(sc)) {
4364        bxe_free_mem(sc);
4365    }
4366
4367    bxe_free_fw_stats_mem(sc);
4368
4369    sc->state = BXE_STATE_CLOSED;
4370
4371    /*
4372     * Check if there are pending parity attentions. If there are - set
4373     * RECOVERY_IN_PROGRESS.
4374     */
4375    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4376        bxe_set_reset_in_progress(sc);
4377
4378        /* Set RESET_IS_GLOBAL if needed */
4379        if (global) {
4380            bxe_set_reset_global(sc);
4381        }
4382    }
4383
4384    /*
4385     * The last driver must disable a "close the gate" if there is no
4386     * parity attention or "process kill" pending.
4387     */
4388    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4389        bxe_reset_is_done(sc, SC_PATH(sc))) {
4390        bxe_disable_close_the_gate(sc);
4391    }
4392
4393    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4394
4395    return (0);
4396}
4397
4398/*
4399 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4400 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4401 */
4402static int
4403bxe_ifmedia_update(struct ifnet  *ifp)
4404{
4405    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4406    struct ifmedia *ifm;
4407
4408    ifm = &sc->ifmedia;
4409
4410    /* We only support Ethernet media type. */
4411    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4412        return (EINVAL);
4413    }
4414
4415    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4416    case IFM_AUTO:
4417         break;
4418    case IFM_10G_CX4:
4419    case IFM_10G_SR:
4420    case IFM_10G_T:
4421    case IFM_10G_TWINAX:
4422    default:
4423        /* We don't support changing the media type. */
4424        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4425              IFM_SUBTYPE(ifm->ifm_media));
4426        return (EINVAL);
4427    }
4428
4429    return (0);
4430}
4431
4432/*
4433 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4434 */
4435static void
4436bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4437{
4438    struct bxe_softc *sc = if_getsoftc(ifp);
4439
4440    /* Report link down if the driver isn't running. */
4441    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4442        ifmr->ifm_active |= IFM_NONE;
4443        return;
4444    }
4445
4446    /* Setup the default interface info. */
4447    ifmr->ifm_status = IFM_AVALID;
4448    ifmr->ifm_active = IFM_ETHER;
4449
4450    if (sc->link_vars.link_up) {
4451        ifmr->ifm_status |= IFM_ACTIVE;
4452    } else {
4453        ifmr->ifm_active |= IFM_NONE;
4454        return;
4455    }
4456
4457    ifmr->ifm_active |= sc->media;
4458
4459    if (sc->link_vars.duplex == DUPLEX_FULL) {
4460        ifmr->ifm_active |= IFM_FDX;
4461    } else {
4462        ifmr->ifm_active |= IFM_HDX;
4463    }
4464}
4465
4466static void
4467bxe_handle_chip_tq(void *context,
4468                   int  pending)
4469{
4470    struct bxe_softc *sc = (struct bxe_softc *)context;
4471    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4472
4473    switch (work)
4474    {
4475
4476    case CHIP_TQ_REINIT:
4477        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4478            /* restart the interface */
4479            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4480            bxe_periodic_stop(sc);
4481            BXE_CORE_LOCK(sc);
4482            bxe_stop_locked(sc);
4483            bxe_init_locked(sc);
4484            BXE_CORE_UNLOCK(sc);
4485        }
4486        break;
4487
4488    default:
4489        break;
4490    }
4491}
4492
4493/*
4494 * Handles any IOCTL calls from the operating system.
4495 *
4496 * Returns:
4497 *   0 = Success, >0 Failure
4498 */
4499static int
4500bxe_ioctl(if_t ifp,
4501          u_long       command,
4502          caddr_t      data)
4503{
4504    struct bxe_softc *sc = if_getsoftc(ifp);
4505    struct ifreq *ifr = (struct ifreq *)data;
4506    int mask = 0;
4507    int reinit = 0;
4508    int error = 0;
4509
4510    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4511    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4512
4513    switch (command)
4514    {
4515    case SIOCSIFMTU:
4516        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4517              ifr->ifr_mtu);
4518
4519        if (sc->mtu == ifr->ifr_mtu) {
4520            /* nothing to change */
4521            break;
4522        }
4523
4524        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4525            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4526                  ifr->ifr_mtu, mtu_min, mtu_max);
4527            error = EINVAL;
4528            break;
4529        }
4530
4531        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4532                             (unsigned long)ifr->ifr_mtu);
4533        /*
4534         * atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4535         *                       (unsigned long)ifr->ifr_mtu);
4536         * XXX - Not sure why it needs to be atomic
4537         */
4538        if_setmtu(ifp, ifr->ifr_mtu);
4539        reinit = 1;
4540        break;
4541
4542    case SIOCSIFFLAGS:
4543        /* toggle the interface state up or down */
4544        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4545
4546        BXE_CORE_LOCK(sc);
4547        /* check if the interface is up */
4548        if (if_getflags(ifp) & IFF_UP) {
4549            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4550                /* set the receive mode flags */
4551                bxe_set_rx_mode(sc);
4552            } else if (sc->state != BXE_STATE_DISABLED) {
4553                bxe_init_locked(sc);
4554            }
4555        } else {
4556            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4557                bxe_periodic_stop(sc);
4558                bxe_stop_locked(sc);
4559            }
4560        }
4561        BXE_CORE_UNLOCK(sc);
4562
4563        break;
4564
4565    case SIOCADDMULTI:
4566    case SIOCDELMULTI:
4567        /* add/delete multicast addresses */
4568        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4569
4570        /* check if the interface is up */
4571        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4572            /* set the receive mode flags */
4573            BXE_CORE_LOCK(sc);
4574            bxe_set_rx_mode(sc);
4575            BXE_CORE_UNLOCK(sc);
4576        }
4577
4578        break;
4579
4580    case SIOCSIFCAP:
4581        /* find out which capabilities have changed */
4582        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4583
4584        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4585              mask);
4586
4587        /* toggle the LRO capabilities enable flag */
4588        if (mask & IFCAP_LRO) {
4589            if_togglecapenable(ifp, IFCAP_LRO);
4590            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4591                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4592            reinit = 1;
4593        }
4594
4595        /* toggle the TXCSUM checksum capabilities enable flag */
4596        if (mask & IFCAP_TXCSUM) {
4597            if_togglecapenable(ifp, IFCAP_TXCSUM);
4598            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4599                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4600            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4601                if_sethwassistbits(ifp, (CSUM_IP      |
4602                                    CSUM_TCP      |
4603                                    CSUM_UDP      |
4604                                    CSUM_TSO      |
4605                                    CSUM_TCP_IPV6 |
4606                                    CSUM_UDP_IPV6), 0);
4607            } else {
4608                if_clearhwassist(ifp); /* XXX */
4609            }
4610        }
4611
4612        /* toggle the RXCSUM checksum capabilities enable flag */
4613        if (mask & IFCAP_RXCSUM) {
4614            if_togglecapenable(ifp, IFCAP_RXCSUM);
4615            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4616                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4617            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4618                if_sethwassistbits(ifp, (CSUM_IP      |
4619                                    CSUM_TCP      |
4620                                    CSUM_UDP      |
4621                                    CSUM_TSO      |
4622                                    CSUM_TCP_IPV6 |
4623                                    CSUM_UDP_IPV6), 0);
4624            } else {
4625                if_clearhwassist(ifp); /* XXX */
4626            }
4627        }
4628
4629        /* toggle TSO4 capabilities enabled flag */
4630        if (mask & IFCAP_TSO4) {
4631            if_togglecapenable(ifp, IFCAP_TSO4);
4632            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4633                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4634        }
4635
4636        /* toggle TSO6 capabilities enabled flag */
4637        if (mask & IFCAP_TSO6) {
4638            if_togglecapenable(ifp, IFCAP_TSO6);
4639            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4640                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4641        }
4642
4643        /* toggle VLAN_HWTSO capabilities enabled flag */
4644        if (mask & IFCAP_VLAN_HWTSO) {
4645
4646            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4647            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4648                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4649        }
4650
4651        /* toggle VLAN_HWCSUM capabilities enabled flag */
4652        if (mask & IFCAP_VLAN_HWCSUM) {
4653            /* XXX investigate this... */
4654            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4655            error = EINVAL;
4656        }
4657
4658        /* toggle VLAN_MTU capabilities enable flag */
4659        if (mask & IFCAP_VLAN_MTU) {
4660            /* XXX investigate this... */
4661            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4662            error = EINVAL;
4663        }
4664
4665        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4666        if (mask & IFCAP_VLAN_HWTAGGING) {
4667            /* XXX investigate this... */
4668            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4669            error = EINVAL;
4670        }
4671
4672        /* toggle VLAN_HWFILTER capabilities enabled flag */
4673        if (mask & IFCAP_VLAN_HWFILTER) {
4674            /* XXX investigate this... */
4675            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4676            error = EINVAL;
4677        }
4678
4679        /* XXX not yet...
4680         * IFCAP_WOL_MAGIC
4681         */
4682
4683        break;
4684
4685    case SIOCSIFMEDIA:
4686    case SIOCGIFMEDIA:
4687        /* set/get interface media */
4688        BLOGD(sc, DBG_IOCTL,
4689              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4690              (command & 0xff));
4691        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4692        break;
4693
4694    default:
4695        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4696              (command & 0xff));
4697        error = ether_ioctl(ifp, command, data);
4698        break;
4699    }
4700
4701    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4702        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4703              "Re-initializing hardware from IOCTL change\n");
4704        bxe_periodic_stop(sc);
4705        BXE_CORE_LOCK(sc);
4706        bxe_stop_locked(sc);
4707        bxe_init_locked(sc);
4708        BXE_CORE_UNLOCK(sc);
4709    }
4710
4711    return (error);
4712}
4713
4714static __noinline void
4715bxe_dump_mbuf(struct bxe_softc *sc,
4716              struct mbuf      *m,
4717              uint8_t          contents)
4718{
4719    char * type;
4720    int i = 0;
4721
4722    if (!(sc->debug & DBG_MBUF)) {
4723        return;
4724    }
4725
4726    if (m == NULL) {
4727        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4728        return;
4729    }
4730
4731    while (m) {
4732
4733#if __FreeBSD_version >= 1000000
4734        BLOGD(sc, DBG_MBUF,
4735              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4736              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4737
4738        if (m->m_flags & M_PKTHDR) {
4739             BLOGD(sc, DBG_MBUF,
4740                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4741                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4742                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4743        }
4744#else
4745        BLOGD(sc, DBG_MBUF,
4746              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4747              i, m, m->m_len, m->m_flags,
4748              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4749
4750        if (m->m_flags & M_PKTHDR) {
4751             BLOGD(sc, DBG_MBUF,
4752                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4753                   i, m->m_pkthdr.len, m->m_flags,
4754                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4755                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4756                   "\22M_PROMISC\23M_NOFREE",
4757                   (int)m->m_pkthdr.csum_flags,
4758                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4759                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4760                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4761                   "\14CSUM_PSEUDO_HDR");
4762        }
4763#endif /* #if __FreeBSD_version >= 1000000 */
4764
4765        if (m->m_flags & M_EXT) {
4766            switch (m->m_ext.ext_type) {
4767            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4768            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4769            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4770            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4771            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4772            case EXT_PACKET:     type = "EXT_PACKET";     break;
4773            case EXT_MBUF:       type = "EXT_MBUF";       break;
4774            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4775            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4776            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4777            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4778            default:             type = "UNKNOWN";        break;
4779            }
4780
4781            BLOGD(sc, DBG_MBUF,
4782                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4783                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4784        }
4785
4786        if (contents) {
4787            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4788        }
4789
4790        m = m->m_next;
4791        i++;
4792    }
4793}
4794
4795/*
4796 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4797 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4798 * The window: 3 bds are reserved, 1 for the headers BD plus 2 for the parse BD and last BD.
4799 * The headers come in a separate bd in FreeBSD so 13 - 3 = 10.
4800 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4801 */
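/*
 * Worked example (hypothetical numbers): with an MSS of 1460 bytes and a
 * frame mapped into 13 DMA segments of 120 bytes each, every 10-segment
 * window sums to only 1200 bytes < 1460, so bxe_chktso_window() returns 1
 * and the mbuf chain must be defragmented before it can be sent via TSO.
 */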
4802static int
4803bxe_chktso_window(struct bxe_softc  *sc,
4804                  int               nsegs,
4805                  bus_dma_segment_t *segs,
4806                  struct mbuf       *m)
4807{
4808    uint32_t num_wnds, wnd_size, wnd_sum;
4809    int32_t frag_idx, wnd_idx;
4810    unsigned short lso_mss;
4811    int defrag;
4812
4813    defrag = 0;
4814    wnd_sum = 0;
4815    wnd_size = 10;
4816    num_wnds = nsegs - wnd_size;
4817    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4818
4819    /*
4820     * The total Eth+IP+TCP header length is in the first FreeBSD mbuf, so
4821     * calculate the first window's sum of data while skipping the first
4822     * segment, assuming it holds the headers.
4823     */
4824    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4825        wnd_sum += htole16(segs[frag_idx].ds_len);
4826    }
4827
4828    /* check the first 10 bd window size */
4829    if (wnd_sum < lso_mss) {
4830        return (1);
4831    }
4832
4833    /* run through the windows */
4834    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4835        /* subtract the length of the first segment of the previous window */
4836        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4837        /* add the next mbuf len to the len of our new window */
4838        wnd_sum += htole16(segs[frag_idx].ds_len);
4839        if (wnd_sum < lso_mss) {
4840            return (1);
4841        }
4842    }
4843
4844    return (0);
4845}
4846
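/*
 * Example (hypothetical frame): an untagged TCP/IPv4 packet has e_hlen = 14
 * and ip_hlen = 20, so l4_off = 34 bytes; (34 >> 1) = 17 16-bit words is
 * encoded into the L4_HDR_START_OFFSET_W field. With a 20-byte TCP header
 * (th_off = 5) the routine returns 34 + 20 = 54, the total header length.
 */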
4847static uint8_t
4848bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4849                    struct mbuf         *m,
4850                    uint32_t            *parsing_data)
4851{
4852    struct ether_vlan_header *eh = NULL;
4853    struct ip *ip4 = NULL;
4854    struct ip6_hdr *ip6 = NULL;
4855    caddr_t ip = NULL;
4856    struct tcphdr *th = NULL;
4857    int e_hlen, ip_hlen, l4_off;
4858    uint16_t proto;
4859
4860    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4861        /* no L4 checksum offload needed */
4862        return (0);
4863    }
4864
4865    /* get the Ethernet header */
4866    eh = mtod(m, struct ether_vlan_header *);
4867
4868    /* handle VLAN encapsulation if present */
4869    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4870        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4871        proto  = ntohs(eh->evl_proto);
4872    } else {
4873        e_hlen = ETHER_HDR_LEN;
4874        proto  = ntohs(eh->evl_encap_proto);
4875    }
4876
4877    switch (proto) {
4878    case ETHERTYPE_IP:
4879        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4880        ip4 = (m->m_len < sizeof(struct ip)) ?
4881                  (struct ip *)m->m_next->m_data :
4882                  (struct ip *)(m->m_data + e_hlen);
4883        /* ip_hl is number of 32-bit words */
4884        ip_hlen = (ip4->ip_hl << 2);
4885        ip = (caddr_t)ip4;
4886        break;
4887    case ETHERTYPE_IPV6:
4888        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4889        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4890                  (struct ip6_hdr *)m->m_next->m_data :
4891                  (struct ip6_hdr *)(m->m_data + e_hlen);
4892        /* XXX cannot support offload with IPv6 extensions */
4893        ip_hlen = sizeof(struct ip6_hdr);
4894        ip = (caddr_t)ip6;
4895        break;
4896    default:
4897        /* We can't offload in this case... */
4898        /* XXX error stat ??? */
4899        return (0);
4900    }
4901
4902    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4903    l4_off = (e_hlen + ip_hlen);
4904
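        /*
         * A sketch of the units involved: the parse BD stores the L4 header
         * start offset in 16-bit words, hence the >> 1 below. For example
         * (hypothetical frame): an untagged IPv4 packet with a 20-byte IP
         * header gives l4_off = 14 + 20 = 34 bytes, i.e. 17 words.
         */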
4905    *parsing_data |=
4906        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4907         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4908
4909    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4910                                  CSUM_TSO |
4911                                  CSUM_TCP_IPV6)) {
4912        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4913        th = (struct tcphdr *)(ip + ip_hlen);
4914        /* th_off is number of 32-bit words */
4915        *parsing_data |= ((th->th_off <<
4916                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4917                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4918        return (l4_off + (th->th_off << 2)); /* entire header length */
4919    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4920                                         CSUM_UDP_IPV6)) {
4921        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4922        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4923    } else {
4924        /* XXX error stat ??? */
4925        return (0);
4926    }
4927}
4928
4929static uint8_t
4930bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4931                 struct mbuf                *m,
4932                 struct eth_tx_parse_bd_e1x *pbd)
4933{
4934    struct ether_vlan_header *eh = NULL;
4935    struct ip *ip4 = NULL;
4936    struct ip6_hdr *ip6 = NULL;
4937    caddr_t ip = NULL;
4938    struct tcphdr *th = NULL;
4939    struct udphdr *uh = NULL;
4940    int e_hlen, ip_hlen;
4941    uint16_t proto;
4942    uint8_t hlen;
4943    uint16_t tmp_csum;
4944    uint32_t *tmp_uh;
4945
4946    /* get the Ethernet header */
4947    eh = mtod(m, struct ether_vlan_header *);
4948
4949    /* handle VLAN encapsulation if present */
4950    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4951        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4952        proto  = ntohs(eh->evl_proto);
4953    } else {
4954        e_hlen = ETHER_HDR_LEN;
4955        proto  = ntohs(eh->evl_encap_proto);
4956    }
4957
4958    switch (proto) {
4959    case ETHERTYPE_IP:
4960        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4961        ip4 = (m->m_len < sizeof(struct ip)) ?
4962                  (struct ip *)m->m_next->m_data :
4963                  (struct ip *)(m->m_data + e_hlen);
4964        /* ip_hl is the number of 32-bit words; convert to 16-bit words for ip_hlen_w */
4965        ip_hlen = (ip4->ip_hl << 1);
4966        ip = (caddr_t)ip4;
4967        break;
4968    case ETHERTYPE_IPV6:
4969        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4970        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4971                  (struct ip6_hdr *)m->m_next->m_data :
4972                  (struct ip6_hdr *)(m->m_data + e_hlen);
4973        /* XXX cannot support offload with IPv6 extensions */
4974        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4975        ip = (caddr_t)ip6;
4976        break;
4977    default:
4978        /* We can't offload in this case... */
4979        /* XXX error stat ??? */
4980        return (0);
4981    }
4982
4983    hlen = (e_hlen >> 1);
4984
4985    /* note that rest of global_data is indirectly zeroed here */
4986    if (m->m_flags & M_VLANTAG) {
4987        pbd->global_data =
4988            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4989    } else {
4990        pbd->global_data = htole16(hlen);
4991    }
4992
4993    pbd->ip_hlen_w = ip_hlen;
4994
4995    hlen += pbd->ip_hlen_w;
4996
4997    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4998
4999    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5000                                  CSUM_TSO |
5001                                  CSUM_TCP_IPV6)) {
5002        th = (struct tcphdr *)(ip + (ip_hlen << 1));
5003        /* th_off is the number of 32-bit words; convert to 16-bit words */
5004        hlen += (uint16_t)(th->th_off << 1);
5005    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5006                                         CSUM_UDP_IPV6)) {
5007        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5008        hlen += (sizeof(struct udphdr) / 2);
5009    } else {
5010        /* valid case as only CSUM_IP was set */
5011        return (0);
5012    }
5013
5014    pbd->total_hlen_w = htole16(hlen);
5015
5016    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5017                                  CSUM_TSO |
5018                                  CSUM_TCP_IPV6)) {
5019        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5020        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5021    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5022                                         CSUM_UDP_IPV6)) {
5023        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5024
5025        /*
5026         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5027         * checksums and does not know anything about the UDP header and where
5028         * the checksum field is located. It only knows about TCP. Therefore
5029         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5030         * offload. Since the checksum field offset for TCP is 16 bytes and
5031         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5032         * bytes less than the start of the UDP header. This allows the
5033         * hardware to write the checksum in the correct spot. But the
5034         * hardware will compute a checksum which includes the last 10 bytes
5035         * of the IP header. To correct this we tweak the stack computed
5036         * pseudo checksum by folding in the calculation of the inverse
5037         * checksum for those final 10 bytes of the IP header. This allows
5038         * the correct checksum to be computed by the hardware.
5039         */
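            /*
             * Illustrative arithmetic (hypothetical value): if those last 10
             * bytes of the IP header fold to a one's-complement sum of 0x1234,
             * then in_addword(uh->uh_sum, ~0x1234) is what gets stored below,
             * so when the hardware later adds the same 10 bytes back in, the
             * 0x1234 and ~0x1234 contributions cancel and the final UDP
             * checksum comes out correct.
             */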
5040
5041        /* set pointer 10 bytes before UDP header */
5042        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5043
5044        /* calculate a pseudo header checksum over the first 10 bytes */
5045        tmp_csum = in_pseudo(*tmp_uh,
5046                             *(tmp_uh + 1),
5047                             *(uint16_t *)(tmp_uh + 2));
5048
5049        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5050    }
5051
5052    return (hlen * 2); /* entire header length, number of bytes */
5053}
5054
5055static void
5056bxe_set_pbd_lso_e2(struct mbuf *m,
5057                   uint32_t    *parsing_data)
5058{
5059    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5060                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5061                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5062
5063    /* XXX test for IPv6 with extension header... */
5064}
5065
5066static void
5067bxe_set_pbd_lso(struct mbuf                *m,
5068                struct eth_tx_parse_bd_e1x *pbd)
5069{
5070    struct ether_vlan_header *eh = NULL;
5071    struct ip *ip = NULL;
5072    struct tcphdr *th = NULL;
5073    int e_hlen;
5074
5075    /* get the Ethernet header */
5076    eh = mtod(m, struct ether_vlan_header *);
5077
5078    /* handle VLAN encapsulation if present */
5079    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5080                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5081
5082    /* get the IP and TCP header, with LSO entire header in first mbuf */
5083    /* XXX assuming IPv4 */
5084    ip = (struct ip *)(m->m_data + e_hlen);
5085    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5086
5087    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5088    pbd->tcp_send_seq = ntohl(th->th_seq);
5089    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5090
5091#if 1
5092        /* XXX IPv4 */
5093        pbd->ip_id = ntohs(ip->ip_id);
5094        pbd->tcp_pseudo_csum =
5095            ntohs(in_pseudo(ip->ip_src.s_addr,
5096                            ip->ip_dst.s_addr,
5097                            htons(IPPROTO_TCP)));
5098#else
5099        /* XXX IPv6 */
5100        pbd->tcp_pseudo_csum =
5101            ntohs(in_pseudo(&ip6->ip6_src,
5102                            &ip6->ip6_dst,
5103                            htons(IPPROTO_TCP)));
5104#endif
5105
5106    pbd->global_data |=
5107        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5108}
5109
5110/*
5111 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5112 * visible to the controller.
5113 *
5114 * If an mbuf is submitted to this routine and cannot be given to the
5115 * controller (e.g. it has too many fragments) then the function may free
5116 * the mbuf and return to the caller.
5117 *
5118 * Returns:
5119 *   0 = Success, !0 = Failure
5120 *   Note the side effect that an mbuf may be freed if it causes a problem.
5121 */
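    /*
     * Caller-side sketch: on a non-zero return the caller should test whether
     * *m_head is still non-NULL before requeueing it; see bxe_tx_start_locked()
     * and bxe_tx_mq_start_locked() below for the actual handling.
     */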
5122static int
5123bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5124{
5125    bus_dma_segment_t segs[32];
5126    struct mbuf *m0;
5127    struct bxe_sw_tx_bd *tx_buf;
5128    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5129    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5130    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5131    struct eth_tx_bd *tx_data_bd;
5132    struct eth_tx_bd *tx_total_pkt_size_bd;
5133    struct eth_tx_start_bd *tx_start_bd;
5134    uint16_t bd_prod, pkt_prod, total_pkt_size;
5135    uint8_t mac_type;
5136    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5137    struct bxe_softc *sc;
5138    uint16_t tx_bd_avail;
5139    struct ether_vlan_header *eh;
5140    uint32_t pbd_e2_parsing_data = 0;
5141    uint8_t hlen = 0;
5142    int tmp_bd;
5143    int i;
5144
5145    sc = fp->sc;
5146
5147#if __FreeBSD_version >= 800000
5148    M_ASSERTPKTHDR(*m_head);
5149#endif /* #if __FreeBSD_version >= 800000 */
5150
5151    m0 = *m_head;
5152    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5153    tx_start_bd = NULL;
5154    tx_data_bd = NULL;
5155    tx_total_pkt_size_bd = NULL;
5156
5157    /* get the H/W pointer for packets and BDs */
5158    pkt_prod = fp->tx_pkt_prod;
5159    bd_prod = fp->tx_bd_prod;
5160
5161    mac_type = UNICAST_ADDRESS;
5162
5163    /* map the mbuf into the next open DMAable memory */
5164    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5165    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5166                                    tx_buf->m_map, m0,
5167                                    segs, &nsegs, BUS_DMA_NOWAIT);
5168
5169    /* mapping errors */
5170    if (__predict_false(error != 0)) {
5171        fp->eth_q_stats.tx_dma_mapping_failure++;
5172        if (error == ENOMEM) {
5173            /* resource issue, try again later */
5174            rc = ENOMEM;
5175        } else if (error == EFBIG) {
5176            /* possibly recoverable with defragmentation */
5177            fp->eth_q_stats.mbuf_defrag_attempts++;
5178            m0 = m_defrag(*m_head, M_NOWAIT);
5179            if (m0 == NULL) {
5180                fp->eth_q_stats.mbuf_defrag_failures++;
5181                rc = ENOBUFS;
5182            } else {
5183                /* defrag successful, try mapping again */
5184                *m_head = m0;
5185                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5186                                                tx_buf->m_map, m0,
5187                                                segs, &nsegs, BUS_DMA_NOWAIT);
5188                if (error) {
5189                    fp->eth_q_stats.tx_dma_mapping_failure++;
5190                    rc = error;
5191                }
5192            }
5193        } else {
5194            /* unknown, unrecoverable mapping error */
5195            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5196            bxe_dump_mbuf(sc, m0, FALSE);
5197            rc = error;
5198        }
5199
5200        goto bxe_tx_encap_continue;
5201    }
5202
5203    tx_bd_avail = bxe_tx_avail(sc, fp);
5204
5205    /* make sure there is enough room in the send queue */
5206    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5207        /* Recoverable, try again later. */
5208        fp->eth_q_stats.tx_hw_queue_full++;
5209        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5210        rc = ENOMEM;
5211        goto bxe_tx_encap_continue;
5212    }
5213
5214    /* capture the current H/W TX chain high watermark */
5215    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5216                        (TX_BD_USABLE - tx_bd_avail))) {
5217        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5218    }
5219
5220    /* make sure it fits in the packet window */
5221    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5222        /*
5223         * The mbuf may be too big for the controller to handle. If the frame
5224         * is a TSO frame we'll need to do an additional check.
5225         */
5226        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5227            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5228                goto bxe_tx_encap_continue; /* OK to send */
5229            } else {
5230                fp->eth_q_stats.tx_window_violation_tso++;
5231            }
5232        } else {
5233            fp->eth_q_stats.tx_window_violation_std++;
5234        }
5235
5236        /* lets try to defragment this mbuf and remap it */
5237        fp->eth_q_stats.mbuf_defrag_attempts++;
5238        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5239
5240        m0 = m_defrag(*m_head, M_NOWAIT);
5241        if (m0 == NULL) {
5242            fp->eth_q_stats.mbuf_defrag_failures++;
5243            /* Ugh, just drop the frame... :( */
5244            rc = ENOBUFS;
5245        } else {
5246            /* defrag successful, try mapping again */
5247            *m_head = m0;
5248            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5249                                            tx_buf->m_map, m0,
5250                                            segs, &nsegs, BUS_DMA_NOWAIT);
5251            if (error) {
5252                fp->eth_q_stats.tx_dma_mapping_failure++;
5253                /* No sense in trying to defrag/copy chain, drop it. :( */
5254                rc = error;
5255            } else {
5256                /* if the chain is still too long then drop it */
5257                if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5258                    /*
5259                     * in case TSO is enabled nsegs should be checked against
5260                     * BXE_TSO_MAX_SEGMENTS
5261                     */
5262                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5263                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5264                        fp->eth_q_stats.nsegs_path1_errors++;
5265                        rc = ENODEV;
5266                    }
5267                } else {
5268                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5269                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5270                        fp->eth_q_stats.nsegs_path2_errors++;
5271                        rc = ENODEV;
5272                    }
5273                }
5274            }
5275        }
5276    }
5277
5278bxe_tx_encap_continue:
5279
5280    /* Check for errors */
5281    if (rc) {
5282        if (rc == ENOMEM) {
5283            /* recoverable, try again later */
5284        } else {
5285            fp->eth_q_stats.tx_soft_errors++;
5286            fp->eth_q_stats.mbuf_alloc_tx--;
5287            m_freem(*m_head);
5288            *m_head = NULL;
5289        }
5290
5291        return (rc);
5292    }
5293
5294    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5295    if (m0->m_flags & M_BCAST) {
5296        mac_type = BROADCAST_ADDRESS;
5297    } else if (m0->m_flags & M_MCAST) {
5298        mac_type = MULTICAST_ADDRESS;
5299    }
5300
5301    /* store the mbuf into the mbuf ring */
5302    tx_buf->m        = m0;
5303    tx_buf->first_bd = fp->tx_bd_prod;
5304    tx_buf->flags    = 0;
5305
5306    /* prepare the first transmit (start) BD for the mbuf */
5307    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5308
5309    BLOGD(sc, DBG_TX,
5310          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5311          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5312
5313    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5314    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5315    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5316    total_pkt_size += tx_start_bd->nbytes;
5317    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5318
5319    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5320
5321    /* all frames have at least Start BD + Parsing BD */
5322    nbds = nsegs + 1;
5323    tx_start_bd->nbd = htole16(nbds);
5324
5325    if (m0->m_flags & M_VLANTAG) {
5326        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5327        tx_start_bd->bd_flags.as_bitfield |=
5328            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5329    } else {
5330        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5331        if (IS_VF(sc)) {
5332            /* map ethernet header to find type and header length */
5333            eh = mtod(m0, struct ether_vlan_header *);
5334            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5335        } else {
5336            /* used by FW for packet accounting */
5337            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5338        }
5339    }
5340
5341    /*
5342     * add a parsing BD from the chain. The parsing BD is always added
5343     * even though it is only used for TSO and checksum offload.
5344     */
5345    bd_prod = TX_BD_NEXT(bd_prod);
5346
5347    if (m0->m_pkthdr.csum_flags) {
5348        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5349            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5350            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5351        }
5352
5353        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5354            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5355                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5356        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5357            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5358                                                  ETH_TX_BD_FLAGS_IS_UDP |
5359                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5360        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5361                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5362            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5363        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5364            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5365                                                  ETH_TX_BD_FLAGS_IS_UDP);
5366        }
5367    }
5368
5369    if (!CHIP_IS_E1x(sc)) {
5370        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5371        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5372
5373        if (m0->m_pkthdr.csum_flags) {
5374            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5375        }
5376
5377        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5378                 mac_type);
5379    } else {
5380        uint16_t global_data = 0;
5381
5382        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5383        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5384
5385        if (m0->m_pkthdr.csum_flags) {
5386            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5387        }
5388
5389        SET_FLAG(global_data,
5390                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5391        pbd_e1x->global_data |= htole16(global_data);
5392    }
5393
5394    /* setup the parsing BD with TSO specific info */
5395    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5396        fp->eth_q_stats.tx_ofld_frames_lso++;
5397        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5398
5399        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5400            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5401
5402            /* split the first BD into header/data making the fw job easy */
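                /*
                 * Illustrative example (hypothetical sizes): with a 66-byte
                 * Eth+IP+TCP header inside a 1514-byte first segment, the
                 * start BD is trimmed to 66 bytes and the new data BD below
                 * covers the remaining 1448 bytes at segs[0].ds_addr + 66.
                 */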
5403            nbds++;
5404            tx_start_bd->nbd = htole16(nbds);
5405            tx_start_bd->nbytes = htole16(hlen);
5406
5407            bd_prod = TX_BD_NEXT(bd_prod);
5408
5409            /* new transmit BD after the tx_parse_bd */
5410            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5411            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5412            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5413            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5414            if (tx_total_pkt_size_bd == NULL) {
5415                tx_total_pkt_size_bd = tx_data_bd;
5416            }
5417
5418            BLOGD(sc, DBG_TX,
5419                  "TSO split header size is %d (%x:%x) nbds %d\n",
5420                  le16toh(tx_start_bd->nbytes),
5421                  le32toh(tx_start_bd->addr_hi),
5422                  le32toh(tx_start_bd->addr_lo),
5423                  nbds);
5424        }
5425
5426        if (!CHIP_IS_E1x(sc)) {
5427            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5428        } else {
5429            bxe_set_pbd_lso(m0, pbd_e1x);
5430        }
5431    }
5432
5433    if (pbd_e2_parsing_data) {
5434        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5435    }
5436
5437    /* prepare remaining BDs, start tx bd contains first seg/frag */
5438    for (i = 1; i < nsegs ; i++) {
5439        bd_prod = TX_BD_NEXT(bd_prod);
5440        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5441        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5442        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5443        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5444        if (tx_total_pkt_size_bd == NULL) {
5445            tx_total_pkt_size_bd = tx_data_bd;
5446        }
5447        total_pkt_size += tx_data_bd->nbytes;
5448    }
5449
5450    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5451
5452    if (tx_total_pkt_size_bd != NULL) {
5453        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5454    }
5455
5456    if (__predict_false(sc->debug & DBG_TX)) {
5457        tmp_bd = tx_buf->first_bd;
5458        for (i = 0; i < nbds; i++)
5459        {
5460            if (i == 0) {
5461                BLOGD(sc, DBG_TX,
5462                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5463                      "bd_flags=0x%x hdr_nbds=%d\n",
5464                      tx_start_bd,
5465                      tmp_bd,
5466                      le16toh(tx_start_bd->nbd),
5467                      le16toh(tx_start_bd->vlan_or_ethertype),
5468                      tx_start_bd->bd_flags.as_bitfield,
5469                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5470            } else if (i == 1) {
5471                if (pbd_e1x) {
5472                    BLOGD(sc, DBG_TX,
5473                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5474                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5475                          "tcp_seq=%u total_hlen_w=%u\n",
5476                          pbd_e1x,
5477                          tmp_bd,
5478                          pbd_e1x->global_data,
5479                          pbd_e1x->ip_hlen_w,
5480                          pbd_e1x->ip_id,
5481                          pbd_e1x->lso_mss,
5482                          pbd_e1x->tcp_flags,
5483                          pbd_e1x->tcp_pseudo_csum,
5484                          pbd_e1x->tcp_send_seq,
5485                          le16toh(pbd_e1x->total_hlen_w));
5486                } else { /* if (pbd_e2) */
5487                    BLOGD(sc, DBG_TX,
5488                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5489                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5490                          pbd_e2,
5491                          tmp_bd,
5492                          pbd_e2->data.mac_addr.dst_hi,
5493                          pbd_e2->data.mac_addr.dst_mid,
5494                          pbd_e2->data.mac_addr.dst_lo,
5495                          pbd_e2->data.mac_addr.src_hi,
5496                          pbd_e2->data.mac_addr.src_mid,
5497                          pbd_e2->data.mac_addr.src_lo,
5498                          pbd_e2->parsing_data);
5499                }
5500            }
5501
5502            if (i != 1) { /* skip the parse BD as it doesn't hold data */
5503                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5504                BLOGD(sc, DBG_TX,
5505                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5506                      tx_data_bd,
5507                      tmp_bd,
5508                      le16toh(tx_data_bd->nbytes),
5509                      le32toh(tx_data_bd->addr_hi),
5510                      le32toh(tx_data_bd->addr_lo));
5511            }
5512
5513            tmp_bd = TX_BD_NEXT(tmp_bd);
5514        }
5515    }
5516
5517    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5518
5519    /* update TX BD producer index value for next TX */
5520    bd_prod = TX_BD_NEXT(bd_prod);
5521
5522    /*
5523     * If the chain of tx_bd's describing this frame is adjacent to or spans
5524     * an eth_tx_next_bd element then we need to increment the nbds value.
5525     */
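        /*
         * In other words (a sketch): if the producer's index within the
         * current BD page is now smaller than the number of BDs just used,
         * the frame wrapped past the end of a page and the page-crossing
         * eth_tx_next_bd element must be counted in nbds as well.
         */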
5526    if (TX_BD_IDX(bd_prod) < nbds) {
5527        nbds++;
5528    }
5529
5530    /* don't allow reordering of writes for nbd and packets */
5531    mb();
5532
5533    fp->tx_db.data.prod += nbds;
5534
5535    /* producer points to the next free tx_bd at this point */
5536    fp->tx_pkt_prod++;
5537    fp->tx_bd_prod = bd_prod;
5538
5539    DOORBELL(sc, fp->index, fp->tx_db.raw);
5540
5541    fp->eth_q_stats.tx_pkts++;
5542
5543    /* Prevent speculative reads from getting ahead of the status block. */
5544    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5545                      0, 0, BUS_SPACE_BARRIER_READ);
5546
5547    /* Prevent speculative reads from getting ahead of the doorbell. */
5548    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5549                      0, 0, BUS_SPACE_BARRIER_READ);
5550
5551    return (0);
5552}
5553
5554static void
5555bxe_tx_start_locked(struct bxe_softc *sc,
5556                    if_t ifp,
5557                    struct bxe_fastpath *fp)
5558{
5559    struct mbuf *m = NULL;
5560    int tx_count = 0;
5561    uint16_t tx_bd_avail;
5562
5563    BXE_FP_TX_LOCK_ASSERT(fp);
5564
5565    /* keep adding entries while there are frames to send */
5566    while (!if_sendq_empty(ifp)) {
5567
5568        /*
5569         * check for any frames to send
5570         * dequeue can still be NULL even if queue is not empty
5571         */
5572        m = if_dequeue(ifp);
5573        if (__predict_false(m == NULL)) {
5574            break;
5575        }
5576
5577        /* the mbuf now belongs to us */
5578        fp->eth_q_stats.mbuf_alloc_tx++;
5579
5580        /*
5581         * Put the frame into the transmit ring. If we don't have room,
5582         * place the mbuf back at the head of the TX queue, set the
5583         * OACTIVE flag, and wait for the NIC to drain the chain.
5584         */
5585        if (__predict_false(bxe_tx_encap(fp, &m))) {
5586            fp->eth_q_stats.tx_encap_failures++;
5587            if (m != NULL) {
5588                /* mark the TX queue as full and return the frame */
5589                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5590                if_sendq_prepend(ifp, m);
5591                fp->eth_q_stats.mbuf_alloc_tx--;
5592                fp->eth_q_stats.tx_queue_xoff++;
5593            }
5594
5595            /* stop looking for more work */
5596            break;
5597        }
5598
5599        /* the frame was enqueued successfully */
5600        tx_count++;
5601
5602        /* send a copy of the frame to any BPF listeners. */
5603        if_etherbpfmtap(ifp, m);
5604
5605        tx_bd_avail = bxe_tx_avail(sc, fp);
5606
5607        /* handle any completions if we're running low */
5608        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5609            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5610            bxe_txeof(sc, fp);
5611            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5612                break;
5613            }
5614        }
5615    }
5616
5617    /* all TX packets were dequeued and/or the tx ring is full */
5618    if (tx_count > 0) {
5619        /* reset the TX watchdog timeout timer */
5620        fp->watchdog_timer = BXE_TX_TIMEOUT;
5621    }
5622}
5623
5624/* Legacy (non-RSS) dispatch routine */
5625static void
5626bxe_tx_start(if_t ifp)
5627{
5628    struct bxe_softc *sc;
5629    struct bxe_fastpath *fp;
5630
5631    sc = if_getsoftc(ifp);
5632
5633    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5634        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5635        return;
5636    }
5637
5638    if (!sc->link_vars.link_up) {
5639        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5640        return;
5641    }
5642
5643    fp = &sc->fp[0];
5644
5645    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5646        fp->eth_q_stats.tx_queue_full_return++;
5647        return;
5648    }
5649
5650    BXE_FP_TX_LOCK(fp);
5651    bxe_tx_start_locked(sc, ifp, fp);
5652    BXE_FP_TX_UNLOCK(fp);
5653}
5654
5655#if __FreeBSD_version >= 901504
5656
5657static int
5658bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5659                       if_t                ifp,
5660                       struct bxe_fastpath *fp,
5661                       struct mbuf         *m)
5662{
5663    struct buf_ring *tx_br = fp->tx_br;
5664    struct mbuf *next;
5665    int depth, rc, tx_count;
5666    uint16_t tx_bd_avail;
5667
5668    rc = tx_count = 0;
5669
5670    BXE_FP_TX_LOCK_ASSERT(fp);
5671
5672    if (sc->state != BXE_STATE_OPEN)  {
5673        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5674        return ENETDOWN;
5675    }
5676
5677    if (!tx_br) {
5678        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5679        return (EINVAL);
5680    }
5681
5682    if (m != NULL) {
5683        rc = drbr_enqueue(ifp, tx_br, m);
5684        if (rc != 0) {
5685            fp->eth_q_stats.tx_soft_errors++;
5686            goto bxe_tx_mq_start_locked_exit;
5687        }
5688    }
5689
5690    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5691        fp->eth_q_stats.tx_request_link_down_failures++;
5692        goto bxe_tx_mq_start_locked_exit;
5693    }
5694
5695    /* fetch the depth of the driver queue */
5696    depth = drbr_inuse_drv(ifp, tx_br);
5697    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5698        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5699    }
5700
5701    /* keep adding entries while there are frames to send */
5702    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5703        /* handle any completions if we're running low */
5704        tx_bd_avail = bxe_tx_avail(sc, fp);
5705        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5706            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5707            bxe_txeof(sc, fp);
5708            tx_bd_avail = bxe_tx_avail(sc, fp);
5709            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5710                fp->eth_q_stats.bd_avail_too_less_failures++;
5711                m_freem(next);
5712                drbr_advance(ifp, tx_br);
5713                rc = ENOBUFS;
5714                break;
5715            }
5716        }
5717
5718        /* the mbuf now belongs to us */
5719        fp->eth_q_stats.mbuf_alloc_tx++;
5720
5721        /*
5722         * Put the frame into the transmit ring. If we don't have room,
5723         * place the mbuf back at the head of the TX queue, set the
5724         * OACTIVE flag, and wait for the NIC to drain the chain.
5725         */
5726        rc = bxe_tx_encap(fp, &next);
5727        if (__predict_false(rc != 0)) {
5728            fp->eth_q_stats.tx_encap_failures++;
5729            if (next != NULL) {
5730                /* mark the TX queue as full and save the frame */
5731                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5732                drbr_putback(ifp, tx_br, next);
5733                fp->eth_q_stats.mbuf_alloc_tx--;
5734                fp->eth_q_stats.tx_frames_deferred++;
5735            } else
5736                drbr_advance(ifp, tx_br);
5737
5738            /* stop looking for more work */
5739            break;
5740        }
5741
5742        /* the transmit frame was enqueued successfully */
5743        tx_count++;
5744
5745        /* send a copy of the frame to any BPF listeners */
5746        if_etherbpfmtap(ifp, next);
5747
5748        drbr_advance(ifp, tx_br);
5749    }
5750
5751    /* all TX packets were dequeued and/or the tx ring is full */
5752    if (tx_count > 0) {
5753        /* reset the TX watchdog timeout timer */
5754        fp->watchdog_timer = BXE_TX_TIMEOUT;
5755    }
5756
5757bxe_tx_mq_start_locked_exit:
5758    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5759    if (!drbr_empty(ifp, tx_br)) {
5760        fp->eth_q_stats.tx_mq_not_empty++;
5761        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5762    }
5763
5764    return (rc);
5765}
5766
5767static void
5768bxe_tx_mq_start_deferred(void *arg,
5769                         int pending)
5770{
5771    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5772    struct bxe_softc *sc = fp->sc;
5773    if_t ifp = sc->ifp;
5774
5775    BXE_FP_TX_LOCK(fp);
5776    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5777    BXE_FP_TX_UNLOCK(fp);
5778}
5779
5780/* Multiqueue (TSS) dispatch routine. */
5781static int
5782bxe_tx_mq_start(struct ifnet *ifp,
5783                struct mbuf  *m)
5784{
5785    struct bxe_softc *sc = if_getsoftc(ifp);
5786    struct bxe_fastpath *fp;
5787    int fp_index, rc;
5788
5789    fp_index = 0; /* default is the first queue */
5790
5791    /* check if flowid is set */
5792
5793    if (BXE_VALID_FLOWID(m))
5794        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
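        /*
         * Illustrative example (hypothetical values): with num_queues = 4, a
         * flowid of 10 selects fp[2]; the modulo keeps every packet of the
         * same flow on the same TX queue.
         */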
5795
5796    fp = &sc->fp[fp_index];
5797
5798    if (sc->state != BXE_STATE_OPEN) {
5799        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5800        return (ENETDOWN);
5801    }
5802
5803    if (BXE_FP_TX_TRYLOCK(fp)) {
5804        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5805        BXE_FP_TX_UNLOCK(fp);
5806    } else {
5807        rc = drbr_enqueue(ifp, fp->tx_br, m);
5808        taskqueue_enqueue(fp->tq, &fp->tx_task);
5809    }
5810
5811    return (rc);
5812}
5813
5814static void
5815bxe_mq_flush(struct ifnet *ifp)
5816{
5817    struct bxe_softc *sc = if_getsoftc(ifp);
5818    struct bxe_fastpath *fp;
5819    struct mbuf *m;
5820    int i;
5821
5822    for (i = 0; i < sc->num_queues; i++) {
5823        fp = &sc->fp[i];
5824
5825        if (fp->state != BXE_FP_STATE_IRQ) {
5826            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5827                  fp->index, fp->state);
5828            continue;
5829        }
5830
5831        if (fp->tx_br != NULL) {
5832            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5833            BXE_FP_TX_LOCK(fp);
5834            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5835                m_freem(m);
5836            }
5837            BXE_FP_TX_UNLOCK(fp);
5838        }
5839    }
5840
5841    if_qflush(ifp);
5842}
5843
5844#endif /* FreeBSD_version >= 901504 */
5845
5846static uint16_t
5847bxe_cid_ilt_lines(struct bxe_softc *sc)
5848{
5849    if (IS_SRIOV(sc)) {
5850        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5851    }
5852    return (L2_ILT_LINES(sc));
5853}
5854
5855static void
5856bxe_ilt_set_info(struct bxe_softc *sc)
5857{
5858    struct ilt_client_info *ilt_client;
5859    struct ecore_ilt *ilt = sc->ilt;
5860    uint16_t line = 0;
5861
5862    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5863    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5864
5865    /* CDU */
5866    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5867    ilt_client->client_num = ILT_CLIENT_CDU;
5868    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5869    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5870    ilt_client->start = line;
5871    line += bxe_cid_ilt_lines(sc);
5872
5873    if (CNIC_SUPPORT(sc)) {
5874        line += CNIC_ILT_LINES;
5875    }
5876
5877    ilt_client->end = (line - 1);
5878
5879    BLOGD(sc, DBG_LOAD,
5880          "ilt client[CDU]: start %d, end %d, "
5881          "psz 0x%x, flags 0x%x, hw psz %d\n",
5882          ilt_client->start, ilt_client->end,
5883          ilt_client->page_size,
5884          ilt_client->flags,
5885          ilog2(ilt_client->page_size >> 12));
5886
5887    /* QM */
5888    if (QM_INIT(sc->qm_cid_count)) {
5889        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5890        ilt_client->client_num = ILT_CLIENT_QM;
5891        ilt_client->page_size = QM_ILT_PAGE_SZ;
5892        ilt_client->flags = 0;
5893        ilt_client->start = line;
5894
5895        /* 4 bytes for each cid */
5896        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5897                             QM_ILT_PAGE_SZ);
5898
5899        ilt_client->end = (line - 1);
5900
5901        BLOGD(sc, DBG_LOAD,
5902              "ilt client[QM]: start %d, end %d, "
5903              "psz 0x%x, flags 0x%x, hw psz %d\n",
5904              ilt_client->start, ilt_client->end,
5905              ilt_client->page_size, ilt_client->flags,
5906              ilog2(ilt_client->page_size >> 12));
5907    }
5908
5909    if (CNIC_SUPPORT(sc)) {
5910        /* SRC */
5911        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5912        ilt_client->client_num = ILT_CLIENT_SRC;
5913        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5914        ilt_client->flags = 0;
5915        ilt_client->start = line;
5916        line += SRC_ILT_LINES;
5917        ilt_client->end = (line - 1);
5918
5919        BLOGD(sc, DBG_LOAD,
5920              "ilt client[SRC]: start %d, end %d, "
5921              "psz 0x%x, flags 0x%x, hw psz %d\n",
5922              ilt_client->start, ilt_client->end,
5923              ilt_client->page_size, ilt_client->flags,
5924              ilog2(ilt_client->page_size >> 12));
5925
5926        /* TM */
5927        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5928        ilt_client->client_num = ILT_CLIENT_TM;
5929        ilt_client->page_size = TM_ILT_PAGE_SZ;
5930        ilt_client->flags = 0;
5931        ilt_client->start = line;
5932        line += TM_ILT_LINES;
5933        ilt_client->end = (line - 1);
5934
5935        BLOGD(sc, DBG_LOAD,
5936              "ilt client[TM]: start %d, end %d, "
5937              "psz 0x%x, flags 0x%x, hw psz %d\n",
5938              ilt_client->start, ilt_client->end,
5939              ilt_client->page_size, ilt_client->flags,
5940              ilog2(ilt_client->page_size >> 12));
5941    }
5942
5943    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5944}
5945
5946static void
5947bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5948{
5949    int i;
5950    uint32_t rx_buf_size;
5951
5952    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5953
5954    for (i = 0; i < sc->num_queues; i++) {
5955        if (rx_buf_size <= MCLBYTES) {
5956            sc->fp[i].rx_buf_size = rx_buf_size;
5957            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5958        } else if (rx_buf_size <= MJUMPAGESIZE) {
5959            sc->fp[i].rx_buf_size = rx_buf_size;
5960            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5961        } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5962            sc->fp[i].rx_buf_size = MCLBYTES;
5963            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5964        } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5965            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5966            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5967        } else {
5968            sc->fp[i].rx_buf_size = MCLBYTES;
5969            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5970        }
5971    }
5972}
5973
5974static int
5975bxe_alloc_ilt_mem(struct bxe_softc *sc)
5976{
5977    int rc = 0;
5978
5979    if ((sc->ilt =
5980         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5981                                    M_BXE_ILT,
5982                                    (M_NOWAIT | M_ZERO))) == NULL) {
5983        rc = 1;
5984    }
5985
5986    return (rc);
5987}
5988
5989static int
5990bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5991{
5992    int rc = 0;
5993
5994    if ((sc->ilt->lines =
5995         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5996                                    M_BXE_ILT,
5997                                    (M_NOWAIT | M_ZERO))) == NULL) {
5998        rc = 1;
5999    }
6000
6001    return (rc);
6002}
6003
6004static void
6005bxe_free_ilt_mem(struct bxe_softc *sc)
6006{
6007    if (sc->ilt != NULL) {
6008        free(sc->ilt, M_BXE_ILT);
6009        sc->ilt = NULL;
6010    }
6011}
6012
6013static void
6014bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6015{
6016    if (sc->ilt->lines != NULL) {
6017        free(sc->ilt->lines, M_BXE_ILT);
6018        sc->ilt->lines = NULL;
6019    }
6020}
6021
6022static void
6023bxe_free_mem(struct bxe_softc *sc)
6024{
6025    int i;
6026
6027    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6028        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6029        sc->context[i].vcxt = NULL;
6030        sc->context[i].size = 0;
6031    }
6032
6033    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6034
6035    bxe_free_ilt_lines_mem(sc);
6036
6037}
6038
6039static int
6040bxe_alloc_mem(struct bxe_softc *sc)
6041{
6042
6043    int context_size;
6044    int allocated;
6045    int i;
6046
6047    /*
6048     * Allocate memory for CDU context:
6049     * This memory is allocated separately and not in the generic ILT
6050     * functions because CDU differs in few aspects:
6051     * 1. There can be multiple entities allocating memory for context -
6052     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6053     * its own ILT lines.
6054     * 2. Since CDU page-size is not a single 4KB page (which is the case
6055     * for the other ILT clients), to be efficient we want to support
6056     * allocation of sub-page-size in the last entry.
6057     * 3. Context pointers are used by the driver to pass to FW / update
6058     * the context (for the other ILT clients the pointers are used just to
6059     * free the memory during unload).
6060     */
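        /*
         * Sizing sketch (hypothetical numbers): if context_size works out to
         * 40KB while CDU_ILT_PAGE_SZ is 32KB, the loop below allocates one
         * 32KB chunk followed by one 8KB chunk, so only the final entry is
         * sub-page-sized.
         */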
6061    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6062    for (i = 0, allocated = 0; allocated < context_size; i++) {
6063        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6064                                  (context_size - allocated));
6065
6066        if (bxe_dma_alloc(sc, sc->context[i].size,
6067                          &sc->context[i].vcxt_dma,
6068                          "cdu context") != 0) {
6069            bxe_free_mem(sc);
6070            return (-1);
6071        }
6072
6073        sc->context[i].vcxt =
6074            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6075
6076        allocated += sc->context[i].size;
6077    }
6078
6079    bxe_alloc_ilt_lines_mem(sc);
6080
6081    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6082          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6083    {
6084        for (i = 0; i < 4; i++) {
6085            BLOGD(sc, DBG_LOAD,
6086                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6087                  i,
6088                  sc->ilt->clients[i].page_size,
6089                  sc->ilt->clients[i].start,
6090                  sc->ilt->clients[i].end,
6091                  sc->ilt->clients[i].client_num,
6092                  sc->ilt->clients[i].flags);
6093        }
6094    }
6095    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6096        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6097        bxe_free_mem(sc);
6098        return (-1);
6099    }
6100
6101    return (0);
6102}
6103
6104static void
6105bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6106{
6107    struct bxe_softc *sc;
6108    int i;
6109
6110    sc = fp->sc;
6111
6112    if (fp->rx_mbuf_tag == NULL) {
6113        return;
6114    }
6115
6116    /* free all mbufs and unload all maps */
6117    for (i = 0; i < RX_BD_TOTAL; i++) {
6118        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6119            bus_dmamap_sync(fp->rx_mbuf_tag,
6120                            fp->rx_mbuf_chain[i].m_map,
6121                            BUS_DMASYNC_POSTREAD);
6122            bus_dmamap_unload(fp->rx_mbuf_tag,
6123                              fp->rx_mbuf_chain[i].m_map);
6124        }
6125
6126        if (fp->rx_mbuf_chain[i].m != NULL) {
6127            m_freem(fp->rx_mbuf_chain[i].m);
6128            fp->rx_mbuf_chain[i].m = NULL;
6129            fp->eth_q_stats.mbuf_alloc_rx--;
6130        }
6131    }
6132}
6133
6134static void
6135bxe_free_tpa_pool(struct bxe_fastpath *fp)
6136{
6137    struct bxe_softc *sc;
6138    int i, max_agg_queues;
6139
6140    sc = fp->sc;
6141
6142    if (fp->rx_mbuf_tag == NULL) {
6143        return;
6144    }
6145
6146    max_agg_queues = MAX_AGG_QS(sc);
6147
6148    /* release all mbufs and unload all DMA maps in the TPA pool */
6149    for (i = 0; i < max_agg_queues; i++) {
6150        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6151            bus_dmamap_sync(fp->rx_mbuf_tag,
6152                            fp->rx_tpa_info[i].bd.m_map,
6153                            BUS_DMASYNC_POSTREAD);
6154            bus_dmamap_unload(fp->rx_mbuf_tag,
6155                              fp->rx_tpa_info[i].bd.m_map);
6156        }
6157
6158        if (fp->rx_tpa_info[i].bd.m != NULL) {
6159            m_freem(fp->rx_tpa_info[i].bd.m);
6160            fp->rx_tpa_info[i].bd.m = NULL;
6161            fp->eth_q_stats.mbuf_alloc_tpa--;
6162        }
6163    }
6164}
6165
6166static void
6167bxe_free_sge_chain(struct bxe_fastpath *fp)
6168{
6169    struct bxe_softc *sc;
6170    int i;
6171
6172    sc = fp->sc;
6173
6174    if (fp->rx_sge_mbuf_tag == NULL) {
6175        return;
6176    }
6177
6178    /* free all mbufs and unload all maps */
6179    for (i = 0; i < RX_SGE_TOTAL; i++) {
6180        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6181            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6182                            fp->rx_sge_mbuf_chain[i].m_map,
6183                            BUS_DMASYNC_POSTREAD);
6184            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6185                              fp->rx_sge_mbuf_chain[i].m_map);
6186        }
6187
6188        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6189            m_freem(fp->rx_sge_mbuf_chain[i].m);
6190            fp->rx_sge_mbuf_chain[i].m = NULL;
6191            fp->eth_q_stats.mbuf_alloc_sge--;
6192        }
6193    }
6194}
6195
6196static void
6197bxe_free_fp_buffers(struct bxe_softc *sc)
6198{
6199    struct bxe_fastpath *fp;
6200    int i;
6201
6202    for (i = 0; i < sc->num_queues; i++) {
6203        fp = &sc->fp[i];
6204
6205#if __FreeBSD_version >= 901504
6206        if (fp->tx_br != NULL) {
6207            /* just in case bxe_mq_flush() wasn't called */
6208            if (mtx_initialized(&fp->tx_mtx)) {
6209                struct mbuf *m;
6210
6211                BXE_FP_TX_LOCK(fp);
6212                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6213                    m_freem(m);
6214                BXE_FP_TX_UNLOCK(fp);
6215            }
6216        }
6217#endif
6218
6219        /* free all RX buffers */
6220        bxe_free_rx_bd_chain(fp);
6221        bxe_free_tpa_pool(fp);
6222        bxe_free_sge_chain(fp);
6223
6224        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6225            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6226                  fp->eth_q_stats.mbuf_alloc_rx);
6227        }
6228
6229        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6230            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6231                  fp->eth_q_stats.mbuf_alloc_sge);
6232        }
6233
6234        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6235            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6236                  fp->eth_q_stats.mbuf_alloc_tpa);
6237        }
6238
6239        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6240            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6241                  fp->eth_q_stats.mbuf_alloc_tx);
6242        }
6243
6244        /* XXX verify all mbufs were reclaimed */
6245    }
6246}
6247
6248static int
6249bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6250                     uint16_t            prev_index,
6251                     uint16_t            index)
6252{
6253    struct bxe_sw_rx_bd *rx_buf;
6254    struct eth_rx_bd *rx_bd;
6255    bus_dma_segment_t segs[1];
6256    bus_dmamap_t map;
6257    struct mbuf *m;
6258    int nsegs, rc;
6259
6260    rc = 0;
6261
6262    /* allocate the new RX BD mbuf */
6263    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6264    if (__predict_false(m == NULL)) {
6265        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6266        return (ENOBUFS);
6267    }
6268
6269    fp->eth_q_stats.mbuf_alloc_rx++;
6270
6271    /* initialize the mbuf buffer length */
6272    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6273
6274    /* map the mbuf into non-paged pool */
6275    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6276                                 fp->rx_mbuf_spare_map,
6277                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6278    if (__predict_false(rc != 0)) {
6279        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6280        m_freem(m);
6281        fp->eth_q_stats.mbuf_alloc_rx--;
6282        return (rc);
6283    }
6284
6285    /* all mbufs must map to a single segment */
6286    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6287
6288    /* release any existing RX BD mbuf mappings */
6289
6290    if (prev_index != index) {
6291        rx_buf = &fp->rx_mbuf_chain[prev_index];
6292
6293        if (rx_buf->m_map != NULL) {
6294            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6295                            BUS_DMASYNC_POSTREAD);
6296            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6297        }
6298
6299        /*
6300         * We only get here from bxe_rxeof() when the maximum number
6301         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6302         * holds the mbuf in the prev_index so it's OK to NULL it out
6303         * here without concern of a memory leak.
6304         */
6305        fp->rx_mbuf_chain[prev_index].m = NULL;
6306    }
6307
6308    rx_buf = &fp->rx_mbuf_chain[index];
6309
6310    if (rx_buf->m_map != NULL) {
6311        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6312                        BUS_DMASYNC_POSTREAD);
6313        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6314    }
6315
6316    /* save the mbuf and mapping info for a future packet */
6317    map = (prev_index != index) ?
6318              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6319    rx_buf->m_map = fp->rx_mbuf_spare_map;
6320    fp->rx_mbuf_spare_map = map;
6321    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6322                    BUS_DMASYNC_PREREAD);
6323    rx_buf->m = m;
6324
6325    rx_bd = &fp->rx_chain[index];
6326    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6327    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6328
6329    return (rc);
6330}
6331
6332static int
6333bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6334                      int                 queue)
6335{
6336    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6337    bus_dma_segment_t segs[1];
6338    bus_dmamap_t map;
6339    struct mbuf *m;
6340    int nsegs;
6341    int rc = 0;
6342
6343    /* allocate the new TPA mbuf */
6344    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6345    if (__predict_false(m == NULL)) {
6346        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6347        return (ENOBUFS);
6348    }
6349
6350    fp->eth_q_stats.mbuf_alloc_tpa++;
6351
6352    /* initialize the mbuf buffer length */
6353    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6354
6355    /* map the mbuf into non-paged pool */
6356    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6357                                 fp->rx_tpa_info_mbuf_spare_map,
6358                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6359    if (__predict_false(rc != 0)) {
6360        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6361        m_free(m);
6362        fp->eth_q_stats.mbuf_alloc_tpa--;
6363        return (rc);
6364    }
6365
6366    /* all mbufs must map to a single segment */
6367    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6368
6369    /* release any existing TPA mbuf mapping */
6370    if (tpa_info->bd.m_map != NULL) {
6371        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6372                        BUS_DMASYNC_POSTREAD);
6373        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6374    }
6375
6376    /* save the mbuf and mapping info for the TPA mbuf */
6377    map = tpa_info->bd.m_map;
6378    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6379    fp->rx_tpa_info_mbuf_spare_map = map;
6380    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6381                    BUS_DMASYNC_PREREAD);
6382    tpa_info->bd.m = m;
6383    tpa_info->seg = segs[0];
6384
6385    return (rc);
6386}
6387
6388/*
6389 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6390 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6391 * chain.
6392 */
6393static int
6394bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6395                      uint16_t            index)
6396{
6397    struct bxe_sw_rx_bd *sge_buf;
6398    struct eth_rx_sge *sge;
6399    bus_dma_segment_t segs[1];
6400    bus_dmamap_t map;
6401    struct mbuf *m;
6402    int nsegs;
6403    int rc = 0;
6404
6405    /* allocate a new SGE mbuf */
6406    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6407    if (__predict_false(m == NULL)) {
6408        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6409        return (ENOMEM);
6410    }
6411
6412    fp->eth_q_stats.mbuf_alloc_sge++;
6413
6414    /* initialize the mbuf buffer length */
6415    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6416
6417    /* map the SGE mbuf into non-paged pool */
6418    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6419                                 fp->rx_sge_mbuf_spare_map,
6420                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6421    if (__predict_false(rc != 0)) {
6422        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6423        m_freem(m);
6424        fp->eth_q_stats.mbuf_alloc_sge--;
6425        return (rc);
6426    }
6427
6428    /* all mbufs must map to a single segment */
6429    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6430
6431    sge_buf = &fp->rx_sge_mbuf_chain[index];
6432
6433    /* release any existing SGE mbuf mapping */
6434    if (sge_buf->m_map != NULL) {
6435        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6436                        BUS_DMASYNC_POSTREAD);
6437        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6438    }
6439
6440    /* save the mbuf and mapping info for a future packet */
6441    map = sge_buf->m_map;
6442    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6443    fp->rx_sge_mbuf_spare_map = map;
6444    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6445                    BUS_DMASYNC_PREREAD);
6446    sge_buf->m = m;
6447
6448    sge = &fp->rx_sge_chain[index];
6449    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6450    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6451
6452    return (rc);
6453}
6454
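/*
 * Populate the RX rings of every fastpath queue: fill the RX BD chain with
 * max_rx_bufs mbufs, then the TPA aggregation pool and, if TPA could be
 * enabled, the RX SGE chain. On any allocation failure the error path frees
 * what was allocated for the current queue and returns ENOBUFS.
 */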
6455static __noinline int
6456bxe_alloc_fp_buffers(struct bxe_softc *sc)
6457{
6458    struct bxe_fastpath *fp;
6459    int i, j, rc = 0;
6460    int ring_prod, cqe_ring_prod;
6461    int max_agg_queues;
6462
6463    for (i = 0; i < sc->num_queues; i++) {
6464        fp = &sc->fp[i];
6465
6466        ring_prod = cqe_ring_prod = 0;
6467        fp->rx_bd_cons = 0;
6468        fp->rx_cq_cons = 0;
6469
6470        /* allocate buffers for the RX BDs in RX BD chain */
6471        for (j = 0; j < sc->max_rx_bufs; j++) {
6472            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6473            if (rc != 0) {
6474                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6475                      i, rc);
6476                goto bxe_alloc_fp_buffers_error;
6477            }
6478
6479            ring_prod     = RX_BD_NEXT(ring_prod);
6480            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6481        }
6482
6483        fp->rx_bd_prod = ring_prod;
6484        fp->rx_cq_prod = cqe_ring_prod;
6485        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6486
6487        max_agg_queues = MAX_AGG_QS(sc);
6488
6489        fp->tpa_enable = TRUE;
6490
6491        /* fill the TPA pool */
6492        for (j = 0; j < max_agg_queues; j++) {
6493            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6494            if (rc != 0) {
6495                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6496                          i, j);
6497                fp->tpa_enable = FALSE;
6498                goto bxe_alloc_fp_buffers_error;
6499            }
6500
6501            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6502        }
6503
6504        if (fp->tpa_enable) {
6505            /* fill the RX SGE chain */
6506            ring_prod = 0;
6507            for (j = 0; j < RX_SGE_USABLE; j++) {
6508                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6509                if (rc != 0) {
6510                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6511                              i, ring_prod);
6512                    fp->tpa_enable = FALSE;
6513                    ring_prod = 0;
6514                    goto bxe_alloc_fp_buffers_error;
6515                }
6516
6517                ring_prod = RX_SGE_NEXT(ring_prod);
6518            }
6519
6520            fp->rx_sge_prod = ring_prod;
6521        }
6522    }
6523
6524    return (0);
6525
6526bxe_alloc_fp_buffers_error:
6527
6528    /* unwind what was already allocated */
6529    bxe_free_rx_bd_chain(fp);
6530    bxe_free_tpa_pool(fp);
6531    bxe_free_sge_chain(fp);
6532
6533    return (ENOBUFS);
6534}
6535
6536static void
6537bxe_free_fw_stats_mem(struct bxe_softc *sc)
6538{
6539    bxe_dma_free(sc, &sc->fw_stats_dma);
6540
6541    sc->fw_stats_num = 0;
6542
6543    sc->fw_stats_req_size = 0;
6544    sc->fw_stats_req = NULL;
6545    sc->fw_stats_req_mapping = 0;
6546
6547    sc->fw_stats_data_size = 0;
6548    sc->fw_stats_data = NULL;
6549    sc->fw_stats_data_mapping = 0;
6550}
6551
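/*
 * Allocate a single DMA buffer holding the FW statistics request
 * (stats_query_header plus command groups) immediately followed by the
 * statistics data area, and set up the req/data virtual and bus address
 * shortcuts into it.
 */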
6552static int
6553bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6554{
6555    uint8_t num_queue_stats;
6556    int num_groups;
6557
6558    /* number of queues for statistics is number of eth queues */
6559    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6560
6561    /*
6562     * Total number of FW statistics requests =
6563     *   1 for port stats + 1 for PF stats + num of queues
6564     */
6565    sc->fw_stats_num = (2 + num_queue_stats);
6566
6567    /*
6568     * Request is built from stats_query_header and an array of
6569     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6570     * rules. The real number or requests is configured in the
6571     * stats_query_header.
6572     */
6573    num_groups =
6574        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6575         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
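    /* this is a ceiling division: howmany(sc->fw_stats_num, STATS_QUERY_CMD_COUNT) */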
6576
6577    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6578          sc->fw_stats_num, num_groups);
6579
6580    sc->fw_stats_req_size =
6581        (sizeof(struct stats_query_header) +
6582         (num_groups * sizeof(struct stats_query_cmd_group)));
6583
6584    /*
6585     * Data for statistics requests + stats_counter.
6586     * stats_counter holds per-STORM counters that are incremented when
6587     * STORM has finished with the current request. Memory for FCoE
6588     * STORM has finished with the current request. Memory for the FCoE
6589     * offloaded statistics is counted anyway, even if they are not sent.
6590     * in memory allocated by the VF, not here.
6591     */
6592    sc->fw_stats_data_size =
6593        (sizeof(struct stats_counter) +
6594         sizeof(struct per_port_stats) +
6595         sizeof(struct per_pf_stats) +
6596         /* sizeof(struct fcoe_statistics_params) + */
6597         (sizeof(struct per_queue_stats) * num_queue_stats));
6598
6599    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6600                      &sc->fw_stats_dma, "fw stats") != 0) {
6601        bxe_free_fw_stats_mem(sc);
6602        return (-1);
6603    }
6604
6605    /* set up the shortcuts */
6606
6607    sc->fw_stats_req =
6608        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6609    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6610
6611    sc->fw_stats_data =
6612        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6613                                     sc->fw_stats_req_size);
6614    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6615                                 sc->fw_stats_req_size);
6616
6617    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6618          (uintmax_t)sc->fw_stats_req_mapping);
6619
6620    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6621          (uintmax_t)sc->fw_stats_data_mapping);
6622
6623    return (0);
6624}
6625
6626/*
6627 * Bits map:
6628 * 0-7  - Engine0 load counter.
6629 * 8-15 - Engine1 load counter.
6630 * 16   - Engine0 RESET_IN_PROGRESS bit.
6631 * 17   - Engine1 RESET_IN_PROGRESS bit.
6632 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6633 *        function on the engine
6634 * 19   - Engine1 ONE_IS_LOADED.
6635 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
6636 *        engine leaders to complete (check both RESET_IN_PROGRESS bits and
6637 *        not just the one belonging to its engine).
6638 */
6639#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6640#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6641#define BXE_PATH0_LOAD_CNT_SHIFT  0
6642#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6643#define BXE_PATH1_LOAD_CNT_SHIFT  8
6644#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6645#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6646#define BXE_GLOBAL_RESET_BIT      0x00040000
6647
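/*
 * The set/clear helpers below read-modify-write BXE_RECOVERY_GLOB_REG under
 * HW_LOCK_RESOURCE_RECOVERY_REG; the check helpers only read it. Note that
 * despite the "load counter" name, each 8-bit engine field is really a
 * bitmask of loaded PFs: bxe_set_pf_load() sets bit (1 << SC_ABS_FUNC(sc))
 * and bxe_clear_pf_load() clears it, so a non-zero field means at least one
 * PF is still loaded on that engine.
 */
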
6648/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6649static void
6650bxe_set_reset_global(struct bxe_softc *sc)
6651{
6652    uint32_t val;
6653    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6654    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6655    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6656    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6657}
6658
6659/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6660static void
6661bxe_clear_reset_global(struct bxe_softc *sc)
6662{
6663    uint32_t val;
6664    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6665    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6666    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6667    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6668}
6669
6670/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6671static uint8_t
6672bxe_reset_is_global(struct bxe_softc *sc)
6673{
6674    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6675    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6676    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6677}
6678
6679/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6680static void
6681bxe_set_reset_done(struct bxe_softc *sc)
6682{
6683    uint32_t val;
6684    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6685                                 BXE_PATH0_RST_IN_PROG_BIT;
6686
6687    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6688
6689    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6690    /* Clear the bit */
6691    val &= ~bit;
6692    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6693
6694    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6695}
6696
6697/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6698static void
6699bxe_set_reset_in_progress(struct bxe_softc *sc)
6700{
6701    uint32_t val;
6702    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6703                                 BXE_PATH0_RST_IN_PROG_BIT;
6704
6705    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6706
6707    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6708    /* Set the bit */
6709    val |= bit;
6710    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6711
6712    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6713}
6714
6715/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6716static uint8_t
6717bxe_reset_is_done(struct bxe_softc *sc,
6718                  int              engine)
6719{
6720    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6721    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6722                            BXE_PATH0_RST_IN_PROG_BIT;
6723
6724    /* return false if bit is set */
6725    return (val & bit) ? FALSE : TRUE;
6726}
6727
6728/* get the load status for an engine, should be run under rtnl lock */
6729static uint8_t
6730bxe_get_load_status(struct bxe_softc *sc,
6731                    int              engine)
6732{
6733    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6734                             BXE_PATH0_LOAD_CNT_MASK;
6735    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6736                              BXE_PATH0_LOAD_CNT_SHIFT;
6737    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6738
6739    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6740
6741    val = ((val & mask) >> shift);
6742
6743    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6744
6745    return (val != 0);
6746}
6747
6748/* set pf load mark */
6749/* XXX needs to be under rtnl lock */
6750static void
6751bxe_set_pf_load(struct bxe_softc *sc)
6752{
6753    uint32_t val;
6754    uint32_t val1;
6755    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6756                                  BXE_PATH0_LOAD_CNT_MASK;
6757    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6758                                   BXE_PATH0_LOAD_CNT_SHIFT;
6759
6760    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6761
6762    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6763    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6764
6765    /* get the current counter value */
6766    val1 = ((val & mask) >> shift);
6767
6768    /* set bit of this PF */
6769    val1 |= (1 << SC_ABS_FUNC(sc));
6770
6771    /* clear the old value */
6772    val &= ~mask;
6773
6774    /* set the new one */
6775    val |= ((val1 << shift) & mask);
6776
6777    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6778
6779    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6780}
6781
6782/* clear pf load mark */
6783/* XXX needs to be under rtnl lock */
6784static uint8_t
6785bxe_clear_pf_load(struct bxe_softc *sc)
6786{
6787    uint32_t val1, val;
6788    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6789                                  BXE_PATH0_LOAD_CNT_MASK;
6790    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6791                                   BXE_PATH0_LOAD_CNT_SHIFT;
6792
6793    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6794    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6795    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6796
6797    /* get the current counter value */
6798    val1 = (val & mask) >> shift;
6799
6800    /* clear bit of that PF */
6801    val1 &= ~(1 << SC_ABS_FUNC(sc));
6802
6803    /* clear the old value */
6804    val &= ~mask;
6805
6806    /* set the new one */
6807    val |= ((val1 << shift) & mask);
6808
6809    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6810    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6811    return (val1 != 0);
6812}
6813
6814/* send load request to the MCP and analyze the response */
6815static int
6816bxe_nic_load_request(struct bxe_softc *sc,
6817                     uint32_t         *load_code)
6818{
6819    /* init fw_seq */
6820    sc->fw_seq =
6821        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6822         DRV_MSG_SEQ_NUMBER_MASK);
6823
6824    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6825
6826    /* get the current FW pulse sequence */
6827    sc->fw_drv_pulse_wr_seq =
6828        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6829         DRV_PULSE_SEQ_MASK);
6830
6831    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6832          sc->fw_drv_pulse_wr_seq);
6833
6834    /* load request */
6835    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6836                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6837
6838    /* if the MCP fails to respond we must abort */
6839    if (!(*load_code)) {
6840        BLOGE(sc, "MCP response failure!\n");
6841        return (-1);
6842    }
6843
6844    /* if MCP refused then must abort */
6845    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6846        BLOGE(sc, "MCP refused load request\n");
6847        return (-1);
6848    }
6849
6850    return (0);
6851}
6852
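/*
 * The load_code returned above drives the rest of nic_load: the
 * FW_MSG_CODE_DRV_LOAD_COMMON* responses mean no other PF has initialized
 * the FW yet (so the running-FW version check below is skipped), and the
 * COMMON/COMMON_CHIP/PORT responses make this function the PMF (see
 * bxe_nic_load_pmf()).
 */
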
6853/*
6854 * Check whether another PF has already loaded FW to chip. In virtualized
6855 * environments a PF from another VM may have already initialized the device,
6856 * including loading the FW.
6857 */
6858static int
6859bxe_nic_load_analyze_req(struct bxe_softc *sc,
6860                         uint32_t         load_code)
6861{
6862    uint32_t my_fw, loaded_fw;
6863
6864    /* is another pf loaded on this engine? */
6865    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6866        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6867        /* build my FW version dword */
6868        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6869                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6870                 (BCM_5710_FW_REVISION_VERSION << 16) +
6871                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6872
6873        /* read loaded FW from chip */
6874        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6875        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6876              loaded_fw, my_fw);
6877
6878        /* abort nic load if version mismatch */
6879        if (my_fw != loaded_fw) {
6880            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6881                  loaded_fw, my_fw);
6882            return (-1);
6883        }
6884    }
6885
6886    return (0);
6887}
6888
6889/* mark PMF if applicable */
6890static void
6891bxe_nic_load_pmf(struct bxe_softc *sc,
6892                 uint32_t         load_code)
6893{
6894    uint32_t ncsi_oem_data_addr;
6895
6896    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6897        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6898        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6899        /*
6900         * Barrier for ordering between the write to sc->port.pmf here and
6901         * the read from the periodic task.
6902         */
6903        sc->port.pmf = 1;
6904        mb();
6905    } else {
6906        sc->port.pmf = 0;
6907    }
6908
6909    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6910
6911    /* XXX needed? */
6912    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6913        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6914            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6915            if (ncsi_oem_data_addr) {
6916                REG_WR(sc,
6917                       (ncsi_oem_data_addr +
6918                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6919                       0);
6920            }
6921        }
6922    }
6923}
6924
6925static void
6926bxe_read_mf_cfg(struct bxe_softc *sc)
6927{
6928    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6929    int abs_func;
6930    int vn;
6931
6932    if (BXE_NOMCP(sc)) {
6933        return; /* what should be the default value in this case? */
6934    }
6935
6936    /*
6937     * The formula for computing the absolute function number is...
6938     * For 2 port configuration (4 functions per port):
6939     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6940     * For 4 port configuration (2 functions per port):
6941     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6942     */
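    /*
     * e.g. 2-port mode (n == 1) with SC_PORT == 1 and SC_PATH == 0:
     * vn 0..3 map to abs_func 1, 3, 5 and 7.
     */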
6943    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6944        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6945        if (abs_func >= E1H_FUNC_MAX) {
6946            break;
6947        }
6948        sc->devinfo.mf_info.mf_config[vn] =
6949            MFCFG_RD(sc, func_mf_config[abs_func].config);
6950    }
6951
6952    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6953        FUNC_MF_CFG_FUNC_DISABLED) {
6954        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6955        sc->flags |= BXE_MF_FUNC_DIS;
6956    } else {
6957        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6958        sc->flags &= ~BXE_MF_FUNC_DIS;
6959    }
6960}
6961
6962/* acquire split MCP access lock register */
6963static int bxe_acquire_alr(struct bxe_softc *sc)
6964{
6965    uint32_t j, val;
6966
6967    for (j = 0; j < 1000; j++) {
6968        val = (1UL << 31);
6969        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6970        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6971        if (val & (1UL << 31))
6972            break;
6973
6974        DELAY(5000);
6975    }
6976
6977    if (!(val & (1UL << 31))) {
6978        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6979        return (-1);
6980    }
6981
6982    return (0);
6983}
6984
6985/* release split MCP access lock register */
6986static void bxe_release_alr(struct bxe_softc *sc)
6987{
6988    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6989}
6990
6991static void
6992bxe_fan_failure(struct bxe_softc *sc)
6993{
6994    int port = SC_PORT(sc);
6995    uint32_t ext_phy_config;
6996
6997    /* mark the failure */
6998    ext_phy_config =
6999        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7000
7001    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7002    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7003    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7004             ext_phy_config);
7005
7006    /* log the failure */
7007    BLOGW(sc, "Fan Failure has caused the driver to shut down "
7008              "the card to prevent permanent damage. "
7009              "Please contact OEM Support for assistance\n");
7010
7011    /* XXX */
7012#if 1
7013    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7014#else
7015    /*
7016     * Schedule device reset (unload)
7017     * This is needed because some boards consume enough power while the
7018     * driver is up to overheat if the fan fails.
7019     */
7020    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7021    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7022#endif
7023}
7024
7025/* this function is called upon a link interrupt */
7026static void
7027bxe_link_attn(struct bxe_softc *sc)
7028{
7029    uint32_t pause_enabled = 0;
7030    struct host_port_stats *pstats;
7031    int cmng_fns;
7032    struct bxe_fastpath *fp;
7033    int i;
7034
7035    /* Make sure that we are synced with the current statistics */
7036    bxe_stats_handle(sc, STATS_EVENT_STOP);
7037    BLOGI(sc, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7038    elink_link_update(&sc->link_params, &sc->link_vars);
7039
7040    if (sc->link_vars.link_up) {
7041
7042        /* dropless flow control */
7043        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7044            pause_enabled = 0;
7045
7046            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7047                pause_enabled = 1;
7048            }
7049
7050            REG_WR(sc,
7051                   (BAR_USTRORM_INTMEM +
7052                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7053                   pause_enabled);
7054        }
7055
7056        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7057            pstats = BXE_SP(sc, port_stats);
7058            /* reset old mac stats */
7059            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7060        }
7061
7062        if (sc->state == BXE_STATE_OPEN) {
7063            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7064        }
7065
7066        /* Restart tx when the link comes back. */
7067        FOR_EACH_ETH_QUEUE(sc, i) {
7068            fp = &sc->fp[i];
7069            taskqueue_enqueue(fp->tq, &fp->tx_task);
7070        }
7071    }
7072
7073    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7074        cmng_fns = bxe_get_cmng_fns_mode(sc);
7075
7076        if (cmng_fns != CMNG_FNS_NONE) {
7077            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7078            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7079        } else {
7080            /* rate shaping and fairness are disabled */
7081            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7082        }
7083    }
7084
7085    bxe_link_report_locked(sc);
7086
7087    if (IS_MF(sc)) {
7088        ; // XXX bxe_link_sync_notify(sc);
7089    }
7090}
7091
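/*
 * Handle newly asserted attention bits: mask them in the per-port AEU
 * register (under the port attention-mask HW lock), record them in
 * attn_state, service the hard-wired sources (NIG/link events, GPIOs and
 * general attentions), write the bits to the HC/IGU attention-set register,
 * and finally restore the NIG interrupt mask (waiting for the IGU ack first
 * when the IGU is in use).
 */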
7092static void
7093bxe_attn_int_asserted(struct bxe_softc *sc,
7094                      uint32_t         asserted)
7095{
7096    int port = SC_PORT(sc);
7097    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7098                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7099    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7100                                        NIG_REG_MASK_INTERRUPT_PORT0;
7101    uint32_t aeu_mask;
7102    uint32_t nig_mask = 0;
7103    uint32_t reg_addr;
7104    uint32_t igu_acked;
7105    uint32_t cnt;
7106
7107    if (sc->attn_state & asserted) {
7108        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7109    }
7110
7111    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7112
7113    aeu_mask = REG_RD(sc, aeu_addr);
7114
7115    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7116          aeu_mask, asserted);
7117
7118    aeu_mask &= ~(asserted & 0x3ff);
7119
7120    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7121
7122    REG_WR(sc, aeu_addr, aeu_mask);
7123
7124    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7125
7126    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7127    sc->attn_state |= asserted;
7128    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7129
7130    if (asserted & ATTN_HARD_WIRED_MASK) {
7131        if (asserted & ATTN_NIG_FOR_FUNC) {
7132
7133            bxe_acquire_phy_lock(sc);
7134            /* save nig interrupt mask */
7135            nig_mask = REG_RD(sc, nig_int_mask_addr);
7136
7137            /* If nig_mask is not set, no need to call the update function */
7138            if (nig_mask) {
7139                REG_WR(sc, nig_int_mask_addr, 0);
7140
7141                bxe_link_attn(sc);
7142            }
7143
7144            /* handle unicore attn? */
7145        }
7146
7147        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7148            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7149        }
7150
7151        if (asserted & GPIO_2_FUNC) {
7152            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7153        }
7154
7155        if (asserted & GPIO_3_FUNC) {
7156            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7157        }
7158
7159        if (asserted & GPIO_4_FUNC) {
7160            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7161        }
7162
7163        if (port == 0) {
7164            if (asserted & ATTN_GENERAL_ATTN_1) {
7165                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7166                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7167            }
7168            if (asserted & ATTN_GENERAL_ATTN_2) {
7169                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7170                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7171            }
7172            if (asserted & ATTN_GENERAL_ATTN_3) {
7173                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7174                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7175            }
7176        } else {
7177            if (asserted & ATTN_GENERAL_ATTN_4) {
7178                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7179                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7180            }
7181            if (asserted & ATTN_GENERAL_ATTN_5) {
7182                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7183                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7184            }
7185            if (asserted & ATTN_GENERAL_ATTN_6) {
7186                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7187                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7188            }
7189        }
7190    } /* hardwired */
7191
7192    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7193        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7194    } else {
7195        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7196    }
7197
7198    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7199          asserted,
7200          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7201    REG_WR(sc, reg_addr, asserted);
7202
7203    /* now set back the mask */
7204    if (asserted & ATTN_NIG_FOR_FUNC) {
7205        /*
7206         * Verify that IGU ack through BAR was written before restoring
7207         * NIG mask. This loop should exit after 2-3 iterations max.
7208         */
7209        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7210            cnt = 0;
7211
7212            do {
7213                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7214            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7215                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7216
7217            if (!igu_acked) {
7218                BLOGE(sc, "Failed to verify IGU ack on time\n");
7219            }
7220
7221            mb();
7222        }
7223
7224        REG_WR(sc, nig_int_mask_addr, nig_mask);
7225
7226        bxe_release_phy_lock(sc);
7227    }
7228}
7229
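/*
 * The bxe_check_blocks_with_parity0..4() helpers below each decode one of
 * the five AEU after-invert parity signatures, optionally printing the name
 * of every HW block that reported a parity error, and return the running
 * count of reported blocks. parity1 and parity3 additionally set *global for
 * errors (VAUX PCI core, MCP ROM/UMP/SCPAD) that require a global recovery.
 */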
7230static void
7231bxe_print_next_block(struct bxe_softc *sc,
7232                     int              idx,
7233                     const char       *blk)
7234{
7235    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7236}
7237
7238static int
7239bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7240                              uint32_t         sig,
7241                              int              par_num,
7242                              uint8_t          print)
7243{
7244    uint32_t cur_bit = 0;
7245    int i = 0;
7246
7247    for (i = 0; sig; i++) {
7248        cur_bit = ((uint32_t)0x1 << i);
7249        if (sig & cur_bit) {
7250            switch (cur_bit) {
7251            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7252                if (print)
7253                    bxe_print_next_block(sc, par_num++, "BRB");
7254                break;
7255            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7256                if (print)
7257                    bxe_print_next_block(sc, par_num++, "PARSER");
7258                break;
7259            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7260                if (print)
7261                    bxe_print_next_block(sc, par_num++, "TSDM");
7262                break;
7263            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7264                if (print)
7265                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7266                break;
7267            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7268                if (print)
7269                    bxe_print_next_block(sc, par_num++, "TCM");
7270                break;
7271            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7272                if (print)
7273                    bxe_print_next_block(sc, par_num++, "TSEMI");
7274                break;
7275            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7276                if (print)
7277                    bxe_print_next_block(sc, par_num++, "XPB");
7278                break;
7279            }
7280
7281            /* Clear the bit */
7282            sig &= ~cur_bit;
7283        }
7284    }
7285
7286    return (par_num);
7287}
7288
7289static int
7290bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7291                              uint32_t         sig,
7292                              int              par_num,
7293                              uint8_t          *global,
7294                              uint8_t          print)
7295{
7296    int i = 0;
7297    uint32_t cur_bit = 0;
7298    for (i = 0; sig; i++) {
7299        cur_bit = ((uint32_t)0x1 << i);
7300        if (sig & cur_bit) {
7301            switch (cur_bit) {
7302            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7303                if (print)
7304                    bxe_print_next_block(sc, par_num++, "PBF");
7305                break;
7306            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7307                if (print)
7308                    bxe_print_next_block(sc, par_num++, "QM");
7309                break;
7310            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7311                if (print)
7312                    bxe_print_next_block(sc, par_num++, "TM");
7313                break;
7314            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7315                if (print)
7316                    bxe_print_next_block(sc, par_num++, "XSDM");
7317                break;
7318            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7319                if (print)
7320                    bxe_print_next_block(sc, par_num++, "XCM");
7321                break;
7322            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7323                if (print)
7324                    bxe_print_next_block(sc, par_num++, "XSEMI");
7325                break;
7326            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7327                if (print)
7328                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7329                break;
7330            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7331                if (print)
7332                    bxe_print_next_block(sc, par_num++, "NIG");
7333                break;
7334            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7335                if (print)
7336                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7337                *global = TRUE;
7338                break;
7339            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7340                if (print)
7341                    bxe_print_next_block(sc, par_num++, "DEBUG");
7342                break;
7343            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7344                if (print)
7345                    bxe_print_next_block(sc, par_num++, "USDM");
7346                break;
7347            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7348                if (print)
7349                    bxe_print_next_block(sc, par_num++, "UCM");
7350                break;
7351            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7352                if (print)
7353                    bxe_print_next_block(sc, par_num++, "USEMI");
7354                break;
7355            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7356                if (print)
7357                    bxe_print_next_block(sc, par_num++, "UPB");
7358                break;
7359            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7360                if (print)
7361                    bxe_print_next_block(sc, par_num++, "CSDM");
7362                break;
7363            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7364                if (print)
7365                    bxe_print_next_block(sc, par_num++, "CCM");
7366                break;
7367            }
7368
7369            /* Clear the bit */
7370            sig &= ~cur_bit;
7371        }
7372    }
7373
7374    return (par_num);
7375}
7376
7377static int
7378bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7379                              uint32_t         sig,
7380                              int              par_num,
7381                              uint8_t          print)
7382{
7383    uint32_t cur_bit = 0;
7384    int i = 0;
7385
7386    for (i = 0; sig; i++) {
7387        cur_bit = ((uint32_t)0x1 << i);
7388        if (sig & cur_bit) {
7389            switch (cur_bit) {
7390            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7391                if (print)
7392                    bxe_print_next_block(sc, par_num++, "CSEMI");
7393                break;
7394            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7395                if (print)
7396                    bxe_print_next_block(sc, par_num++, "PXP");
7397                break;
7398            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7399                if (print)
7400                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7401                break;
7402            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7403                if (print)
7404                    bxe_print_next_block(sc, par_num++, "CFC");
7405                break;
7406            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7407                if (print)
7408                    bxe_print_next_block(sc, par_num++, "CDU");
7409                break;
7410            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7411                if (print)
7412                    bxe_print_next_block(sc, par_num++, "DMAE");
7413                break;
7414            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7415                if (print)
7416                    bxe_print_next_block(sc, par_num++, "IGU");
7417                break;
7418            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7419                if (print)
7420                    bxe_print_next_block(sc, par_num++, "MISC");
7421                break;
7422            }
7423
7424            /* Clear the bit */
7425            sig &= ~cur_bit;
7426        }
7427    }
7428
7429    return (par_num);
7430}
7431
7432static int
7433bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7434                              uint32_t         sig,
7435                              int              par_num,
7436                              uint8_t          *global,
7437                              uint8_t          print)
7438{
7439    uint32_t cur_bit = 0;
7440    int i = 0;
7441
7442    for (i = 0; sig; i++) {
7443        cur_bit = ((uint32_t)0x1 << i);
7444        if (sig & cur_bit) {
7445            switch (cur_bit) {
7446            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7447                if (print)
7448                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7449                *global = TRUE;
7450                break;
7451            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7452                if (print)
7453                    bxe_print_next_block(sc, par_num++,
7454                              "MCP UMP RX");
7455                *global = TRUE;
7456                break;
7457            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7458                if (print)
7459                    bxe_print_next_block(sc, par_num++,
7460                              "MCP UMP TX");
7461                *global = TRUE;
7462                break;
7463            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7464                if (print)
7465                    bxe_print_next_block(sc, par_num++,
7466                              "MCP SCPAD");
7467                *global = TRUE;
7468                break;
7469            }
7470
7471            /* Clear the bit */
7472            sig &= ~cur_bit;
7473        }
7474    }
7475
7476    return (par_num);
7477}
7478
7479static int
7480bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7481                              uint32_t         sig,
7482                              int              par_num,
7483                              uint8_t          print)
7484{
7485    uint32_t cur_bit = 0;
7486    int i = 0;
7487
7488    for (i = 0; sig; i++) {
7489        cur_bit = ((uint32_t)0x1 << i);
7490        if (sig & cur_bit) {
7491            switch (cur_bit) {
7492            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7493                if (print)
7494                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7495                break;
7496            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7497                if (print)
7498                    bxe_print_next_block(sc, par_num++, "ATC");
7499                break;
7500            }
7501
7502            /* Clear the bit */
7503            sig &= ~cur_bit;
7504        }
7505    }
7506
7507    return (par_num);
7508}
7509
7510static uint8_t
7511bxe_parity_attn(struct bxe_softc *sc,
7512                uint8_t          *global,
7513                uint8_t          print,
7514                uint32_t         *sig)
7515{
7516    int par_num = 0;
7517
7518    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7519        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7520        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7521        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7522        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7523        BLOGE(sc, "Parity error: HW block parity attention:\n"
7524                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7525              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7526              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7527              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7528              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7529              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7530
7531        if (print)
7532            BLOGI(sc, "Parity errors detected in blocks: ");
7533
7534        par_num =
7535            bxe_check_blocks_with_parity0(sc, sig[0] &
7536                                          HW_PRTY_ASSERT_SET_0,
7537                                          par_num, print);
7538        par_num =
7539            bxe_check_blocks_with_parity1(sc, sig[1] &
7540                                          HW_PRTY_ASSERT_SET_1,
7541                                          par_num, global, print);
7542        par_num =
7543            bxe_check_blocks_with_parity2(sc, sig[2] &
7544                                          HW_PRTY_ASSERT_SET_2,
7545                                          par_num, print);
7546        par_num =
7547            bxe_check_blocks_with_parity3(sc, sig[3] &
7548                                          HW_PRTY_ASSERT_SET_3,
7549                                          par_num, global, print);
7550        par_num =
7551            bxe_check_blocks_with_parity4(sc, sig[4] &
7552                                          HW_PRTY_ASSERT_SET_4,
7553                                          par_num, print);
7554
7555        if (print)
7556            BLOGI(sc, "\n");
7557
7558        return (TRUE);
7559    }
7560
7561    return (FALSE);
7562}
7563
7564static uint8_t
7565bxe_chk_parity_attn(struct bxe_softc *sc,
7566                    uint8_t          *global,
7567                    uint8_t          print)
7568{
7569    struct attn_route attn = { {0} };
7570    int port = SC_PORT(sc);
7571
7572    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7573    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7574    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7575    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7576
7577    /*
7578     * Since MCP attentions can't be disabled inside the block, we need to
7579     * read AEU registers to see whether they're currently disabled
7580     */
7581    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7582                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7583                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7584                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7585
7586
7587    if (!CHIP_IS_E1x(sc))
7588        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7589
7590    return (bxe_parity_attn(sc, global, print, attn.sig));
7591}
7592
7593static void
7594bxe_attn_int_deasserted4(struct bxe_softc *sc,
7595                         uint32_t         attn)
7596{
7597    uint32_t val;
7598
7599    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7600        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7601        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7602        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7603            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7604        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7605            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7606        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7607            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7608        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7609            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7610        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7611            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7612        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7613            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7614        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7615            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7616        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7617            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7618        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7619            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7620    }
7621
7622    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7623        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7624        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7625        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7626            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7627        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7628            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7629        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7630            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7631        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7632            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7633        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7634            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7635        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7636            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7637    }
7638
7639    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7640                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7641        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7642              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7643                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7644    }
7645}
7646
7647static void
7648bxe_e1h_disable(struct bxe_softc *sc)
7649{
7650    int port = SC_PORT(sc);
7651
7652    bxe_tx_disable(sc);
7653
7654    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7655}
7656
7657static void
7658bxe_e1h_enable(struct bxe_softc *sc)
7659{
7660    int port = SC_PORT(sc);
7661
7662    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7663
7664    // XXX bxe_tx_enable(sc);
7665}
7666
7667/*
7668 * called due to MCP event (on pmf):
7669 *   reread new bandwidth configuration
7670 *   configure FW
7671 *   notify other functions about the change
7672 */
7673static void
7674bxe_config_mf_bw(struct bxe_softc *sc)
7675{
7676    if (sc->link_vars.link_up) {
7677        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7678        // XXX bxe_link_sync_notify(sc);
7679    }
7680
7681    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7682}
7683
7684static void
7685bxe_set_mf_bw(struct bxe_softc *sc)
7686{
7687    bxe_config_mf_bw(sc);
7688    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7689}
7690
7691static void
7692bxe_handle_eee_event(struct bxe_softc *sc)
7693{
7694    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7695    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7696}
7697
7698#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7699
7700static void
7701bxe_drv_info_ether_stat(struct bxe_softc *sc)
7702{
7703    struct eth_stats_info *ether_stat =
7704        &sc->sp->drv_info_to_mcp.ether_stat;
7705
7706    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7707            ETH_STAT_INFO_VERSION_LEN);
7708
7709    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7710    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7711                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7712                                          ether_stat->mac_local + MAC_PAD,
7713                                          MAC_PAD, ETH_ALEN);
7714
7715    ether_stat->mtu_size = sc->mtu;
7716
7717    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7718    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7719        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7720    }
7721
7722    // XXX ether_stat->feature_flags |= ???;
7723
7724    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7725
7726    ether_stat->txq_size = sc->tx_ring_size;
7727    ether_stat->rxq_size = sc->rx_ring_size;
7728}
7729
7730static void
7731bxe_handle_drv_info_req(struct bxe_softc *sc)
7732{
7733    enum drv_info_opcode op_code;
7734    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7735
7736    /* if drv_info version supported by MFW doesn't match - send NACK */
7737    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7738        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7739        return;
7740    }
7741
7742    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7743               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7744
7745    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7746
7747    switch (op_code) {
7748    case ETH_STATS_OPCODE:
7749        bxe_drv_info_ether_stat(sc);
7750        break;
7751    case FCOE_STATS_OPCODE:
7752    case ISCSI_STATS_OPCODE:
7753    default:
7754        /* if op code isn't supported - send NACK */
7755        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7756        return;
7757    }
7758
7759    /*
7760     * If we got a drv_info attention from the MFW then these fields are
7761     * guaranteed to be defined in shmem2.
7762     */
7763    SHMEM2_WR(sc, drv_info_host_addr_lo,
7764              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7765    SHMEM2_WR(sc, drv_info_host_addr_hi,
7766              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7767
7768    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7769}
7770
7771static void
7772bxe_dcc_event(struct bxe_softc *sc,
7773              uint32_t         dcc_event)
7774{
7775    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7776
7777    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7778        /*
7779         * This is the only place besides the function initialization
7780         * where the sc->flags can change so it is done without any
7781         * locks
7782         */
7783        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7784            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7785            sc->flags |= BXE_MF_FUNC_DIS;
7786            bxe_e1h_disable(sc);
7787        } else {
7788            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7789            sc->flags &= ~BXE_MF_FUNC_DIS;
7790            bxe_e1h_enable(sc);
7791        }
7792        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7793    }
7794
7795    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7796        bxe_config_mf_bw(sc);
7797        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7798    }
7799
7800    /* Report results to MCP */
7801    if (dcc_event)
7802        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7803    else
7804        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7805}
7806
7807static void
7808bxe_pmf_update(struct bxe_softc *sc)
7809{
7810    int port = SC_PORT(sc);
7811    uint32_t val;
7812
7813    sc->port.pmf = 1;
7814    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7815
7816    /*
7817     * We need the mb() to ensure the ordering between the writing to
7818     * sc->port.pmf here and reading it from the bxe_periodic_task().
7819     */
7820    mb();
7821
7822    /* queue a periodic task */
7823    // XXX schedule task...
7824
7825    // XXX bxe_dcbx_pmf_update(sc);
7826
7827    /* enable nig attention */
7828    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7829    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7830        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7831        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7832    } else if (!CHIP_IS_E1x(sc)) {
7833        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7834        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7835    }
7836
7837    bxe_stats_handle(sc, STATS_EVENT_PMF);
7838}
7839
7840static int
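/*
 * Dump the assert lists of the XSTORM, TSTORM, CSTORM and USTORM processors:
 * for each STORM read the last assert index and then walk its assert list in
 * internal memory, printing 16-byte entries until the invalid-opcode marker
 * is hit. Returns the total number of asserts found.
 */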
7841bxe_mc_assert(struct bxe_softc *sc)
7842{
7843    char last_idx;
7844    int i, rc = 0;
7845    uint32_t row0, row1, row2, row3;
7846
7847    /* XSTORM */
7848    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7849    if (last_idx)
7850        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7851
7852    /* print the asserts */
7853    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7854
7855        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7856        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7857        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7858        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7859
7860        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7861            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7862                  i, row3, row2, row1, row0);
7863            rc++;
7864        } else {
7865            break;
7866        }
7867    }
7868
7869    /* TSTORM */
7870    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7871    if (last_idx) {
7872        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7873    }
7874
7875    /* print the asserts */
7876    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7877
7878        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7879        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7880        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7881        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7882
7883        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7884            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7885                  i, row3, row2, row1, row0);
7886            rc++;
7887        } else {
7888            break;
7889        }
7890    }
7891
7892    /* CSTORM */
7893    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7894    if (last_idx) {
7895        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7896    }
7897
7898    /* print the asserts */
7899    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7900
7901        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7902        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7903        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7904        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7905
7906        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7907            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7908                  i, row3, row2, row1, row0);
7909            rc++;
7910        } else {
7911            break;
7912        }
7913    }
7914
7915    /* USTORM */
7916    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7917    if (last_idx) {
7918        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7919    }
7920
7921    /* print the asserts */
7922    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7923
7924        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7925        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7926        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7927        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7928
7929        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7930            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7931                  i, row3, row2, row1, row0);
7932            rc++;
7933        } else {
7934            break;
7935        }
7936    }
7937
7938    return (rc);
7939}
7940
7941static void
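/*
 * Handle the general (non-parity) attentions routed to this function: a PMF
 * link assert triggers a re-read of the MF configuration and dispatch of the
 * MCP drv_status events (DCC, MF bandwidth, drv_info requests, PMF change,
 * EEE results); MC/MCP asserts are dumped; latched GRC attentions are logged
 * and cleared.
 */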
7942bxe_attn_int_deasserted3(struct bxe_softc *sc,
7943                         uint32_t         attn)
7944{
7945    int func = SC_FUNC(sc);
7946    uint32_t val;
7947
7948    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7949
7950        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7951
7952            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7953            bxe_read_mf_cfg(sc);
7954            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7955                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7956            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7957
7958            if (val & DRV_STATUS_DCC_EVENT_MASK)
7959                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7960
7961            if (val & DRV_STATUS_SET_MF_BW)
7962                bxe_set_mf_bw(sc);
7963
7964            if (val & DRV_STATUS_DRV_INFO_REQ)
7965                bxe_handle_drv_info_req(sc);
7966
7967            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7968                bxe_pmf_update(sc);
7969
7970            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7971                bxe_handle_eee_event(sc);
7972
7973            if (sc->link_vars.periodic_flags &
7974                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7975                /* sync with link */
7976                bxe_acquire_phy_lock(sc);
7977                sc->link_vars.periodic_flags &=
7978                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7979                bxe_release_phy_lock(sc);
7980                if (IS_MF(sc))
7981                    ; // XXX bxe_link_sync_notify(sc);
7982                bxe_link_report(sc);
7983            }
7984
7985            /*
7986             * Always call it here: bxe_link_report() will
7987             * prevent the link indication duplication.
7988             */
7989            bxe_link_status_update(sc);
7990
7991        } else if (attn & BXE_MC_ASSERT_BITS) {
7992
7993            BLOGE(sc, "MC assert!\n");
7994            bxe_mc_assert(sc);
7995            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7996            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7997            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7998            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7999            bxe_panic(sc, ("MC assert!\n"));
8000
8001        } else if (attn & BXE_MCP_ASSERT) {
8002
8003            BLOGE(sc, "MCP assert!\n");
8004            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8005            // XXX bxe_fw_dump(sc);
8006
8007        } else {
8008            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8009        }
8010    }
8011
8012    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8013        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8014        if (attn & BXE_GRC_TIMEOUT) {
8015            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8016            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8017        }
8018        if (attn & BXE_GRC_RSV) {
8019            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8020            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8021        }
8022        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8023    }
8024}
8025
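/*
 * Handle the group-2 deasserted attentions: CFC and PXP/PXP2 hardware
 * interrupts (including the CQ47854 PGLUE EOP error workaround) and the
 * fatal HW block attentions in set 2, which trigger a panic.
 */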
8026static void
8027bxe_attn_int_deasserted2(struct bxe_softc *sc,
8028                         uint32_t         attn)
8029{
8030    int port = SC_PORT(sc);
8031    int reg_offset;
8032    uint32_t val0, mask0, val1, mask1;
8033    uint32_t val;
8034
8035    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8036        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8037        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8038        /* CFC error attention */
8039        if (val & 0x2) {
8040            BLOGE(sc, "FATAL error from CFC\n");
8041        }
8042    }
8043
8044    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8045        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8046        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8047        /* RQ_USDMDP_FIFO_OVERFLOW */
8048        if (val & 0x18000) {
8049            BLOGE(sc, "FATAL error from PXP\n");
8050        }
8051
8052        if (!CHIP_IS_E1x(sc)) {
8053            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8054            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8055        }
8056    }
8057
8058#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8059#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8060
8061    if (attn & AEU_PXP2_HW_INT_BIT) {
8062        /* CQ47854 workaround: do not panic on
8063         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8064         */
8065        if (!CHIP_IS_E1x(sc)) {
8066            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8067            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8068            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8069            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8070            /*
8071             * If PXP2_EOP_ERROR_BIT is the only bit set in
8072             * STS0 and STS1, clear it.
8073             *
8074             * We may lose additional attentions between reading
8075             * STS0 and STS_CLR0; in that case the user will not
8076             * be notified about them.
8077             */
8078            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8079                !(val1 & mask1))
8080                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8081
8082            /* print the register, since no one can restore it */
8083            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8084
8085            /*
8086             * If PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is set
8087             * then notify the user.
8088             */
8089            if (val0 & PXP2_EOP_ERROR_BIT) {
8090                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8091
8092                /*
8093                 * If only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8094                 * set, clear the attention from the PXP2 block without panic.
8095                 */
8096                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8097                    ((val1 & mask1) == 0))
8098                    attn &= ~AEU_PXP2_HW_INT_BIT;
8099            }
8100        }
8101    }
8102
8103    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8104        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8105                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8106
8107        val = REG_RD(sc, reg_offset);
8108        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8109        REG_WR(sc, reg_offset, val);
8110
8111        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8112              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8113        bxe_panic(sc, ("HW block attention set2\n"));
8114    }
8115}
8116
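/*
 * Handle the group-1 deasserted attentions: doorbell queue (DORQ)
 * hardware interrupts and the fatal HW block attentions in set 1.
 */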
8117static void
8118bxe_attn_int_deasserted1(struct bxe_softc *sc,
8119                         uint32_t         attn)
8120{
8121    int port = SC_PORT(sc);
8122    int reg_offset;
8123    uint32_t val;
8124
8125    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8126        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8127        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8128        /* DORQ discard attention */
8129        if (val & 0x2) {
8130            BLOGE(sc, "FATAL error from DORQ\n");
8131        }
8132    }
8133
8134    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8135        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8136                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8137
8138        val = REG_RD(sc, reg_offset);
8139        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8140        REG_WR(sc, reg_offset, val);
8141
8142        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8143              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8144        bxe_panic(sc, ("HW block attention set1\n"));
8145    }
8146}
8147
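/*
 * Handle the group-0 deasserted attentions: SPIO5 (fan failure),
 * module detection interrupts (PMF only), and the fatal HW block
 * attentions in set 0.
 */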
8148static void
8149bxe_attn_int_deasserted0(struct bxe_softc *sc,
8150                         uint32_t         attn)
8151{
8152    int port = SC_PORT(sc);
8153    int reg_offset;
8154    uint32_t val;
8155
8156    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8157                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8158
8159    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8160        val = REG_RD(sc, reg_offset);
8161        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8162        REG_WR(sc, reg_offset, val);
8163
8164        BLOGW(sc, "SPIO5 hw attention\n");
8165
8166        /* Fan failure attention */
8167        elink_hw_reset_phy(&sc->link_params);
8168        bxe_fan_failure(sc);
8169    }
8170
8171    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8172        bxe_acquire_phy_lock(sc);
8173        elink_handle_module_detect_int(&sc->link_params);
8174        bxe_release_phy_lock(sc);
8175    }
8176
8177    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8178        val = REG_RD(sc, reg_offset);
8179        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8180        REG_WR(sc, reg_offset, val);
8181
8182        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
8183                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8184    }
8185}
8186
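/*
 * Process the newly deasserted attention bits. The ALR hardware lock is
 * taken first since the MCP or the other port may try to handle the same
 * event. If a parity error is detected the driver enters recovery and
 * disables interrupts. Otherwise the after-invert AEU registers are read
 * and every dynamic attention group that deasserted is dispatched to the
 * per-group handlers. Finally the deasserted bits are acknowledged in the
 * HC/IGU and unmasked again in the AEU mask register.
 */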
8187static void
8188bxe_attn_int_deasserted(struct bxe_softc *sc,
8189                        uint32_t         deasserted)
8190{
8191    struct attn_route attn;
8192    struct attn_route *group_mask;
8193    int port = SC_PORT(sc);
8194    int index;
8195    uint32_t reg_addr;
8196    uint32_t val;
8197    uint32_t aeu_mask;
8198    uint8_t global = FALSE;
8199
8200    /*
8201     * Need to take HW lock because MCP or other port might also
8202     * try to handle this event.
8203     */
8204    bxe_acquire_alr(sc);
8205
8206    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8207        /* XXX
8208         * In case of parity errors don't handle the attentions so that
8209         * the other function will also "see" the parity errors.
8210         */
8211        sc->recovery_state = BXE_RECOVERY_INIT;
8212        // XXX schedule a recovery task...
8213        /* disable HW interrupts */
8214        bxe_int_disable(sc);
8215        bxe_release_alr(sc);
8216        return;
8217    }
8218
8219    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8220    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8221    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8222    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8223    if (!CHIP_IS_E1x(sc)) {
8224        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8225    } else {
8226        attn.sig[4] = 0;
8227    }
8228
8229    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8230          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8231
8232    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8233        if (deasserted & (1 << index)) {
8234            group_mask = &sc->attn_group[index];
8235
8236            BLOGD(sc, DBG_INTR,
8237                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8238                  group_mask->sig[0], group_mask->sig[1],
8239                  group_mask->sig[2], group_mask->sig[3],
8240                  group_mask->sig[4]);
8241
8242            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8243            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8244            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8245            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8246            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8247        }
8248    }
8249
8250    bxe_release_alr(sc);
8251
8252    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8253        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8254                    COMMAND_REG_ATTN_BITS_CLR);
8255    } else {
8256        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8257    }
8258
8259    val = ~deasserted;
8260    BLOGD(sc, DBG_INTR,
8261          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8262          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8263    REG_WR(sc, reg_addr, val);
8264
8265    if (~sc->attn_state & deasserted) {
8266        BLOGE(sc, "IGU error\n");
8267    }
8268
8269    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8270                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8271
8272    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8273
8274    aeu_mask = REG_RD(sc, reg_addr);
8275
8276    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8277          aeu_mask, deasserted);
8278    aeu_mask |= (deasserted & 0x3ff);
8279    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8280
8281    REG_WR(sc, reg_addr, aeu_mask);
8282    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8283
8284    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8285    sc->attn_state &= ~deasserted;
8286    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8287}
8288
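/*
 * Derive which attention bits were newly asserted or deasserted by
 * comparing the attention status block against the last acked value and
 * the driver's cached attention state, then handle each set.
 */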
8289static void
8290bxe_attn_int(struct bxe_softc *sc)
8291{
8292    /* read local copy of bits */
8293    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8294    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8295    uint32_t attn_state = sc->attn_state;
8296
8297    /* look for changed bits */
8298    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8299    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8300
8301    BLOGD(sc, DBG_INTR,
8302          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8303          attn_bits, attn_ack, asserted, deasserted);
8304
8305    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8306        BLOGE(sc, "BAD attention state\n");
8307    }
8308
8309    /* handle bits that were raised */
8310    if (asserted) {
8311        bxe_attn_int_asserted(sc, asserted);
8312    }
8313
8314    if (deasserted) {
8315        bxe_attn_int_deasserted(sc, deasserted);
8316    }
8317}
8318
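/*
 * Sample the default status block indices and report which ones changed
 * since the last pass: BXE_DEF_SB_ATT_IDX for the attention index and
 * BXE_DEF_SB_IDX for the slowpath running index.
 */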
8319static uint16_t
8320bxe_update_dsb_idx(struct bxe_softc *sc)
8321{
8322    struct host_sp_status_block *def_sb = sc->def_sb;
8323    uint16_t rc = 0;
8324
8325    mb(); /* status block is written to by the chip */
8326
8327    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8328        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8329        rc |= BXE_DEF_SB_ATT_IDX;
8330    }
8331
8332    if (sc->def_idx != def_sb->sp_sb.running_index) {
8333        sc->def_idx = def_sb->sp_sb.running_index;
8334        rc |= BXE_DEF_SB_IDX;
8335    }
8336
8337    mb();
8338
8339    return (rc);
8340}
8341
8342static inline struct ecore_queue_sp_obj *
8343bxe_cid_to_q_obj(struct bxe_softc *sc,
8344                 uint32_t         cid)
8345{
8346    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8347    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8348}
8349
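/*
 * Complete the current multicast ramrod and, if more multicast
 * configuration commands are pending, send the next one.
 */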
8350static void
8351bxe_handle_mcast_eqe(struct bxe_softc *sc)
8352{
8353    struct ecore_mcast_ramrod_params rparam;
8354    int rc;
8355
8356    memset(&rparam, 0, sizeof(rparam));
8357
8358    rparam.mcast_obj = &sc->mcast_obj;
8359
8360    BXE_MCAST_LOCK(sc);
8361
8362    /* clear pending state for the last command */
8363    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8364
8365    /* if there are pending mcast commands - send them */
8366    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8367        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8368        if (rc < 0) {
8369            BLOGD(sc, DBG_SP,
8370                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8371        }
8372    }
8373
8374    BXE_MCAST_UNLOCK(sc);
8375}
8376
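/*
 * Complete a classification (MAC filter) ramrod. The echo field encodes
 * the client ID and the pending filter type: MAC completions are handed
 * to the per-queue vlan_mac object, while multicast completions (the
 * 57710-only path) are handed to bxe_handle_mcast_eqe().
 */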
8377static void
8378bxe_handle_classification_eqe(struct bxe_softc      *sc,
8379                              union event_ring_elem *elem)
8380{
8381    unsigned long ramrod_flags = 0;
8382    int rc = 0;
8383    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8384    struct ecore_vlan_mac_obj *vlan_mac_obj;
8385
8386    /* always push next commands out, don't wait here */
8387    bit_set(&ramrod_flags, RAMROD_CONT);
8388
8389    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8390    case ECORE_FILTER_MAC_PENDING:
8391        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8392        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8393        break;
8394
8395    case ECORE_FILTER_MCAST_PENDING:
8396        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8397        /*
8398         * This is only relevant for 57710 where multicast MACs are
8399         * configured as unicast MACs using the same ramrod.
8400         */
8401        bxe_handle_mcast_eqe(sc);
8402        return;
8403
8404    default:
8405        BLOGE(sc, "Unsupported classification command: %d\n",
8406              elem->message.data.eth_event.echo);
8407        return;
8408    }
8409
8410    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8411
8412    if (rc < 0) {
8413        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8414    } else if (rc > 0) {
8415        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8416    }
8417}
8418
8419static void
8420bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8421                       union event_ring_elem *elem)
8422{
8423    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8424
8425    /* send rx_mode command again if was requested */
8426    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8427                               &sc->sp_state)) {
8428        bxe_set_storm_rx_mode(sc);
8429    }
8430}
8431
8432static void
8433bxe_update_eq_prod(struct bxe_softc *sc,
8434                   uint16_t         prod)
8435{
8436    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8437    wmb(); /* keep prod updates ordered */
8438}
8439
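/*
 * Process the slowpath event queue (EQ). Each element carries a ramrod
 * completion from the firmware: function/queue state-machine events are
 * completed first, then the state-dependent events (RSS update, MAC
 * classification, multicast and rx_mode rules) are dispatched. The number
 * of consumed elements is credited back to eq_spq_left and the EQ
 * producer is updated at the end.
 */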
8440static void
8441bxe_eq_int(struct bxe_softc *sc)
8442{
8443    uint16_t hw_cons, sw_cons, sw_prod;
8444    union event_ring_elem *elem;
8445    uint8_t echo;
8446    uint32_t cid;
8447    uint8_t opcode;
8448    int spqe_cnt = 0;
8449    struct ecore_queue_sp_obj *q_obj;
8450    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8451    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8452
8453    hw_cons = le16toh(*sc->eq_cons_sb);
8454
8455    /*
8456     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8457     * When hw_cons points at the next-page element we need to adjust it so
8458     * the loop condition below is met. The next-page element is the size of
8459     * a regular element, hence we increment by 1.
8460     */
8461    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8462        hw_cons++;
8463    }
8464
8465    /*
8466     * This function never runs in parallel with itself for a
8467     * given sc, so no read memory barrier is needed here.
8468     */
8469    sw_cons = sc->eq_cons;
8470    sw_prod = sc->eq_prod;
8471
8472    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8473          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8474
8475    for (;
8476         sw_cons != hw_cons;
8477         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8478
8479        elem = &sc->eq[EQ_DESC(sw_cons)];
8480
8481        /* elem CID originates from FW, actually LE */
8482        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8483        opcode = elem->message.opcode;
8484
8485        /* handle eq element */
8486        switch (opcode) {
8487
8488        case EVENT_RING_OPCODE_STAT_QUERY:
8489            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8490                  sc->stats_comp++);
8491            /* nothing to do with stats comp */
8492            goto next_spqe;
8493
8494        case EVENT_RING_OPCODE_CFC_DEL:
8495            /* handle according to cid range */
8496            /* we may want to verify here that the sc state is HALTING */
8497            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8498            q_obj = bxe_cid_to_q_obj(sc, cid);
8499            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8500                break;
8501            }
8502            goto next_spqe;
8503
8504        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8505            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8506            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8507                break;
8508            }
8509            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8510            goto next_spqe;
8511
8512        case EVENT_RING_OPCODE_START_TRAFFIC:
8513            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8514            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8515                break;
8516            }
8517            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8518            goto next_spqe;
8519
8520        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8521            echo = elem->message.data.function_update_event.echo;
8522            if (echo == SWITCH_UPDATE) {
8523                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8524                if (f_obj->complete_cmd(sc, f_obj,
8525                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8526                    break;
8527                }
8528            }
8529            else {
8530                BLOGD(sc, DBG_SP,
8531                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8532            }
8533            goto next_spqe;
8534
8535        case EVENT_RING_OPCODE_FORWARD_SETUP:
8536            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8537            if (q_obj->complete_cmd(sc, q_obj,
8538                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8539                break;
8540            }
8541            goto next_spqe;
8542
8543        case EVENT_RING_OPCODE_FUNCTION_START:
8544            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8545            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8546                break;
8547            }
8548            goto next_spqe;
8549
8550        case EVENT_RING_OPCODE_FUNCTION_STOP:
8551            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8552            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8553                break;
8554            }
8555            goto next_spqe;
8556        }
8557
8558        switch (opcode | sc->state) {
8559        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8560        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8561            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8562            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8563            rss_raw->clear_pending(rss_raw);
8564            break;
8565
8566        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8567        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8568        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8569        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8570        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8571        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8572            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8573            bxe_handle_classification_eqe(sc, elem);
8574            break;
8575
8576        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8577        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8578        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8579            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8580            bxe_handle_mcast_eqe(sc);
8581            break;
8582
8583        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8584        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8585        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8586            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8587            bxe_handle_rx_mode_eqe(sc, elem);
8588            break;
8589
8590        default:
8591            /* unknown event; log the error and continue */
8592            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8593                  elem->message.opcode, sc->state);
8594        }
8595
8596next_spqe:
8597        spqe_cnt++;
8598    } /* for */
8599
8600    mb();
8601    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8602
8603    sc->eq_cons = sw_cons;
8604    sc->eq_prod = sw_prod;
8605
8606    /* make sure the memory writes above are issued before the producer update */
8607    wmb();
8608
8609    /* update producer */
8610    bxe_update_eq_prod(sc, sc->eq_prod);
8611}
8612
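/*
 * Slowpath taskqueue handler. Determines from the default status block
 * whether HW attentions and/or EQ completions need servicing, handles
 * them, and re-enables the default status block interrupt.
 */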
8613static void
8614bxe_handle_sp_tq(void *context,
8615                 int  pending)
8616{
8617    struct bxe_softc *sc = (struct bxe_softc *)context;
8618    uint16_t status;
8619
8620    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8621
8622    /* what work needs to be performed? */
8623    status = bxe_update_dsb_idx(sc);
8624
8625    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8626
8627    /* HW attentions */
8628    if (status & BXE_DEF_SB_ATT_IDX) {
8629        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8630        bxe_attn_int(sc);
8631        status &= ~BXE_DEF_SB_ATT_IDX;
8632    }
8633
8634    /* SP events: STAT_QUERY and others */
8635    if (status & BXE_DEF_SB_IDX) {
8636        /* handle EQ completions */
8637        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8638        bxe_eq_int(sc);
8639        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8640                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8641        status &= ~BXE_DEF_SB_IDX;
8642    }
8643
8644    /* if status is nonzero then something went wrong */
8645    if (__predict_false(status)) {
8646        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8647    }
8648
8649    /* ack status block only if something was actually handled */
8650    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8651               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8652
8653    /*
8654     * Must be called after the EQ processing (since eq leads to sriov
8655     * ramrod completion flows).
8656     * This flow may have been scheduled by the arrival of a ramrod
8657     * completion, or by the sriov code rescheduling itself.
8658     */
8659    // XXX bxe_iov_sp_task(sc);
8660
8661}
8662
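/*
 * Fastpath taskqueue handler: updates the fastpath status block index,
 * completes TX and RX work for the queue, and either reschedules itself
 * (if RX work remains) or re-enables the fastpath interrupt.
 */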
8663static void
8664bxe_handle_fp_tq(void *context,
8665                 int  pending)
8666{
8667    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8668    struct bxe_softc *sc = fp->sc;
8669    uint8_t more_tx = FALSE;
8670    uint8_t more_rx = FALSE;
8671
8672    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8673
8674    /* XXX
8675     * IFF_DRV_RUNNING state can't be checked here since we process
8676     * slowpath events on a client queue during setup. Instead we
8677     * need to add a "process/continue" flag that the driver can
8678     * use to tell this task not to do anything.
8679     */
8680#if 0
8681    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8682        return;
8683    }
8684#endif
8685
8686    /* update the fastpath index */
8687    bxe_update_fp_sb_idx(fp);
8688
8689    /* XXX add loop here if ever support multiple tx CoS */
8690    /* fp->txdata[cos] */
8691    if (bxe_has_tx_work(fp)) {
8692        BXE_FP_TX_LOCK(fp);
8693        more_tx = bxe_txeof(sc, fp);
8694        BXE_FP_TX_UNLOCK(fp);
8695    }
8696
8697    if (bxe_has_rx_work(fp)) {
8698        more_rx = bxe_rxeof(sc, fp);
8699    }
8700
8701    if (more_rx /*|| more_tx*/) {
8702        /* still more work to do */
8703        taskqueue_enqueue(fp->tq, &fp->tq_task);
8704        return;
8705    }
8706
8707    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8708               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8709}
8710
8711static void
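/*
 * Fastpath work invoked from interrupt context. Completes TX and RX work
 * for the queue; if RX work remains it is deferred to the fastpath
 * taskqueue, otherwise the fastpath interrupt is re-enabled.
 */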
8712bxe_task_fp(struct bxe_fastpath *fp)
8713{
8714    struct bxe_softc *sc = fp->sc;
8715    uint8_t more_tx = FALSE;
8716    uint8_t more_rx = FALSE;
8717
8718    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8719
8720    /* update the fastpath index */
8721    bxe_update_fp_sb_idx(fp);
8722
8723    /* XXX add loop here if ever support multiple tx CoS */
8724    /* fp->txdata[cos] */
8725    if (bxe_has_tx_work(fp)) {
8726        BXE_FP_TX_LOCK(fp);
8727        more_tx = bxe_txeof(sc, fp);
8728        BXE_FP_TX_UNLOCK(fp);
8729    }
8730
8731    if (bxe_has_rx_work(fp)) {
8732        more_rx = bxe_rxeof(sc, fp);
8733    }
8734
8735    if (more_rx /*|| more_tx*/) {
8736        /* still more work to do, bail out of this ISR and process later */
8737        taskqueue_enqueue(fp->tq, &fp->tq_task);
8738        return;
8739    }
8740
8741    /*
8742     * Here we write the fastpath index that was taken before doing any tx
8743     * or rx work. It is quite possible that other hw events occurred up to
8744     * this point and were already processed above. Since we are writing an
8745     * older fastpath index, another interrupt will arrive in which we may
8746     * not have any work to do.
8747     */
8748    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8749               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8750}
8751
8752/*
8753 * Legacy interrupt entry point.
8754 *
8755 * Verifies that the controller generated the interrupt and
8756 * then calls a separate routine to handle the various
8757 * interrupt causes: link, RX, and TX.
8758 */
8759static void
8760bxe_intr_legacy(void *xsc)
8761{
8762    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8763    struct bxe_fastpath *fp;
8764    uint16_t status, mask;
8765    int i;
8766
8767    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8768
8769    /*
8770     * 0 for ustorm, 1 for cstorm
8771     * the bits returned from ack_int() are 0-15
8772     * bit 0 = attention status block
8773     * bit 1 = fast path status block
8774     * a mask of 0x2 or more = tx/rx event
8775     * a mask of 1 = slow path event
8776     */
8777
8778    status = bxe_ack_int(sc);
8779
8780    /* the interrupt is not for us */
8781    if (__predict_false(status == 0)) {
8782        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8783        return;
8784    }
8785
8786    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8787
8788    FOR_EACH_ETH_QUEUE(sc, i) {
8789        fp = &sc->fp[i];
8790        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8791        if (status & mask) {
8792            /* acknowledge and disable further fastpath interrupts */
8793            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8794            bxe_task_fp(fp);
8795            status &= ~mask;
8796        }
8797    }
8798
8799    if (__predict_false(status & 0x1)) {
8800        /* acknowledge and disable further slowpath interrupts */
8801        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8802
8803        /* schedule slowpath handler */
8804        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8805
8806        status &= ~0x1;
8807    }
8808
8809    if (__predict_false(status)) {
8810        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8811    }
8812}
8813
8814/* slowpath interrupt entry point */
8815static void
8816bxe_intr_sp(void *xsc)
8817{
8818    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8819
8820    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8821
8822    /* acknowledge and disable further slowpath interrupts */
8823    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8824
8825    /* schedule slowpath handler */
8826    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8827}
8828
8829/* fastpath interrupt entry point */
8830static void
8831bxe_intr_fp(void *xfp)
8832{
8833    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8834    struct bxe_softc *sc = fp->sc;
8835
8836    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8837
8838    BLOGD(sc, DBG_INTR,
8839          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8840          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8841
8842    /* acknowledge and disable further fastpath interrupts */
8843    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8844
8845    bxe_task_fp(fp);
8846}
8847
8848/* Release all interrupts allocated by the driver. */
8849static void
8850bxe_interrupt_free(struct bxe_softc *sc)
8851{
8852    int i;
8853
8854    switch (sc->interrupt_mode) {
8855    case INTR_MODE_INTX:
8856        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8857        if (sc->intr[0].resource != NULL) {
8858            bus_release_resource(sc->dev,
8859                                 SYS_RES_IRQ,
8860                                 sc->intr[0].rid,
8861                                 sc->intr[0].resource);
8862        }
8863        break;
8864    case INTR_MODE_MSI:
8865        for (i = 0; i < sc->intr_count; i++) {
8866            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8867            if (sc->intr[i].resource && sc->intr[i].rid) {
8868                bus_release_resource(sc->dev,
8869                                     SYS_RES_IRQ,
8870                                     sc->intr[i].rid,
8871                                     sc->intr[i].resource);
8872            }
8873        }
8874        pci_release_msi(sc->dev);
8875        break;
8876    case INTR_MODE_MSIX:
8877        for (i = 0; i < sc->intr_count; i++) {
8878            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8879            if (sc->intr[i].resource && sc->intr[i].rid) {
8880                bus_release_resource(sc->dev,
8881                                     SYS_RES_IRQ,
8882                                     sc->intr[i].rid,
8883                                     sc->intr[i].resource);
8884            }
8885        }
8886        pci_release_msi(sc->dev);
8887        break;
8888    default:
8889        /* nothing to do as initial allocation failed */
8890        break;
8891    }
8892}
8893
8894/*
8895 * This function determines and allocates the appropriate
8896 * interrupt based on system capabilities and user request.
8897 *
8898 * The user may force a particular interrupt mode, specify
8899 * the number of receive queues, specify the method for
8900 * distributing received frames to receive queues, or use
8901 * the default settings which will automatically select the
8902 * best supported combination.  In addition, the OS may or
8903 * may not support certain combinations of these settings.
8904 * This routine attempts to reconcile the settings requested
8905 * by the user with the capabilities available from the system
8906 * to select the optimal combination of features.
8907 *
8908 * Returns:
8909 *   0 = Success, !0 = Failure.
8910 */
8911static int
8912bxe_interrupt_alloc(struct bxe_softc *sc)
8913{
8914    int msix_count = 0;
8915    int msi_count = 0;
8916    int num_requested = 0;
8917    int num_allocated = 0;
8918    int rid, i, j;
8919    int rc;
8920
8921    /* get the number of available MSI/MSI-X interrupts from the OS */
8922    if (sc->interrupt_mode > 0) {
8923        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8924            msix_count = pci_msix_count(sc->dev);
8925        }
8926
8927        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8928            msi_count = pci_msi_count(sc->dev);
8929        }
8930
8931        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8932              msi_count, msix_count);
8933    }
8934
8935    do { /* try allocating MSI-X interrupt resources (at least 2) */
8936        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8937            break;
8938        }
8939
8940        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8941            (msix_count < 2)) {
8942            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8943            break;
8944        }
8945
8946        /* ask for the necessary number of MSI-X vectors */
8947        num_requested = min((sc->num_queues + 1), msix_count);
8948
8949        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8950
8951        num_allocated = num_requested;
8952        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8953            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8954            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8955            break;
8956        }
8957
8958        if (num_allocated < 2) { /* possible? */
8959            BLOGE(sc, "MSI-X allocation less than 2!\n");
8960            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8961            pci_release_msi(sc->dev);
8962            break;
8963        }
8964
8965        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8966              num_requested, num_allocated);
8967
8968        /* best effort so use the number of vectors allocated to us */
8969        sc->intr_count = num_allocated;
8970        sc->num_queues = num_allocated - 1;
8971
8972        rid = 1; /* initial resource identifier */
8973
8974        /* allocate the MSI-X vectors */
8975        for (i = 0; i < num_allocated; i++) {
8976            sc->intr[i].rid = (rid + i);
8977
8978            if ((sc->intr[i].resource =
8979                 bus_alloc_resource_any(sc->dev,
8980                                        SYS_RES_IRQ,
8981                                        &sc->intr[i].rid,
8982                                        RF_ACTIVE)) == NULL) {
8983                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8984                      i, (rid + i));
8985
8986                for (j = (i - 1); j >= 0; j--) {
8987                    bus_release_resource(sc->dev,
8988                                         SYS_RES_IRQ,
8989                                         sc->intr[j].rid,
8990                                         sc->intr[j].resource);
8991                }
8992
8993                sc->intr_count = 0;
8994                sc->num_queues = 0;
8995                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8996                pci_release_msi(sc->dev);
8997                break;
8998            }
8999
9000            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9001        }
9002    } while (0);
9003
9004    do { /* try allocating MSI vector resources (at least 2) */
9005        if (sc->interrupt_mode != INTR_MODE_MSI) {
9006            break;
9007        }
9008
9009        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9010            (msi_count < 1)) {
9011            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9012            break;
9013        }
9014
9015        /* ask for a single MSI vector */
9016        num_requested = 1;
9017
9018        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9019
9020        num_allocated = num_requested;
9021        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9022            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9023            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9024            break;
9025        }
9026
9027        if (num_allocated != 1) { /* possible? */
9028            BLOGE(sc, "MSI allocation is not 1!\n");
9029            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9030            pci_release_msi(sc->dev);
9031            break;
9032        }
9033
9034        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9035              num_requested, num_allocated);
9036
9037        /* best effort so use the number of vectors allocated to us */
9038        sc->intr_count = num_allocated;
9039        sc->num_queues = num_allocated;
9040
9041        rid = 1; /* initial resource identifier */
9042
9043        sc->intr[0].rid = rid;
9044
9045        if ((sc->intr[0].resource =
9046             bus_alloc_resource_any(sc->dev,
9047                                    SYS_RES_IRQ,
9048                                    &sc->intr[0].rid,
9049                                    RF_ACTIVE)) == NULL) {
9050            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9051            sc->intr_count = 0;
9052            sc->num_queues = 0;
9053            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9054            pci_release_msi(sc->dev);
9055            break;
9056        }
9057
9058        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9059    } while (0);
9060
9061    do { /* try allocating INTx vector resources */
9062        if (sc->interrupt_mode != INTR_MODE_INTX) {
9063            break;
9064        }
9065
9066        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9067
9068        /* only one vector for INTx */
9069        sc->intr_count = 1;
9070        sc->num_queues = 1;
9071
9072        rid = 0; /* initial resource identifier */
9073
9074        sc->intr[0].rid = rid;
9075
9076        if ((sc->intr[0].resource =
9077             bus_alloc_resource_any(sc->dev,
9078                                    SYS_RES_IRQ,
9079                                    &sc->intr[0].rid,
9080                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9081            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9082            sc->intr_count = 0;
9083            sc->num_queues = 0;
9084            sc->interrupt_mode = -1; /* Failed! */
9085            break;
9086        }
9087
9088        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9089    } while (0);
9090
9091    if (sc->interrupt_mode == -1) {
9092        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9093        rc = 1;
9094    } else {
9095        BLOGD(sc, DBG_LOAD,
9096              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9097              sc->interrupt_mode, sc->num_queues);
9098        rc = 0;
9099    }
9100
9101    return (rc);
9102}
9103
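/*
 * Detach the interrupt handlers and tear down the per-queue and slowpath
 * taskqueues, draining any outstanding tasks first.
 */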
9104static void
9105bxe_interrupt_detach(struct bxe_softc *sc)
9106{
9107    struct bxe_fastpath *fp;
9108    int i;
9109
9110    /* release interrupt resources */
9111    for (i = 0; i < sc->intr_count; i++) {
9112        if (sc->intr[i].resource && sc->intr[i].tag) {
9113            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9114            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9115        }
9116    }
9117
9118    for (i = 0; i < sc->num_queues; i++) {
9119        fp = &sc->fp[i];
9120        if (fp->tq) {
9121            taskqueue_drain(fp->tq, &fp->tq_task);
9122            taskqueue_drain(fp->tq, &fp->tx_task);
9123            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9124                NULL))
9125                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9126            taskqueue_free(fp->tq);
9127            fp->tq = NULL;
9128        }
9129    }
9130
9131
9132    if (sc->sp_tq) {
9133        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9134        taskqueue_free(sc->sp_tq);
9135        sc->sp_tq = NULL;
9136    }
9137}
9138
9139/*
9140 * Enables interrupts and attach to the ISR.
9141 *
9142 * When using multiple MSI/MSI-X vectors the first vector
9143 * is used for slowpath operations while all remaining
9144 * vectors are used for fastpath operations.  If only a
9145 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9146 * ISR must look for both slowpath and fastpath completions.
9147 */
9148static int
9149bxe_interrupt_attach(struct bxe_softc *sc)
9150{
9151    struct bxe_fastpath *fp;
9152    int rc = 0;
9153    int i;
9154
9155    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9156             "bxe%d_sp_tq", sc->unit);
9157    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9158    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9159                                 taskqueue_thread_enqueue,
9160                                 &sc->sp_tq);
9161    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9162                            "%s", sc->sp_tq_name);
9163
9164
9165    for (i = 0; i < sc->num_queues; i++) {
9166        fp = &sc->fp[i];
9167        snprintf(fp->tq_name, sizeof(fp->tq_name),
9168                 "bxe%d_fp%d_tq", sc->unit, i);
9169        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9170        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9171        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9172                                  taskqueue_thread_enqueue,
9173                                  &fp->tq);
9174        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9175                          bxe_tx_mq_start_deferred, fp);
9176        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9177                                "%s", fp->tq_name);
9178    }
9179
9180    /* setup interrupt handlers */
9181    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9182        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9183
9184        /*
9185         * Setup the interrupt handler. Note that we pass the driver instance
9186         * to the interrupt handler for the slowpath.
9187         */
9188        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9189                                 (INTR_TYPE_NET | INTR_MPSAFE),
9190                                 NULL, bxe_intr_sp, sc,
9191                                 &sc->intr[0].tag)) != 0) {
9192            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9193            goto bxe_interrupt_attach_exit;
9194        }
9195
9196        bus_describe_intr(sc->dev, sc->intr[0].resource,
9197                          sc->intr[0].tag, "sp");
9198
9199        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9200
9201        /* initialize the fastpath vectors (note the first was used for sp) */
9202        for (i = 0; i < sc->num_queues; i++) {
9203            fp = &sc->fp[i];
9204            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9205
9206            /*
9207             * Setup the interrupt handler. Note that we pass the
9208             * fastpath context to the interrupt handler in this
9209             * case.
9210             */
9211            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9212                                     (INTR_TYPE_NET | INTR_MPSAFE),
9213                                     NULL, bxe_intr_fp, fp,
9214                                     &sc->intr[i + 1].tag)) != 0) {
9215                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9216                      (i + 1), rc);
9217                goto bxe_interrupt_attach_exit;
9218            }
9219
9220            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9221                              sc->intr[i + 1].tag, "fp%02d", i);
9222
9223            /* bind the fastpath instance to a cpu */
9224            if (sc->num_queues > 1) {
9225                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9226            }
9227
9228            fp->state = BXE_FP_STATE_IRQ;
9229        }
9230    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9231        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9232
9233        /*
9234         * Setup the interrupt handler. Note that we pass the
9235         * driver instance to the interrupt handler which
9236         * will handle both the slowpath and fastpath.
9237         */
9238        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9239                                 (INTR_TYPE_NET | INTR_MPSAFE),
9240                                 NULL, bxe_intr_legacy, sc,
9241                                 &sc->intr[0].tag)) != 0) {
9242            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9243            goto bxe_interrupt_attach_exit;
9244        }
9245
9246    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9247        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9248
9249        /*
9250         * Setup the interrupt handler. Note that we pass the
9251         * driver instance to the interrupt handler which
9252         * will handle both the slowpath and fastpath.
9253         */
9254        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9255                                 (INTR_TYPE_NET | INTR_MPSAFE),
9256                                 NULL, bxe_intr_legacy, sc,
9257                                 &sc->intr[0].tag)) != 0) {
9258            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9259            goto bxe_interrupt_attach_exit;
9260        }
9261    }
9262
9263bxe_interrupt_attach_exit:
9264
9265    return (rc);
9266}
9267
9268static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9269static int  bxe_init_hw_common(struct bxe_softc *sc);
9270static int  bxe_init_hw_port(struct bxe_softc *sc);
9271static int  bxe_init_hw_func(struct bxe_softc *sc);
9272static void bxe_reset_common(struct bxe_softc *sc);
9273static void bxe_reset_port(struct bxe_softc *sc);
9274static void bxe_reset_func(struct bxe_softc *sc);
9275static int  bxe_gunzip_init(struct bxe_softc *sc);
9276static void bxe_gunzip_end(struct bxe_softc *sc);
9277static int  bxe_init_firmware(struct bxe_softc *sc);
9278static void bxe_release_firmware(struct bxe_softc *sc);
9279
9280static struct
9281ecore_func_sp_drv_ops bxe_func_sp_drv = {
9282    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9283    .init_hw_cmn      = bxe_init_hw_common,
9284    .init_hw_port     = bxe_init_hw_port,
9285    .init_hw_func     = bxe_init_hw_func,
9286
9287    .reset_hw_cmn     = bxe_reset_common,
9288    .reset_hw_port    = bxe_reset_port,
9289    .reset_hw_func    = bxe_reset_func,
9290
9291    .gunzip_init      = bxe_gunzip_init,
9292    .gunzip_end       = bxe_gunzip_end,
9293
9294    .init_fw          = bxe_init_firmware,
9295    .release_fw       = bxe_release_firmware,
9296};
9297
9298static void
9299bxe_init_func_obj(struct bxe_softc *sc)
9300{
9301    sc->dmae_ready = 0;
9302
9303    ecore_init_func_obj(sc,
9304                        &sc->func_obj,
9305                        BXE_SP(sc, func_rdata),
9306                        BXE_SP_MAPPING(sc, func_rdata),
9307                        BXE_SP(sc, func_afex_rdata),
9308                        BXE_SP_MAPPING(sc, func_afex_rdata),
9309                        &bxe_func_sp_drv);
9310}
9311
9312static int
9313bxe_init_hw(struct bxe_softc *sc,
9314            uint32_t         load_code)
9315{
9316    struct ecore_func_state_params func_params = { NULL };
9317    int rc;
9318
9319    /* prepare the parameters for function state transitions */
9320    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9321
9322    func_params.f_obj = &sc->func_obj;
9323    func_params.cmd = ECORE_F_CMD_HW_INIT;
9324
9325    func_params.params.hw_init.load_phase = load_code;
9326
9327    /*
9328     * Via a plethora of function pointers, we will eventually reach
9329     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9330     */
9331    rc = ecore_func_state_change(sc, &func_params);
9332
9333    return (rc);
9334}
9335
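/*
 * Fill a region of device memory with a value, using 32-bit writes when
 * both the address and length are dword-aligned and byte writes otherwise.
 */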
9336static void
9337bxe_fill(struct bxe_softc *sc,
9338         uint32_t         addr,
9339         int              fill,
9340         uint32_t         len)
9341{
9342    uint32_t i;
9343
9344    if (!(len % 4) && !(addr % 4)) {
9345        for (i = 0; i < len; i += 4) {
9346            REG_WR(sc, (addr + i), fill);
9347        }
9348    } else {
9349        for (i = 0; i < len; i++) {
9350            REG_WR8(sc, (addr + i), fill);
9351        }
9352    }
9353}
9354
9355/* writes FP SP data to FW - data_size in dwords */
9356static void
9357bxe_wr_fp_sb_data(struct bxe_softc *sc,
9358                  int              fw_sb_id,
9359                  uint32_t         *sb_data_p,
9360                  uint32_t         data_size)
9361{
9362    int index;
9363
9364    for (index = 0; index < data_size; index++) {
9365        REG_WR(sc,
9366               (BAR_CSTRORM_INTMEM +
9367                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9368                (sizeof(uint32_t) * index)),
9369               *(sb_data_p + index));
9370    }
9371}
9372
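/*
 * Mark a fastpath status block as disabled for the firmware and zero its
 * status and sync areas in CSTORM internal memory.
 */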
9373static void
9374bxe_zero_fp_sb(struct bxe_softc *sc,
9375               int              fw_sb_id)
9376{
9377    struct hc_status_block_data_e2 sb_data_e2;
9378    struct hc_status_block_data_e1x sb_data_e1x;
9379    uint32_t *sb_data_p;
9380    uint32_t data_size = 0;
9381
9382    if (!CHIP_IS_E1x(sc)) {
9383        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9384        sb_data_e2.common.state = SB_DISABLED;
9385        sb_data_e2.common.p_func.vf_valid = FALSE;
9386        sb_data_p = (uint32_t *)&sb_data_e2;
9387        data_size = (sizeof(struct hc_status_block_data_e2) /
9388                     sizeof(uint32_t));
9389    } else {
9390        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9391        sb_data_e1x.common.state = SB_DISABLED;
9392        sb_data_e1x.common.p_func.vf_valid = FALSE;
9393        sb_data_p = (uint32_t *)&sb_data_e1x;
9394        data_size = (sizeof(struct hc_status_block_data_e1x) /
9395                     sizeof(uint32_t));
9396    }
9397
9398    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9399
9400    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9401             0, CSTORM_STATUS_BLOCK_SIZE);
9402    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9403             0, CSTORM_SYNC_BLOCK_SIZE);
9404}
9405
9406static void
9407bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9408                  struct hc_sp_status_block_data *sp_sb_data)
9409{
9410    int i;
9411
9412    for (i = 0;
9413         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9414         i++) {
9415        REG_WR(sc,
9416               (BAR_CSTRORM_INTMEM +
9417                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9418                (i * sizeof(uint32_t))),
9419               *((uint32_t *)sp_sb_data + i));
9420    }
9421}
9422
9423static void
9424bxe_zero_sp_sb(struct bxe_softc *sc)
9425{
9426    struct hc_sp_status_block_data sp_sb_data;
9427
9428    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9429
9430    sp_sb_data.state           = SB_DISABLED;
9431    sp_sb_data.p_func.vf_valid = FALSE;
9432
9433    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9434
9435    bxe_fill(sc,
9436             (BAR_CSTRORM_INTMEM +
9437              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9438              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9439    bxe_fill(sc,
9440             (BAR_CSTRORM_INTMEM +
9441              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9442              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9443}
9444
9445static void
9446bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9447                             int                       igu_sb_id,
9448                             int                       igu_seg_id)
9449{
9450    hc_sm->igu_sb_id      = igu_sb_id;
9451    hc_sm->igu_seg_id     = igu_seg_id;
9452    hc_sm->timer_value    = 0xFF;
9453    hc_sm->time_to_expire = 0xFFFFFFFF;
9454}
9455
9456static void
9457bxe_map_sb_state_machines(struct hc_index_data *index_data)
9458{
9459    /* zero out state machine indices */
9460
9461    /* rx indices */
9462    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9463
9464    /* tx indices */
9465    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9466    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9467    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9468    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9469
9470    /* map indices */
9471
9472    /* rx indices */
9473    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9474        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9475
9476    /* tx indices */
9477    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9478        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9479    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9480        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9481    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9482        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9483    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9484        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9485}
9486
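/*
 * Initialize a status block in the device: point the firmware at the host
 * DMA address, set the owning PF/VF and vnic, map the RX/TX index state
 * machines, and write the resulting data to CSTORM internal memory.
 */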
9487static void
9488bxe_init_sb(struct bxe_softc *sc,
9489            bus_addr_t       busaddr,
9490            int              vfid,
9491            uint8_t          vf_valid,
9492            int              fw_sb_id,
9493            int              igu_sb_id)
9494{
9495    struct hc_status_block_data_e2  sb_data_e2;
9496    struct hc_status_block_data_e1x sb_data_e1x;
9497    struct hc_status_block_sm       *hc_sm_p;
9498    uint32_t *sb_data_p;
9499    int igu_seg_id;
9500    int data_size;
9501
9502    if (CHIP_INT_MODE_IS_BC(sc)) {
9503        igu_seg_id = HC_SEG_ACCESS_NORM;
9504    } else {
9505        igu_seg_id = IGU_SEG_ACCESS_NORM;
9506    }
9507
9508    bxe_zero_fp_sb(sc, fw_sb_id);
9509
9510    if (!CHIP_IS_E1x(sc)) {
9511        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9512        sb_data_e2.common.state = SB_ENABLED;
9513        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9514        sb_data_e2.common.p_func.vf_id = vfid;
9515        sb_data_e2.common.p_func.vf_valid = vf_valid;
9516        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9517        sb_data_e2.common.same_igu_sb_1b = TRUE;
9518        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9519        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9520        hc_sm_p = sb_data_e2.common.state_machine;
9521        sb_data_p = (uint32_t *)&sb_data_e2;
9522        data_size = (sizeof(struct hc_status_block_data_e2) /
9523                     sizeof(uint32_t));
9524        bxe_map_sb_state_machines(sb_data_e2.index_data);
9525    } else {
9526        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9527        sb_data_e1x.common.state = SB_ENABLED;
9528        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9529        sb_data_e1x.common.p_func.vf_id = 0xff;
9530        sb_data_e1x.common.p_func.vf_valid = FALSE;
9531        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9532        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9533        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9534        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9535        hc_sm_p = sb_data_e1x.common.state_machine;
9536        sb_data_p = (uint32_t *)&sb_data_e1x;
9537        data_size = (sizeof(struct hc_status_block_data_e1x) /
9538                     sizeof(uint32_t));
9539        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9540    }
9541
9542    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9543    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9544
9545    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9546
9547    /* write indices to HW - PCI guarantees endianity of regpairs */
9548    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9549}
9550
9551static inline uint8_t
9552bxe_fp_qzone_id(struct bxe_fastpath *fp)
9553{
9554    if (CHIP_IS_E1x(fp->sc)) {
9555        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9556    } else {
9557        return (fp->cl_id);
9558    }
9559}
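/*
 * For example, on an E1x chip with SC_PORT() == 1 and cl_id == 2 this
 * yields a queue zone id of (2 + ETH_MAX_RX_CLIENTS_E1H), keeping each
 * port's clients in a separate zone range.
 */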
9560
9561static inline uint32_t
9562bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9563                           struct bxe_fastpath *fp)
9564{
9565    uint32_t offset = BAR_USTRORM_INTMEM;
9566
9567    if (!CHIP_IS_E1x(sc)) {
9568        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9569    } else {
9570        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9571    }
9572
9573    return (offset);
9574}
9575
9576static void
9577bxe_init_eth_fp(struct bxe_softc *sc,
9578                int              idx)
9579{
9580    struct bxe_fastpath *fp = &sc->fp[idx];
9581    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9582    unsigned long q_type = 0;
9583    int cos;
9584
9585    fp->sc    = sc;
9586    fp->index = idx;
9587
9588    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9589    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9590
9591    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9592                    (SC_L_ID(sc) + idx) :
9593                    /* want client ID same as IGU SB ID for non-E1x */
9594                    fp->igu_sb_id;
9595    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9596
9597    /* setup sb indices */
9598    if (!CHIP_IS_E1x(sc)) {
9599        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9600        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9601    } else {
9602        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9603        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9604    }
9605
9606    /* init shortcut */
9607    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9608
9609    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9610
9611    /*
9612     * XXX If multiple CoS are ever supported then each fastpath structure
9613     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9614     */
9615    for (cos = 0; cos < sc->max_cos; cos++) {
9616        cids[cos] = idx;
9617    }
9618    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9619
9620    /* nothing more for a VF to do */
9621    if (IS_VF(sc)) {
9622        return;
9623    }
9624
9625    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9626                fp->fw_sb_id, fp->igu_sb_id);
9627
9628    bxe_update_fp_sb_idx(fp);
9629
9630    /* Configure Queue State object */
9631    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9632    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9633
9634    ecore_init_queue_obj(sc,
9635                         &sc->sp_objs[idx].q_obj,
9636                         fp->cl_id,
9637                         cids,
9638                         sc->max_cos,
9639                         SC_FUNC(sc),
9640                         BXE_SP(sc, q_rdata),
9641                         BXE_SP_MAPPING(sc, q_rdata),
9642                         q_type);
9643
9644    /* configure classification DBs */
9645    ecore_init_mac_obj(sc,
9646                       &sc->sp_objs[idx].mac_obj,
9647                       fp->cl_id,
9648                       idx,
9649                       SC_FUNC(sc),
9650                       BXE_SP(sc, mac_rdata),
9651                       BXE_SP_MAPPING(sc, mac_rdata),
9652                       ECORE_FILTER_MAC_PENDING,
9653                       &sc->sp_state,
9654                       ECORE_OBJ_TYPE_RX_TX,
9655                       &sc->macs_pool);
9656
9657    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9658          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9659}
9660
9661static inline void
9662bxe_update_rx_prod(struct bxe_softc    *sc,
9663                   struct bxe_fastpath *fp,
9664                   uint16_t            rx_bd_prod,
9665                   uint16_t            rx_cq_prod,
9666                   uint16_t            rx_sge_prod)
9667{
9668    struct ustorm_eth_rx_producers rx_prods = { 0 };
9669    uint32_t i;
9670
9671    /* update producers */
9672    rx_prods.bd_prod  = rx_bd_prod;
9673    rx_prods.cqe_prod = rx_cq_prod;
9674    rx_prods.sge_prod = rx_sge_prod;
9675
9676    /*
9677     * Make sure that the BD and SGE data is updated before updating the
9678     * producers since FW might read the BD/SGE right after the producer
9679     * is updated.
9680     * This is only applicable for weak-ordered memory model archs such
9681     * as IA-64. The following barrier is also mandatory since FW will
9682     * as IA-64. The following barrier is also mandatory since the FW
9683     * assumes BDs must have buffers.
9684    wmb();
9685
9686    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9687        REG_WR(sc,
9688               (fp->ustorm_rx_prods_offset + (i * 4)),
9689               ((uint32_t *)&rx_prods)[i]);
9690    }
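    /*
     * The loop above pushes the producers structure to the USTORM internal
     * memory as individual 32-bit writes (sizeof(rx_prods) / 4 of them),
     * starting at the per-queue offset computed earlier by
     * bxe_rx_ustorm_prods_offset().
     */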
9691
9692    wmb(); /* keep prod updates ordered */
9693
9694    BLOGD(sc, DBG_RX,
9695          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9696          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9697}
9698
9699static void
9700bxe_init_rx_rings(struct bxe_softc *sc)
9701{
9702    struct bxe_fastpath *fp;
9703    int i;
9704
9705    for (i = 0; i < sc->num_queues; i++) {
9706        fp = &sc->fp[i];
9707
9708        fp->rx_bd_cons = 0;
9709
9710        /*
9711         * Activate the BD ring...
9712         * Warning, this will generate an interrupt (to the TSTORM)
9713         * so this can only be done after the chip is initialized
9714         */
9715        bxe_update_rx_prod(sc, fp,
9716                           fp->rx_bd_prod,
9717                           fp->rx_cq_prod,
9718                           fp->rx_sge_prod);
9719
9720        if (i != 0) {
9721            continue;
9722        }
9723
9724        if (CHIP_IS_E1(sc)) {
9725            REG_WR(sc,
9726                   (BAR_USTRORM_INTMEM +
9727                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9728                   U64_LO(fp->rcq_dma.paddr));
9729            REG_WR(sc,
9730                   (BAR_USTRORM_INTMEM +
9731                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9732                   U64_HI(fp->rcq_dma.paddr));
9733        }
9734    }
9735}
9736
9737static void
9738bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9739{
9740    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9741    fp->tx_db.data.zero_fill1 = 0;
9742    fp->tx_db.data.prod = 0;
9743
9744    fp->tx_pkt_prod = 0;
9745    fp->tx_pkt_cons = 0;
9746    fp->tx_bd_prod = 0;
9747    fp->tx_bd_cons = 0;
9748    fp->eth_q_stats.tx_pkts = 0;
9749}
9750
9751static inline void
9752bxe_init_tx_rings(struct bxe_softc *sc)
9753{
9754    int i;
9755
9756    for (i = 0; i < sc->num_queues; i++) {
9757        bxe_init_tx_ring_one(&sc->fp[i]);
9758    }
9759}
9760
9761static void
9762bxe_init_def_sb(struct bxe_softc *sc)
9763{
9764    struct host_sp_status_block *def_sb = sc->def_sb;
9765    bus_addr_t mapping = sc->def_sb_dma.paddr;
9766    int igu_sp_sb_index;
9767    int igu_seg_id;
9768    int port = SC_PORT(sc);
9769    int func = SC_FUNC(sc);
9770    int reg_offset, reg_offset_en5;
9771    uint64_t section;
9772    int index, sindex;
9773    struct hc_sp_status_block_data sp_sb_data;
9774
9775    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9776
9777    if (CHIP_INT_MODE_IS_BC(sc)) {
9778        igu_sp_sb_index = DEF_SB_IGU_ID;
9779        igu_seg_id = HC_SEG_ACCESS_DEF;
9780    } else {
9781        igu_sp_sb_index = sc->igu_dsb_id;
9782        igu_seg_id = IGU_SEG_ACCESS_DEF;
9783    }
9784
9785    /* attentions */
9786    section = ((uint64_t)mapping +
9787               offsetof(struct host_sp_status_block, atten_status_block));
9788    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9789    sc->attn_state = 0;
9790
9791    reg_offset = (port) ?
9792                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9793                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9794    reg_offset_en5 = (port) ?
9795                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9796                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9797
9798    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9799        /* take care of sig[0]..sig[4] */
9800        for (sindex = 0; sindex < 4; sindex++) {
9801            sc->attn_group[index].sig[sindex] =
9802                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9803        }
9804
9805        if (!CHIP_IS_E1x(sc)) {
9806            /*
9807             * enable5 is separate from the rest of the registers,
9808             * and the address skip is 4 and not 16 between the
9809             * different groups
9810             */
9811            sc->attn_group[index].sig[4] =
9812                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9813        } else {
9814            sc->attn_group[index].sig[4] = 0;
9815        }
9816    }
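    /*
     * Example of the addressing above: for attention group 2, sig[1] is
     * read from (reg_offset + 0x24), i.e. 0x10 * 2 + 0x4 * 1, while sig[4]
     * on non-E1x chips comes from (reg_offset_en5 + 0x8), i.e. 0x4 * 2.
     */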
9817
9818    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9819        reg_offset = (port) ?
9820                         HC_REG_ATTN_MSG1_ADDR_L :
9821                         HC_REG_ATTN_MSG0_ADDR_L;
9822        REG_WR(sc, reg_offset, U64_LO(section));
9823        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9824    } else if (!CHIP_IS_E1x(sc)) {
9825        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9826        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9827    }
9828
9829    section = ((uint64_t)mapping +
9830               offsetof(struct host_sp_status_block, sp_sb));
9831
9832    bxe_zero_sp_sb(sc);
9833
9834    /* PCI guarantees endianity of regpair */
9835    sp_sb_data.state           = SB_ENABLED;
9836    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9837    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9838    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9839    sp_sb_data.igu_seg_id      = igu_seg_id;
9840    sp_sb_data.p_func.pf_id    = func;
9841    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9842    sp_sb_data.p_func.vf_id    = 0xff;
9843
9844    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9845
9846    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9847}
9848
9849static void
9850bxe_init_sp_ring(struct bxe_softc *sc)
9851{
9852    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9853    sc->spq_prod_idx = 0;
9854    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9855    sc->spq_prod_bd = sc->spq;
9856    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9857}
9858
9859static void
9860bxe_init_eq_ring(struct bxe_softc *sc)
9861{
9862    union event_ring_elem *elem;
9863    int i;
9864
9865    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9866        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9867
9868        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9869                                                 BCM_PAGE_SIZE *
9870                                                 (i % NUM_EQ_PAGES)));
9871        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9872                                                 BCM_PAGE_SIZE *
9873                                                 (i % NUM_EQ_PAGES)));
9874    }
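    /*
     * The last element of each page now acts as a next-page pointer; with
     * e.g. two pages, page 0 chains to (paddr + BCM_PAGE_SIZE) and page 1
     * wraps back to paddr since (i % NUM_EQ_PAGES) is 0 on the last pass.
     */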
9875
9876    sc->eq_cons    = 0;
9877    sc->eq_prod    = NUM_EQ_DESC;
9878    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9879
9880    atomic_store_rel_long(&sc->eq_spq_left,
9881                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9882                               NUM_EQ_DESC) - 1));
9883}
9884
9885static void
9886bxe_init_internal_common(struct bxe_softc *sc)
9887{
9888    int i;
9889
9890    /*
9891     * Zero this manually as its initialization is currently missing
9892     * in the initTool.
9893     */
9894    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9895        REG_WR(sc,
9896               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9897               0);
9898    }
9899
9900    if (!CHIP_IS_E1x(sc)) {
9901        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9902                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9903    }
9904}
9905
9906static void
9907bxe_init_internal(struct bxe_softc *sc,
9908                  uint32_t         load_code)
9909{
9910    switch (load_code) {
9911    case FW_MSG_CODE_DRV_LOAD_COMMON:
9912    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9913        bxe_init_internal_common(sc);
9914        /* no break */
9915
9916    case FW_MSG_CODE_DRV_LOAD_PORT:
9917        /* nothing to do */
9918        /* no break */
9919
9920    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9921        /* internal memory per function is initialized inside bxe_pf_init */
9922        break;
9923
9924    default:
9925        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9926        break;
9927    }
9928}
9929
9930static void
9931storm_memset_func_cfg(struct bxe_softc                         *sc,
9932                      struct tstorm_eth_function_common_config *tcfg,
9933                      uint16_t                                  abs_fid)
9934{
9935    uint32_t addr;
9936    size_t size;
9937
9938    addr = (BAR_TSTRORM_INTMEM +
9939            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9940    size = sizeof(struct tstorm_eth_function_common_config);
9941    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9942}
9943
9944static void
9945bxe_func_init(struct bxe_softc            *sc,
9946              struct bxe_func_init_params *p)
9947{
9948    struct tstorm_eth_function_common_config tcfg = { 0 };
9949
9950    if (CHIP_IS_E1x(sc)) {
9951        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9952    }
9953
9954    /* Enable the function in the FW */
9955    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9956    storm_memset_func_en(sc, p->func_id, 1);
9957
9958    /* spq */
9959    if (p->func_flgs & FUNC_FLG_SPQ) {
9960        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9961        REG_WR(sc,
9962               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9963               p->spq_prod);
9964    }
9965}
9966
9967/*
9968 * Calculates the per-VN minimum rates used for further normalization of
9969 * the min_rates and stores them in the cmng init input structure.
9970 * Behavior:
9971 *   - hidden VNs are skipped and their minimum rate is forced to zero
9972 *   - a VN whose configured minimum rate is zero is raised to DEF_MIN_RATE
9973 *   - if all the configured minimum rates are zero, or ETS is enabled,
9974 *     the fairness algorithm is deactivated
9975 *   - otherwise fairness is enabled for the port
9976 */
9977static void
9978bxe_calc_vn_min(struct bxe_softc       *sc,
9979                struct cmng_init_input *input)
9980{
9981    uint32_t vn_cfg;
9982    uint32_t vn_min_rate;
9983    int all_zero = 1;
9984    int vn;
9985
9986    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9987        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9988        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9989                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
9990
9991        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9992            /* skip hidden VNs */
9993            vn_min_rate = 0;
9994        } else if (!vn_min_rate) {
9995            /* If min rate is zero - set it to 100 */
9996            vn_min_rate = DEF_MIN_RATE;
9997        } else {
9998            all_zero = 0;
9999        }
10000
10001        input->vnic_min_rate[vn] = vn_min_rate;
10002    }
10003
10004    /* if ETS or all min rates are zeros - disable fairness */
10005    if (BXE_IS_ETS_ENABLED(sc)) {
10006        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10007        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10008    } else if (all_zero) {
10009        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10010        BLOGD(sc, DBG_LOAD,
10011              "Fairness disabled (all MIN values are zeroes)\n");
10012    } else {
10013        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10014    }
10015}
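/*
 * For example, a visible VN with a MIN_BW field of 25 in its mf_config gets
 * vnic_min_rate = 2500 (25 * 100) and keeps fairness enabled (unless ETS is
 * on), while a visible VN configured with 0 is bumped to DEF_MIN_RATE.
 */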
10016
10017static inline uint16_t
10018bxe_extract_max_cfg(struct bxe_softc *sc,
10019                    uint32_t         mf_cfg)
10020{
10021    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10022                        FUNC_MF_CFG_MAX_BW_SHIFT);
10023
10024    if (!max_cfg) {
10025        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10026        max_cfg = 100;
10027    }
10028
10029    return (max_cfg);
10030}
10031
10032static void
10033bxe_calc_vn_max(struct bxe_softc       *sc,
10034                int                    vn,
10035                struct cmng_init_input *input)
10036{
10037    uint16_t vn_max_rate;
10038    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10039    uint32_t max_cfg;
10040
10041    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10042        vn_max_rate = 0;
10043    } else {
10044        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10045
10046        if (IS_MF_SI(sc)) {
10047            /* max_cfg in percents of linkspeed */
10048            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10049        } else { /* SD modes */
10050            /* max_cfg is absolute in 100Mb units */
10051            vn_max_rate = (max_cfg * 100);
10052        }
10053    }
10054
10055    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10056
10057    input->vnic_max_rate[vn] = vn_max_rate;
10058}
10059
10060static void
10061bxe_cmng_fns_init(struct bxe_softc *sc,
10062                  uint8_t          read_cfg,
10063                  uint8_t          cmng_type)
10064{
10065    struct cmng_init_input input;
10066    int vn;
10067
10068    memset(&input, 0, sizeof(struct cmng_init_input));
10069
10070    input.port_rate = sc->link_vars.line_speed;
10071
10072    if (cmng_type == CMNG_FNS_MINMAX) {
10073        /* read mf conf from shmem */
10074        if (read_cfg) {
10075            bxe_read_mf_cfg(sc);
10076        }
10077
10078        /* get VN min rate and enable fairness if not 0 */
10079        bxe_calc_vn_min(sc, &input);
10080
10081        /* get VN max rate */
10082        if (sc->port.pmf) {
10083            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10084                bxe_calc_vn_max(sc, vn, &input);
10085            }
10086        }
10087
10088        /* always enable rate shaping and fairness */
10089        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10090
10091        ecore_init_cmng(&input, &sc->cmng);
10092        return;
10093    }
10094
10095    /* rate shaping and fairness are disabled */
10096    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10097}
10098
10099static int
10100bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10101{
10102    if (CHIP_REV_IS_SLOW(sc)) {
10103        return (CMNG_FNS_NONE);
10104    }
10105
10106    if (IS_MF(sc)) {
10107        return (CMNG_FNS_MINMAX);
10108    }
10109
10110    return (CMNG_FNS_NONE);
10111}
10112
10113static void
10114storm_memset_cmng(struct bxe_softc *sc,
10115                  struct cmng_init *cmng,
10116                  uint8_t          port)
10117{
10118    int vn;
10119    int func;
10120    uint32_t addr;
10121    size_t size;
10122
10123    addr = (BAR_XSTRORM_INTMEM +
10124            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10125    size = sizeof(struct cmng_struct_per_port);
10126    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10127
10128    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10129        func = func_by_vn(sc, vn);
10130
10131        addr = (BAR_XSTRORM_INTMEM +
10132                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10133        size = sizeof(struct rate_shaping_vars_per_vn);
10134        ecore_storm_memset_struct(sc, addr, size,
10135                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10136
10137        addr = (BAR_XSTRORM_INTMEM +
10138                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10139        size = sizeof(struct fairness_vars_per_vn);
10140        ecore_storm_memset_struct(sc, addr, size,
10141                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10142    }
10143}
10144
10145static void
10146bxe_pf_init(struct bxe_softc *sc)
10147{
10148    struct bxe_func_init_params func_init = { 0 };
10149    struct event_ring_data eq_data = { { 0 } };
10150    uint16_t flags;
10151
10152    if (!CHIP_IS_E1x(sc)) {
10153        /* reset IGU PF statistics: MSIX + ATTN */
10154        /* PF */
10155        REG_WR(sc,
10156               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10157                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10158                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10159               0);
10160        /* ATTN */
10161        REG_WR(sc,
10162               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10163                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10164                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10165                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10166               0);
10167    }
10168
10169    /* function setup flags */
10170    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10171
10172    /*
10173     * This flag is relevant for E1x only.
10174     * E2 doesn't have a TPA configuration at the function level.
10175     */
10176    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10177
10178    func_init.func_flgs = flags;
10179    func_init.pf_id     = SC_FUNC(sc);
10180    func_init.func_id   = SC_FUNC(sc);
10181    func_init.spq_map   = sc->spq_dma.paddr;
10182    func_init.spq_prod  = sc->spq_prod_idx;
10183
10184    bxe_func_init(sc, &func_init);
10185
10186    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10187
10188    /*
10189     * Congestion management values depend on the link rate.
10190     * There is no active link so initial link rate is set to 10Gbps.
10191     * When the link comes up the congestion management values are
10192     * re-calculated according to the actual link rate.
10193     */
10194    sc->link_vars.line_speed = SPEED_10000;
10195    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10196
10197    /* Only the PMF sets the HW */
10198    if (sc->port.pmf) {
10199        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10200    }
10201
10202    /* init Event Queue - PCI bus guarantees correct endianity */
10203    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10204    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10205    eq_data.producer     = sc->eq_prod;
10206    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10207    eq_data.sb_id        = DEF_SB_ID;
10208    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10209}
10210
10211static void
10212bxe_hc_int_enable(struct bxe_softc *sc)
10213{
10214    int port = SC_PORT(sc);
10215    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10216    uint32_t val = REG_RD(sc, addr);
10217    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10218    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10219                           (sc->intr_count == 1)) ? TRUE : FALSE;
10220    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10221
10222    if (msix) {
10223        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10224                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10225        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10226                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10227        if (single_msix) {
10228            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10229        }
10230    } else if (msi) {
10231        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10232        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10233                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10234                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10235    } else {
10236        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10237                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10238                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10239                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10240
10241        if (!CHIP_IS_E1(sc)) {
10242            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10243                  val, port, addr);
10244
10245            REG_WR(sc, addr, val);
10246
10247            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10248        }
10249    }
10250
10251    if (CHIP_IS_E1(sc)) {
10252        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10253    }
10254
10255    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10256          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10257
10258    REG_WR(sc, addr, val);
10259
10260    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10261    mb();
10262
10263    if (!CHIP_IS_E1(sc)) {
10264        /* init leading/trailing edge */
10265        if (IS_MF(sc)) {
10266            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10267            if (sc->port.pmf) {
10268                /* enable nig and gpio3 attention */
10269                val |= 0x1100;
10270            }
10271        } else {
10272            val = 0xffff;
10273        }
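        /*
         * For example, a PMF function on VN 1 in MF mode ends up with
         * val = (0xee0f | (1 << 5) | 0x1100) = 0xff2f, unmasking its own
         * VN bit plus the NIG and GPIO3 attentions.
         */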
10274
10275        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10276        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10277    }
10278
10279    /* make sure that interrupts are indeed enabled from here on */
10280    mb();
10281}
10282
10283static void
10284bxe_igu_int_enable(struct bxe_softc *sc)
10285{
10286    uint32_t val;
10287    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10288    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10289                           (sc->intr_count == 1)) ? TRUE : FALSE;
10290    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10291
10292    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10293
10294    if (msix) {
10295        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10296                 IGU_PF_CONF_SINGLE_ISR_EN);
10297        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10298                IGU_PF_CONF_ATTN_BIT_EN);
10299        if (single_msix) {
10300            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10301        }
10302    } else if (msi) {
10303        val &= ~IGU_PF_CONF_INT_LINE_EN;
10304        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10305                IGU_PF_CONF_ATTN_BIT_EN |
10306                IGU_PF_CONF_SINGLE_ISR_EN);
10307    } else {
10308        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10309        val |= (IGU_PF_CONF_INT_LINE_EN |
10310                IGU_PF_CONF_ATTN_BIT_EN |
10311                IGU_PF_CONF_SINGLE_ISR_EN);
10312    }
10313
10314    /* clean previous status - need to configure igu prior to ack */
10315    if ((!msix) || single_msix) {
10316        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10317        bxe_ack_int(sc);
10318    }
10319
10320    val |= IGU_PF_CONF_FUNC_EN;
10321
10322    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10323          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10324
10325    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10326
10327    mb();
10328
10329    /* init leading/trailing edge */
10330    if (IS_MF(sc)) {
10331        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10332        if (sc->port.pmf) {
10333            /* enable nig and gpio3 attention */
10334            val |= 0x1100;
10335        }
10336    } else {
10337        val = 0xffff;
10338    }
10339
10340    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10341    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10342
10343    /* make sure that interrupts are indeed enabled from here on */
10344    mb();
10345}
10346
10347static void
10348bxe_int_enable(struct bxe_softc *sc)
10349{
10350    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10351        bxe_hc_int_enable(sc);
10352    } else {
10353        bxe_igu_int_enable(sc);
10354    }
10355}
10356
10357static void
10358bxe_hc_int_disable(struct bxe_softc *sc)
10359{
10360    int port = SC_PORT(sc);
10361    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10362    uint32_t val = REG_RD(sc, addr);
10363
10364    /*
10365     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10366     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10367     * HC block.
10368     */
10369    if (CHIP_IS_E1(sc)) {
10370        /*
10371         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10372         * to prevent the HC from sending interrupts after we exit the function
10373         */
10374        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10375
10376        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10377                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10378                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10379    } else {
10380        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10381                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10382                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10383                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10384    }
10385
10386    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10387
10388    /* flush all outstanding writes */
10389    mb();
10390
10391    REG_WR(sc, addr, val);
10392    if (REG_RD(sc, addr) != val) {
10393        BLOGE(sc, "proper val not read from HC IGU!\n");
10394    }
10395}
10396
10397static void
10398bxe_igu_int_disable(struct bxe_softc *sc)
10399{
10400    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10401
10402    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10403             IGU_PF_CONF_INT_LINE_EN |
10404             IGU_PF_CONF_ATTN_BIT_EN);
10405
10406    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10407
10408    /* flush all outstanding writes */
10409    mb();
10410
10411    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10412    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10413        BLOGE(sc, "proper val not read from IGU!\n");
10414    }
10415}
10416
10417static void
10418bxe_int_disable(struct bxe_softc *sc)
10419{
10420    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10421        bxe_hc_int_disable(sc);
10422    } else {
10423        bxe_igu_int_disable(sc);
10424    }
10425}
10426
10427static void
10428bxe_nic_init(struct bxe_softc *sc,
10429             int              load_code)
10430{
10431    int i;
10432
10433    for (i = 0; i < sc->num_queues; i++) {
10434        bxe_init_eth_fp(sc, i);
10435    }
10436
10437    rmb(); /* ensure status block indices were read */
10438
10439    bxe_init_rx_rings(sc);
10440    bxe_init_tx_rings(sc);
10441
10442    if (IS_VF(sc)) {
10443        return;
10444    }
10445
10446    /* initialize MOD_ABS interrupts */
10447    elink_init_mod_abs_int(sc, &sc->link_vars,
10448                           sc->devinfo.chip_id,
10449                           sc->devinfo.shmem_base,
10450                           sc->devinfo.shmem2_base,
10451                           SC_PORT(sc));
10452
10453    bxe_init_def_sb(sc);
10454    bxe_update_dsb_idx(sc);
10455    bxe_init_sp_ring(sc);
10456    bxe_init_eq_ring(sc);
10457    bxe_init_internal(sc, load_code);
10458    bxe_pf_init(sc);
10459    bxe_stats_init(sc);
10460
10461    /* flush all before enabling interrupts */
10462    mb();
10463
10464    bxe_int_enable(sc);
10465
10466    /* check for SPIO5 */
10467    bxe_attn_int_deasserted0(sc,
10468                             REG_RD(sc,
10469                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10470                                     SC_PORT(sc)*4)) &
10471                             AEU_INPUTS_ATTN_BITS_SPIO5);
10472}
10473
10474static inline void
10475bxe_init_objs(struct bxe_softc *sc)
10476{
10477    /* mcast rules must be added to tx if tx switching is enabled */
10478    ecore_obj_type o_type =
10479        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10480                                         ECORE_OBJ_TYPE_RX;
10481
10482    /* RX_MODE controlling object */
10483    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10484
10485    /* multicast configuration controlling object */
10486    ecore_init_mcast_obj(sc,
10487                         &sc->mcast_obj,
10488                         sc->fp[0].cl_id,
10489                         sc->fp[0].index,
10490                         SC_FUNC(sc),
10491                         SC_FUNC(sc),
10492                         BXE_SP(sc, mcast_rdata),
10493                         BXE_SP_MAPPING(sc, mcast_rdata),
10494                         ECORE_FILTER_MCAST_PENDING,
10495                         &sc->sp_state,
10496                         o_type);
10497
10498    /* Setup CAM credit pools */
10499    ecore_init_mac_credit_pool(sc,
10500                               &sc->macs_pool,
10501                               SC_FUNC(sc),
10502                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10503                                                 VNICS_PER_PATH(sc));
10504
10505    ecore_init_vlan_credit_pool(sc,
10506                                &sc->vlans_pool,
10507                                SC_ABS_FUNC(sc) >> 1,
10508                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10509                                                  VNICS_PER_PATH(sc));
10510
10511    /* RSS configuration object */
10512    ecore_init_rss_config_obj(sc,
10513                              &sc->rss_conf_obj,
10514                              sc->fp[0].cl_id,
10515                              sc->fp[0].index,
10516                              SC_FUNC(sc),
10517                              SC_FUNC(sc),
10518                              BXE_SP(sc, rss_rdata),
10519                              BXE_SP_MAPPING(sc, rss_rdata),
10520                              ECORE_FILTER_RSS_CONF_PENDING,
10521                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10522}
10523
10524/*
10525 * Initialize the function. This must be called before sending CLIENT_SETUP
10526 * for the first client.
10527 */
10528static inline int
10529bxe_func_start(struct bxe_softc *sc)
10530{
10531    struct ecore_func_state_params func_params = { NULL };
10532    struct ecore_func_start_params *start_params = &func_params.params.start;
10533
10534    /* Prepare parameters for function state transitions */
10535    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10536
10537    func_params.f_obj = &sc->func_obj;
10538    func_params.cmd = ECORE_F_CMD_START;
10539
10540    /* Function parameters */
10541    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10542    start_params->sd_vlan_tag = OVLAN(sc);
10543
10544    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10545        start_params->network_cos_mode = STATIC_COS;
10546    } else { /* CHIP_IS_E1X */
10547        start_params->network_cos_mode = FW_WRR;
10548    }
10549
10550    //start_params->gre_tunnel_mode = 0;
10551    //start_params->gre_tunnel_rss  = 0;
10552
10553    return (ecore_func_state_change(sc, &func_params));
10554}
10555
10556static int
10557bxe_set_power_state(struct bxe_softc *sc,
10558                    uint8_t          state)
10559{
10560    uint16_t pmcsr;
10561
10562    /* If there is no power capability, warn and succeed */
10563    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10564        BLOGW(sc, "No power capability\n");
10565        return (0);
10566    }
10567
10568    pmcsr = pci_read_config(sc->dev,
10569                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10570                            2);
10571
10572    switch (state) {
10573    case PCI_PM_D0:
10574        pci_write_config(sc->dev,
10575                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10576                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10577
10578        if (pmcsr & PCIM_PSTAT_DMASK) {
10579            /* delay required during transition out of D3hot */
10580            DELAY(20000);
10581        }
10582
10583        break;
10584
10585    case PCI_PM_D3hot:
10586        /* XXX if there are other clients above don't shut down the power */
10587
10588        /* don't shut down the power for emulation and FPGA */
10589        if (CHIP_REV_IS_SLOW(sc)) {
10590            return (0);
10591        }
10592
10593        pmcsr &= ~PCIM_PSTAT_DMASK;
10594        pmcsr |= PCIM_PSTAT_D3;
10595
10596        if (sc->wol) {
10597            pmcsr |= PCIM_PSTAT_PMEENABLE;
10598        }
10599
10600        pci_write_config(sc->dev,
10601                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10602                         pmcsr, 4);
10603
10604        /*
10605         * No more memory access after this point until the device is brought back
10606         * to D0 state.
10607         */
10608        break;
10609
10610    default:
10611        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10612            state, pmcsr);
10613        return (-1);
10614    }
10615
10616    return (0);
10617}
10618
10619
10620/* return true if succeeded to acquire the lock */
10621static uint8_t
10622bxe_trylock_hw_lock(struct bxe_softc *sc,
10623                    uint32_t         resource)
10624{
10625    uint32_t lock_status;
10626    uint32_t resource_bit = (1 << resource);
10627    int func = SC_FUNC(sc);
10628    uint32_t hw_lock_control_reg;
10629
10630    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10631
10632    /* Validating that the resource is within range */
10633    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10634        BLOGD(sc, DBG_LOAD,
10635              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10636              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10637        return (FALSE);
10638    }
10639
10640    if (func <= 5) {
10641        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10642    } else {
10643        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10644    }
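    /*
     * The per-function lock registers are spaced 8 bytes apart; e.g.
     * function 3 uses (MISC_REG_DRIVER_CONTROL_1 + 24) and function 7 uses
     * (MISC_REG_DRIVER_CONTROL_7 + 8). Writing the resource bit at offset
     * +4 requests the lock, and reading the control register back shows
     * whether it was granted.
     */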
10645
10646    /* try to acquire the lock */
10647    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10648    lock_status = REG_RD(sc, hw_lock_control_reg);
10649    if (lock_status & resource_bit) {
10650        return (TRUE);
10651    }
10652
10653    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10654        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10655        lock_status, resource_bit);
10656
10657    return (FALSE);
10658}
10659
10660/*
10661 * Get the recovery leader resource id according to the engine this function
10662 * belongs to. Currently only only 2 engines is supported.
10663 * belongs to. Currently only 2 engines are supported.
10664static int
10665bxe_get_leader_lock_resource(struct bxe_softc *sc)
10666{
10667    if (SC_PATH(sc)) {
10668        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10669    } else {
10670        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10671    }
10672}
10673
10674/* try to acquire a leader lock for current engine */
10675static uint8_t
10676bxe_trylock_leader_lock(struct bxe_softc *sc)
10677{
10678    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10679}
10680
10681static int
10682bxe_release_leader_lock(struct bxe_softc *sc)
10683{
10684    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10685}
10686
10687/* close gates #2, #3 and #4 */
10688static void
10689bxe_set_234_gates(struct bxe_softc *sc,
10690                  uint8_t          close)
10691{
10692    uint32_t val;
10693
10694    /* gates #2 and #4a are closed/opened for "not E1" only */
10695    if (!CHIP_IS_E1(sc)) {
10696        /* #4 */
10697        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10698        /* #2 */
10699        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10700    }
10701
10702    /* #3 */
10703    if (CHIP_IS_E1x(sc)) {
10704        /* prevent interrupts from HC on both ports */
10705        val = REG_RD(sc, HC_REG_CONFIG_1);
10706        REG_WR(sc, HC_REG_CONFIG_1,
10707               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10708               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10709
10710        val = REG_RD(sc, HC_REG_CONFIG_0);
10711        REG_WR(sc, HC_REG_CONFIG_0,
10712               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10713               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10714    } else {
10715        /* Prevent incoming interrupts in IGU */
10716        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10717
10718        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10719               (!close) ?
10720               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10721               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10722    }
10723
10724    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10725          close ? "closing" : "opening");
10726
10727    wmb();
10728}
10729
10730/* poll for the pending writes bit; it should get cleared in no more than 1s */
10731static int
10732bxe_er_poll_igu_vq(struct bxe_softc *sc)
10733{
10734    uint32_t cnt = 1000;
10735    uint32_t pend_bits = 0;
10736
10737    do {
10738        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10739
10740        if (pend_bits == 0) {
10741            break;
10742        }
10743
10744        DELAY(1000);
10745    } while (--cnt > 0);
10746
10747    if (cnt == 0) {
10748        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10749        return (-1);
10750    }
10751
10752    return (0);
10753}
10754
10755#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10756
10757static void
10758bxe_clp_reset_prep(struct bxe_softc *sc,
10759                   uint32_t         *magic_val)
10760{
10761    /* Do some magic... */
10762    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10763    *magic_val = val & SHARED_MF_CLP_MAGIC;
10764    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10765}
10766
10767/* restore the value of the 'magic' bit */
10768static void
10769bxe_clp_reset_done(struct bxe_softc *sc,
10770                   uint32_t         magic_val)
10771{
10772    /* Restore the 'magic' bit value... */
10773    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10774    MFCFG_WR(sc, shared_mf_config.clp_mb,
10775              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10776}
10777
10778/* prepare for MCP reset, takes care of CLP configurations */
10779static void
10780bxe_reset_mcp_prep(struct bxe_softc *sc,
10781                   uint32_t         *magic_val)
10782{
10783    uint32_t shmem;
10784    uint32_t validity_offset;
10785
10786    /* set `magic' bit in order to save MF config */
10787    if (!CHIP_IS_E1(sc)) {
10788        bxe_clp_reset_prep(sc, magic_val);
10789    }
10790
10791    /* get shmem offset */
10792    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10793    validity_offset =
10794        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10795
10796    /* Clear validity map flags */
10797    if (shmem > 0) {
10798        REG_WR(sc, shmem + validity_offset, 0);
10799    }
10800}
10801
10802#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10803#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10804
10805static void
10806bxe_mcp_wait_one(struct bxe_softc *sc)
10807{
10808    /* special handling for emulation and FPGA (10 times longer) */
10809    if (CHIP_REV_IS_SLOW(sc)) {
10810        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10811    } else {
10812        DELAY((MCP_ONE_TIMEOUT) * 1000);
10813    }
10814}
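/*
 * One wait is MCP_ONE_TIMEOUT (100 ms) on real silicon and ten times that
 * on emulation/FPGA; bxe_init_shmem() below performs up to
 * (MCP_TIMEOUT / MCP_ONE_TIMEOUT) = 50 such waits, i.e. roughly 5 seconds
 * on real hardware.
 */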
10815
10816/* initialize shmem_base and wait for the validity signature to appear */
10817static int
10818bxe_init_shmem(struct bxe_softc *sc)
10819{
10820    int cnt = 0;
10821    uint32_t val = 0;
10822
10823    do {
10824        sc->devinfo.shmem_base     =
10825        sc->link_params.shmem_base =
10826            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10827
10828        if (sc->devinfo.shmem_base) {
10829            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10830            if (val & SHR_MEM_VALIDITY_MB)
10831                return (0);
10832        }
10833
10834        bxe_mcp_wait_one(sc);
10835
10836    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10837
10838    BLOGE(sc, "BAD MCP validity signature\n");
10839
10840    return (-1);
10841}
10842
10843static int
10844bxe_reset_mcp_comp(struct bxe_softc *sc,
10845                   uint32_t         magic_val)
10846{
10847    int rc = bxe_init_shmem(sc);
10848
10849    /* Restore the `magic' bit value */
10850    if (!CHIP_IS_E1(sc)) {
10851        bxe_clp_reset_done(sc, magic_val);
10852    }
10853
10854    return (rc);
10855}
10856
10857static void
10858bxe_pxp_prep(struct bxe_softc *sc)
10859{
10860    if (!CHIP_IS_E1(sc)) {
10861        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10862        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10863        wmb();
10864    }
10865}
10866
10867/*
10868 * Reset the whole chip except for:
10869 *      - PCIE core
10870 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10871 *      - IGU
10872 *      - MISC (including AEU)
10873 *      - GRC
10874 *      - RBCN, RBCP
10875 */
10876static void
10877bxe_process_kill_chip_reset(struct bxe_softc *sc,
10878                            uint8_t          global)
10879{
10880    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10881    uint32_t global_bits2, stay_reset2;
10882
10883    /*
10884     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10885     * (per chip) blocks.
10886     */
10887    global_bits2 =
10888        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10889        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10890
10891    /*
10892     * Don't reset the following blocks.
10893     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10894     *            reset, as in a 4-port device they might still be owned
10895     *            by the MCP (there is only one leader per path).
10896     */
10897    not_reset_mask1 =
10898        MISC_REGISTERS_RESET_REG_1_RST_HC |
10899        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10900        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10901
10902    not_reset_mask2 =
10903        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10904        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10905        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10906        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10907        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10908        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10909        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10910        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10911        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10912        MISC_REGISTERS_RESET_REG_2_PGLC |
10913        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10914        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10915        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10916        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10917        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10918        MISC_REGISTERS_RESET_REG_2_UMAC1;
10919
10920    /*
10921     * Keep the following blocks in reset:
10922     *  - all xxMACs are handled by the elink code.
10923     */
10924    stay_reset2 =
10925        MISC_REGISTERS_RESET_REG_2_XMAC |
10926        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10927
10928    /* Full reset masks according to the chip */
10929    reset_mask1 = 0xffffffff;
10930
10931    if (CHIP_IS_E1(sc))
10932        reset_mask2 = 0xffff;
10933    else if (CHIP_IS_E1H(sc))
10934        reset_mask2 = 0x1ffff;
10935    else if (CHIP_IS_E2(sc))
10936        reset_mask2 = 0xfffff;
10937    else /* CHIP_IS_E3 */
10938        reset_mask2 = 0x3ffffff;
10939
10940    /* Don't reset global blocks unless we need to */
10941    if (!global)
10942        reset_mask2 &= ~global_bits2;
10943
10944    /*
10945     * In case of attention in the QM, we need to reset PXP
10946     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10947     * because otherwise QM reset would release 'close the gates' shortly
10948     * before resetting the PXP, then the PSWRQ would send a write
10949     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10950     * read the payload data from PSWWR, but PSWWR would not
10951     * respond. The write queue in PGLUE would get stuck and DMAE commands
10952     * would not return. Therefore it's important to reset the second
10953     * reset register (containing the
10954     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10955     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10956     * bit).
10957     */
10958    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10959           reset_mask2 & (~not_reset_mask2));
10960
10961    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10962           reset_mask1 & (~not_reset_mask1));
10963
10964    mb();
10965    wmb();
10966
10967    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10968           reset_mask2 & (~stay_reset2));
10969
10970    mb();
10971    wmb();
10972
10973    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10974    wmb();
10975}
10976
10977static int
10978bxe_process_kill(struct bxe_softc *sc,
10979                 uint8_t          global)
10980{
10981    int cnt = 1000;
10982    uint32_t val = 0;
10983    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10984    uint32_t tags_63_32 = 0;
10985
10986    /* Empty the Tetris buffer, wait for 1s */
10987    do {
10988        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10989        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10990        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10991        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10992        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10993        if (CHIP_IS_E3(sc)) {
10994            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10995        }
10996
10997        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10998            ((port_is_idle_0 & 0x1) == 0x1) &&
10999            ((port_is_idle_1 & 0x1) == 0x1) &&
11000            (pgl_exp_rom2 == 0xffffffff) &&
11001            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11002            break;
11003        DELAY(1000);
11004    } while (cnt-- > 0);
11005
11006    if (cnt <= 0) {
11007        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11008                  "are still outstanding read requests after 1s! "
11009                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11010                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11011              sr_cnt, blk_cnt, port_is_idle_0,
11012              port_is_idle_1, pgl_exp_rom2);
11013        return (-1);
11014    }
11015
11016    mb();
11017
11018    /* Close gates #2, #3 and #4 */
11019    bxe_set_234_gates(sc, TRUE);
11020
11021    /* Poll for IGU VQs for 57712 and newer chips */
11022    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11023        return (-1);
11024    }
11025
11026    /* XXX indicate that "process kill" is in progress to MCP */
11027
11028    /* clear "unprepared" bit */
11029    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11030    mb();
11031
11032    /* Make sure all is written to the chip before the reset */
11033    wmb();
11034
11035    /*
11036     * Wait for 1ms to empty GLUE and PCI-E core queues,
11037     * PSWHST, GRC and PSWRD Tetris buffer.
11038     */
11039    DELAY(1000);
11040
11041    /* Prepare for chip reset: */
11042    /* MCP */
11043    if (global) {
11044        bxe_reset_mcp_prep(sc, &val);
11045    }
11046
11047    /* PXP */
11048    bxe_pxp_prep(sc);
11049    mb();
11050
11051    /* reset the chip */
11052    bxe_process_kill_chip_reset(sc, global);
11053    mb();
11054
11055    /* clear errors in PGB */
11056    if (!CHIP_IS_E1(sc))
11057        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11058
11059    /* Recover after reset: */
11060    /* MCP */
11061    if (global && bxe_reset_mcp_comp(sc, val)) {
11062        return (-1);
11063    }
11064
11065    /* XXX add resetting the NO_MCP mode DB here */
11066
11067    /* Open the gates #2, #3 and #4 */
11068    bxe_set_234_gates(sc, FALSE);
11069
11070    /* XXX
11071     * IGU/AEU preparation: bring the AEU/IGU back to a reset state and
11072     * re-enable attentions
11073     */
11074
11075    return (0);
11076}
11077
11078static int
11079bxe_leader_reset(struct bxe_softc *sc)
11080{
11081    int rc = 0;
11082    uint8_t global = bxe_reset_is_global(sc);
11083    uint32_t load_code;
11084
11085    /*
11086     * If not going to reset MCP, load "fake" driver to reset HW while
11087     * driver is owner of the HW.
11088     */
11089    if (!global && !BXE_NOMCP(sc)) {
11090        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11091                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11092        if (!load_code) {
11093            BLOGE(sc, "MCP response failure, aborting\n");
11094            rc = -1;
11095            goto exit_leader_reset;
11096        }
11097
11098        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11099            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11100            BLOGE(sc, "MCP unexpected response, aborting\n");
11101            rc = -1;
11102            goto exit_leader_reset2;
11103        }
11104
11105        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11106        if (!load_code) {
11107            BLOGE(sc, "MCP response failure, aborting\n");
11108            rc = -1;
11109            goto exit_leader_reset2;
11110        }
11111    }
11112
11113    /* try to recover after the failure */
11114    if (bxe_process_kill(sc, global)) {
11115        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11116        rc = -1;
11117        goto exit_leader_reset2;
11118    }
11119
11120    /*
11121     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11122     * state.
11123     */
11124    bxe_set_reset_done(sc);
11125    if (global) {
11126        bxe_clear_reset_global(sc);
11127    }
11128
11129exit_leader_reset2:
11130
11131    /* unload "fake driver" if it was loaded */
11132    if (!global && !BXE_NOMCP(sc)) {
11133        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11134        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11135    }
11136
11137exit_leader_reset:
11138
11139    sc->is_leader = 0;
11140    bxe_release_leader_lock(sc);
11141
11142    mb();
11143    return (rc);
11144}
11145
11146/*
11147 * prepare INIT transition, parameters configured:
11148 *   - HC configuration
11149 *   - Queue's CDU context
11150 */
11151static void
11152bxe_pf_q_prep_init(struct bxe_softc               *sc,
11153                   struct bxe_fastpath            *fp,
11154                   struct ecore_queue_init_params *init_params)
11155{
11156    uint8_t cos;
11157    int cxt_index, cxt_offset;
11158
11159    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11160    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11161
11162    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11163    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11164
11165    /* HC rate */
11166    init_params->rx.hc_rate =
11167        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11168    init_params->tx.hc_rate =
11169        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
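    /*
     * For example, assuming the tick values are in microseconds, an
     * hc_rx_ticks of 25 yields an RX update rate of 40000 per second, while
     * a tick value of 0 leaves the rate at 0.
     */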
11170
11171    /* FW SB ID */
11172    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11173
11174    /* CQ index among the SB indices */
11175    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11176    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11177
11178    /* set maximum number of COSs supported by this queue */
11179    init_params->max_cos = sc->max_cos;
11180
11181    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11182          fp->index, init_params->max_cos);
11183
11184    /* set the context pointers in the queue object */
11185    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11186        /* XXX change index/cid here if ever support multiple tx CoS */
11187        /* fp->txdata[cos]->cid */
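        /* locate this queue's HW context: ILT page index and offset within that page */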
11188        cxt_index = fp->index / ILT_PAGE_CIDS;
11189        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11190        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11191    }
11192}
11193
11194/* set flags that are common to both the Tx-only and the normal connections */
11195static unsigned long
11196bxe_get_common_flags(struct bxe_softc    *sc,
11197                     struct bxe_fastpath *fp,
11198                     uint8_t             zero_stats)
11199{
11200    unsigned long flags = 0;
11201
11202    /* PF driver will always initialize the Queue to an ACTIVE state */
11203    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11204
11205    /*
11206     * tx only connections collect statistics (on the same index as the
11207     * parent connection). The statistics are zeroed when the parent
11208     * connection is initialized.
11209     */
11210
11211    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11212    if (zero_stats) {
11213        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11214    }
11215
11216    /*
11217     * tx only connections can support tx-switching, though their
11218     * CoS-ness doesn't survive the loopback
11219     */
11220    if (sc->flags & BXE_TX_SWITCHING) {
11221        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11222    }
11223
11224    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11225
11226    return (flags);
11227}
11228
11229static unsigned long
11230bxe_get_q_flags(struct bxe_softc    *sc,
11231                struct bxe_fastpath *fp,
11232                uint8_t             leading)
11233{
11234    unsigned long flags = 0;
11235
11236    if (IS_MF_SD(sc)) {
11237        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11238    }
11239
11240    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11241        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11242#if __FreeBSD_version >= 800000
11243        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11244#endif
11245    }
11246
11247    if (leading) {
11248        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11249        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11250    }
11251
11252    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11253
11254    /* merge with common flags */
11255    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11256}
11257
11258static void
11259bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11260                      struct bxe_fastpath               *fp,
11261                      struct ecore_general_setup_params *gen_init,
11262                      uint8_t                           cos)
11263{
11264    gen_init->stat_id = bxe_stats_id(fp);
11265    gen_init->spcl_id = fp->cl_id;
11266    gen_init->mtu = sc->mtu;
11267    gen_init->cos = cos;
11268}
11269
11270static void
11271bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11272                 struct bxe_fastpath           *fp,
11273                 struct rxq_pause_params       *pause,
11274                 struct ecore_rxq_setup_params *rxq_init)
11275{
11276    uint8_t max_sge = 0;
11277    uint16_t sge_sz = 0;
11278    uint16_t tpa_agg_size = 0;
11279
11280    pause->sge_th_lo = SGE_TH_LO(sc);
11281    pause->sge_th_hi = SGE_TH_HI(sc);
11282
11283    /* validate the SGE ring has enough entries to cross the high threshold */
11284    if (sc->dropless_fc &&
11285            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11286            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11287        BLOGW(sc, "sge ring threshold limit\n");
11288    }
11289
11290    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11291    tpa_agg_size = (2 * sc->mtu);
11292    if (tpa_agg_size < sc->max_aggregation_size) {
11293        tpa_agg_size = sc->max_aggregation_size;
11294    }
11295
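    /* max_sge is the number of SGE entries needed for one MTU-sized frame, */
    /* sge_sz is the size in bytes of a single SGE buffer */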
11296    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11297    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11298                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11299    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11300
11301    /* pause - not for e1 */
11302    if (!CHIP_IS_E1(sc)) {
11303        pause->bd_th_lo = BD_TH_LO(sc);
11304        pause->bd_th_hi = BD_TH_HI(sc);
11305
11306        pause->rcq_th_lo = RCQ_TH_LO(sc);
11307        pause->rcq_th_hi = RCQ_TH_HI(sc);
11308
11309        /* validate rings have enough entries to cross high thresholds */
11310        if (sc->dropless_fc &&
11311            pause->bd_th_hi + FW_PREFETCH_CNT >
11312            sc->rx_ring_size) {
11313            BLOGW(sc, "rx bd ring threshold limit\n");
11314        }
11315
11316        if (sc->dropless_fc &&
11317            pause->rcq_th_hi + FW_PREFETCH_CNT >
11318            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11319            BLOGW(sc, "rcq ring threshold limit\n");
11320        }
11321
11322        pause->pri_map = 1;
11323    }
11324
11325    /* rxq setup */
11326    rxq_init->dscr_map   = fp->rx_dma.paddr;
11327    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11328    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11329    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11330
11331    /*
11332     * This is the maximum number of data bytes that may be
11333     * placed on the BD (not including padding).
11334     */
11335    rxq_init->buf_sz = (fp->rx_buf_size -
11336                        IP_HEADER_ALIGNMENT_PADDING);
11337
11338    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11339    rxq_init->tpa_agg_sz      = tpa_agg_size;
11340    rxq_init->sge_buf_sz      = sge_sz;
11341    rxq_init->max_sges_pkt    = max_sge;
11342    rxq_init->rss_engine_id   = SC_FUNC(sc);
11343    rxq_init->mcast_engine_id = SC_FUNC(sc);
11344
11345    /*
11346     * Maximum number of simultaneous TPA aggregations for this queue.
11347     * For PF clients it should be the maximum available number.
11348     * VF driver(s) may want to define it to a smaller value.
11349     */
11350    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11351
11352    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11353    rxq_init->fw_sb_id = fp->fw_sb_id;
11354
11355    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11356
11357    /*
11358     * Configure silent VLAN removal.
11359     * If the multi-function mode is AFEX, mask the default VLAN.
11360     */
11361    if (IS_MF_AFEX(sc)) {
11362        rxq_init->silent_removal_value =
11363            sc->devinfo.mf_info.afex_def_vlan_tag;
11364        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11365    }
11366}
11367
11368static void
11369bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11370                 struct bxe_fastpath           *fp,
11371                 struct ecore_txq_setup_params *txq_init,
11372                 uint8_t                       cos)
11373{
11374    /*
11375     * XXX If multiple CoS are ever supported then each fastpath structure
11376     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11377     * fp->txdata[cos]->tx_dma.paddr;
11378     */
11379    txq_init->dscr_map     = fp->tx_dma.paddr;
11380    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11381    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11382    txq_init->fw_sb_id     = fp->fw_sb_id;
11383
11384    /*
11385     * set the TSS leading client id for TX classification to the
11386     * leading RSS client id
11387     */
11388    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11389}
11390
11391/*
11392 * This function performs 2 steps in a queue state machine:
11393 *   1) RESET->INIT
11394 *   2) INIT->SETUP
11395 */
11396static int
11397bxe_setup_queue(struct bxe_softc    *sc,
11398                struct bxe_fastpath *fp,
11399                uint8_t             leading)
11400{
11401    struct ecore_queue_state_params q_params = { NULL };
11402    struct ecore_queue_setup_params *setup_params =
11403                        &q_params.params.setup;
11404    int rc;
11405
11406    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11407
11408    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11409
11410    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11411
11412    /* we want to wait for completion in this context */
11413    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11414
11415    /* prepare the INIT parameters */
11416    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11417
11418    /* Set the command */
11419    q_params.cmd = ECORE_Q_CMD_INIT;
11420
11421    /* Change the state to INIT */
11422    rc = ecore_queue_state_change(sc, &q_params);
11423    if (rc) {
11424        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11425        return (rc);
11426    }
11427
11428    BLOGD(sc, DBG_LOAD, "init complete\n");
11429
11430    /* now move the Queue to the SETUP state */
11431    memset(setup_params, 0, sizeof(*setup_params));
11432
11433    /* set Queue flags */
11434    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11435
11436    /* set general SETUP parameters */
11437    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11438                          FIRST_TX_COS_INDEX);
11439
11440    bxe_pf_rx_q_prep(sc, fp,
11441                     &setup_params->pause_params,
11442                     &setup_params->rxq_params);
11443
11444    bxe_pf_tx_q_prep(sc, fp,
11445                     &setup_params->txq_params,
11446                     FIRST_TX_COS_INDEX);
11447
11448    /* Set the command */
11449    q_params.cmd = ECORE_Q_CMD_SETUP;
11450
11451    /* change the state to SETUP */
11452    rc = ecore_queue_state_change(sc, &q_params);
11453    if (rc) {
11454        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11455        return (rc);
11456    }
11457
11458    return (rc);
11459}
11460
11461static int
11462bxe_setup_leading(struct bxe_softc *sc)
11463{
11464    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11465}
11466
11467static int
11468bxe_config_rss_pf(struct bxe_softc            *sc,
11469                  struct ecore_rss_config_obj *rss_obj,
11470                  uint8_t                     config_hash)
11471{
11472    struct ecore_config_rss_params params = { NULL };
11473    int i;
11474
11475    /*
11476     * Although RSS is meaningless when there is a single HW queue, we
11477     * still need it enabled so that the HW Rx hash is generated.
11478     */
11479
11480    params.rss_obj = rss_obj;
11481
11482    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11483
11484    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11485
11486    /* RSS configuration */
11487    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11488    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11489    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11490    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11491    if (rss_obj->udp_rss_v4) {
11492        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11493    }
11494    if (rss_obj->udp_rss_v6) {
11495        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11496    }
11497
11498    /* Hash bits */
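    /* mask applied to the RSS hash result to index the indirection table */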
11499    params.rss_result_mask = MULTI_MASK;
11500
11501    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11502
11503    if (config_hash) {
11504        /* RSS keys */
11505        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11506            params.rss_key[i] = arc4random();
11507        }
11508
11509        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11510    }
11511
11512    return (ecore_config_rss(sc, &params));
11513}
11514
11515static int
11516bxe_config_rss_eth(struct bxe_softc *sc,
11517                   uint8_t          config_hash)
11518{
11519    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11520}
11521
11522static int
11523bxe_init_rss_pf(struct bxe_softc *sc)
11524{
11525    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11526    int i;
11527
11528    /*
11529     * Prepare the initial contents of the indirection table if
11530     * RSS is enabled
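     * (entries are assigned round-robin across the ETH queues, as offsets
     * from fp[0]'s client id)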
11531     */
11532    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11533        sc->rss_conf_obj.ind_table[i] =
11534            (sc->fp->cl_id + (i % num_eth_queues));
11535    }
11536
11537    if (sc->udp_rss) {
11538        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11539    }
11540
11541    /*
11542     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11543     * per-port, so if explicit configuration is needed, do it only
11544     * for a PMF.
11545     *
11546     * For 57712 and newer it's a per-function configuration.
11547     */
11548    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11549}
11550
11551static int
11552bxe_set_mac_one(struct bxe_softc          *sc,
11553                uint8_t                   *mac,
11554                struct ecore_vlan_mac_obj *obj,
11555                uint8_t                   set,
11556                int                       mac_type,
11557                unsigned long             *ramrod_flags)
11558{
11559    struct ecore_vlan_mac_ramrod_params ramrod_param;
11560    int rc;
11561
11562    memset(&ramrod_param, 0, sizeof(ramrod_param));
11563
11564    /* fill in general parameters */
11565    ramrod_param.vlan_mac_obj = obj;
11566    ramrod_param.ramrod_flags = *ramrod_flags;
11567
11568    /* fill a user request section if needed */
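    /* (when RAMROD_CONT is set we only continue executing previously queued commands) */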
11569    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11570        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11571
11572        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11573
11574        /* Set the command: ADD or DEL */
11575        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11576                                            ECORE_VLAN_MAC_DEL;
11577    }
11578
11579    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11580
11581    if (rc == ECORE_EXISTS) {
11582        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11583        /* do not treat adding the same MAC as an error */
11584        rc = 0;
11585    } else if (rc < 0) {
11586        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11587    }
11588
11589    return (rc);
11590}
11591
11592static int
11593bxe_set_eth_mac(struct bxe_softc *sc,
11594                uint8_t          set)
11595{
11596    unsigned long ramrod_flags = 0;
11597
11598    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11599
11600    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11601
11602    /* Eth MAC is set on RSS leading client (fp[0]) */
11603    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11604                            &sc->sp_objs->mac_obj,
11605                            set, ECORE_ETH_MAC, &ramrod_flags));
11606}
11607
11608static int
11609bxe_get_cur_phy_idx(struct bxe_softc *sc)
11610{
11611    uint32_t sel_phy_idx = 0;
11612
11613    if (sc->link_params.num_phys <= 1) {
11614        return (ELINK_INT_PHY);
11615    }
11616
11617    if (sc->link_vars.link_up) {
11618        sel_phy_idx = ELINK_EXT_PHY1;
11619        /* If the link is SERDES, check whether ELINK_EXT_PHY2 is the active PHY */
11620        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11621            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11622             ELINK_SUPPORTED_FIBRE))
11623            sel_phy_idx = ELINK_EXT_PHY2;
11624    } else {
11625        switch (elink_phy_selection(&sc->link_params)) {
11626        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11627        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11628        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11629               sel_phy_idx = ELINK_EXT_PHY1;
11630               break;
11631        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11632        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11633               sel_phy_idx = ELINK_EXT_PHY2;
11634               break;
11635        }
11636    }
11637
11638    return (sel_phy_idx);
11639}
11640
11641static int
11642bxe_get_link_cfg_idx(struct bxe_softc *sc)
11643{
11644    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11645
11646    /*
11647     * The selected active PHY index is always the post-swap index (when PHY
11648     * swapping is enabled), so reverse the mapping to recover the
11649     * configuration index.
11650     */
11651
11652    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11653        if (sel_phy_idx == ELINK_EXT_PHY1)
11654            sel_phy_idx = ELINK_EXT_PHY2;
11655        else if (sel_phy_idx == ELINK_EXT_PHY2)
11656            sel_phy_idx = ELINK_EXT_PHY1;
11657    }
11658
11659    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11660}
11661
11662static void
11663bxe_set_requested_fc(struct bxe_softc *sc)
11664{
11665    /*
11666     * Initialize the link parameters structure variables.
11667     * It is recommended to turn off RX flow control for jumbo frames
11668     * for better performance.
11669     */
11670    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11671        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11672    } else {
11673        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11674    }
11675}
11676
11677static void
11678bxe_calc_fc_adv(struct bxe_softc *sc)
11679{
11680    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11681
11682
11683    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11684                                           ADVERTISED_Pause);
11685
11686    switch (sc->link_vars.ieee_fc &
11687            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11688
11689    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11690        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11691                                          ADVERTISED_Pause);
11692        break;
11693
11694    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11695        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11696        break;
11697
11698    default:
11699        break;
11700
11701    }
11702}
11703
11704static uint16_t
11705bxe_get_mf_speed(struct bxe_softc *sc)
11706{
11707    uint16_t line_speed = sc->link_vars.line_speed;
11708    if (IS_MF(sc)) {
11709        uint16_t maxCfg =
11710            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11711
11712        /* calculate the current MAX line speed limit for the MF devices */
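        /* maxCfg is a percentage of the link speed in SI mode and is in */
        /* units of 100 Mbps in SD mode */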
11713        if (IS_MF_SI(sc)) {
11714            line_speed = (line_speed * maxCfg) / 100;
11715        } else { /* SD mode */
11716            uint16_t vn_max_rate = maxCfg * 100;
11717
11718            if (vn_max_rate < line_speed) {
11719                line_speed = vn_max_rate;
11720            }
11721        }
11722    }
11723
11724    return (line_speed);
11725}
11726
11727static void
11728bxe_fill_report_data(struct bxe_softc            *sc,
11729                     struct bxe_link_report_data *data)
11730{
11731    uint16_t line_speed = bxe_get_mf_speed(sc);
11732
11733    memset(data, 0, sizeof(*data));
11734
11735    /* fill the report data with the effective line speed */
11736    data->line_speed = line_speed;
11737
11738    /* Link is down */
11739    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11740        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11741    }
11742
11743    /* Full DUPLEX */
11744    if (sc->link_vars.duplex == DUPLEX_FULL) {
11745        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11746    }
11747
11748    /* Rx Flow Control is ON */
11749    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11750        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11751    }
11752
11753    /* Tx Flow Control is ON */
11754    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11755        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11756    }
11757}
11758
11759/* report link status to OS, should be called under phy_lock */
11760static void
11761bxe_link_report_locked(struct bxe_softc *sc)
11762{
11763    struct bxe_link_report_data cur_data;
11764
11765    /* reread mf_cfg */
11766    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11767        bxe_read_mf_cfg(sc);
11768    }
11769
11770    /* Read the current link report info */
11771    bxe_fill_report_data(sc, &cur_data);
11772
11773    /* Don't report link down or exactly the same link status twice */
11774    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11775        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11776                      &sc->last_reported_link.link_report_flags) &&
11777         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11778                      &cur_data.link_report_flags))) {
11779        return;
11780    }
11781
11782    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11783                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11784    sc->link_cnt++;
11785
11786    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11787    /* report new link params and remember the state for the next time */
11788    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11789
11790    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11791                     &cur_data.link_report_flags)) {
11792        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11793    } else {
11794        const char *duplex;
11795        const char *flow;
11796
11797        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11798                                   &cur_data.link_report_flags)) {
11799            duplex = "full";
11800            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11801        } else {
11802            duplex = "half";
11803            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11804        }
11805
11806        /*
11807         * Handle flow control last so that only the FC flags can still be
11808         * set. This makes it easy to check whether any flow control is
11809         * enabled.
11810         */
11811        if (cur_data.link_report_flags) {
11812            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11813                             &cur_data.link_report_flags) &&
11814                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11815                             &cur_data.link_report_flags)) {
11816                flow = "ON - receive & transmit";
11817            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11818                                    &cur_data.link_report_flags) &&
11819                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11820                                     &cur_data.link_report_flags)) {
11821                flow = "ON - receive";
11822            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11823                                     &cur_data.link_report_flags) &&
11824                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11825                                    &cur_data.link_report_flags)) {
11826                flow = "ON - transmit";
11827            } else {
11828                flow = "none"; /* possible? */
11829            }
11830        } else {
11831            flow = "none";
11832        }
11833
11834        if_link_state_change(sc->ifp, LINK_STATE_UP);
11835        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11836              cur_data.line_speed, duplex, flow);
11837    }
11838}
11839
11840static void
11841bxe_link_report(struct bxe_softc *sc)
11842{
11843    bxe_acquire_phy_lock(sc);
11844    bxe_link_report_locked(sc);
11845    bxe_release_phy_lock(sc);
11846}
11847
11848static void
11849bxe_link_status_update(struct bxe_softc *sc)
11850{
11851    if (sc->state != BXE_STATE_OPEN) {
11852        return;
11853    }
11854
11855    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11856        elink_link_status_update(&sc->link_params, &sc->link_vars);
11857    } else {
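        /* VF or emulation/FPGA chip rev: fake a fixed link configuration */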
11858        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11859                                  ELINK_SUPPORTED_10baseT_Full |
11860                                  ELINK_SUPPORTED_100baseT_Half |
11861                                  ELINK_SUPPORTED_100baseT_Full |
11862                                  ELINK_SUPPORTED_1000baseT_Full |
11863                                  ELINK_SUPPORTED_2500baseX_Full |
11864                                  ELINK_SUPPORTED_10000baseT_Full |
11865                                  ELINK_SUPPORTED_TP |
11866                                  ELINK_SUPPORTED_FIBRE |
11867                                  ELINK_SUPPORTED_Autoneg |
11868                                  ELINK_SUPPORTED_Pause |
11869                                  ELINK_SUPPORTED_Asym_Pause);
11870        sc->port.advertising[0] = sc->port.supported[0];
11871
11872        sc->link_params.sc                = sc;
11873        sc->link_params.port              = SC_PORT(sc);
11874        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11875        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11876        sc->link_params.req_line_speed[0] = SPEED_10000;
11877        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11878        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11879
11880        if (CHIP_REV_IS_FPGA(sc)) {
11881            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11882            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11883            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11884                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11885        } else {
11886            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11887            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11888            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11889                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11890        }
11891
11892        sc->link_vars.link_up = 1;
11893
11894        sc->link_vars.duplex    = DUPLEX_FULL;
11895        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11896
11897        if (IS_PF(sc)) {
11898            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11899            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11900            bxe_link_report(sc);
11901        }
11902    }
11903
11904    if (IS_PF(sc)) {
11905        if (sc->link_vars.link_up) {
11906            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11907        } else {
11908            bxe_stats_handle(sc, STATS_EVENT_STOP);
11909        }
11910        bxe_link_report(sc);
11911    } else {
11912        bxe_link_report(sc);
11913        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11914    }
11915}
11916
11917static int
11918bxe_initial_phy_init(struct bxe_softc *sc,
11919                     int              load_mode)
11920{
11921    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11922    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11923    struct elink_params *lp = &sc->link_params;
11924
11925    bxe_set_requested_fc(sc);
11926
11927    if (CHIP_REV_IS_SLOW(sc)) {
11928        uint32_t bond = CHIP_BOND_ID(sc);
11929        uint32_t feat = 0;
11930
11931        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11932            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11933        } else if (bond & 0x4) {
11934            if (CHIP_IS_E3(sc)) {
11935                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11936            } else {
11937                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11938            }
11939        } else if (bond & 0x8) {
11940            if (CHIP_IS_E3(sc)) {
11941                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11942            } else {
11943                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11944            }
11945        }
11946
11947        /* disable EMAC for E3 and above */
11948        if (bond & 0x2) {
11949            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11950        }
11951
11952        sc->link_params.feature_config_flags |= feat;
11953    }
11954
11955    bxe_acquire_phy_lock(sc);
11956
11957    if (load_mode == LOAD_DIAG) {
11958        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11959        /* Prefer doing PHY loopback at 10G speed, if possible */
11960        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11961            if (lp->speed_cap_mask[cfg_idx] &
11962                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11963                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11964            } else {
11965                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11966            }
11967        }
11968    }
11969
11970    if (load_mode == LOAD_LOOPBACK_EXT) {
11971        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11972    }
11973
11974    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11975
11976    bxe_release_phy_lock(sc);
11977
11978    bxe_calc_fc_adv(sc);
11979
11980    if (sc->link_vars.link_up) {
11981        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11982        bxe_link_report(sc);
11983    }
11984
11985    if (!CHIP_REV_IS_SLOW(sc)) {
11986        bxe_periodic_start(sc);
11987    }
11988
11989    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11990    return (rc);
11991}
11992
11993/* must be called under IF_ADDR_LOCK */
11994static int
11995bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
11996                         struct ecore_mcast_ramrod_params *p)
11997{
11998    if_t ifp = sc->ifp;
11999    int mc_count = 0;
12000    struct ifmultiaddr *ifma;
12001    struct ecore_mcast_list_elem *mc_mac;
12002
12003    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12004        if (ifma->ifma_addr->sa_family != AF_LINK) {
12005            continue;
12006        }
12007
12008        mc_count++;
12009    }
12010
12011    ECORE_LIST_INIT(&p->mcast_list);
12012    p->mcast_list_len = 0;
12013
12014    if (!mc_count) {
12015        return (0);
12016    }
12017
12018    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12019                    (M_NOWAIT | M_ZERO));
12020    if (!mc_mac) {
12021        BLOGE(sc, "Failed to allocate temp mcast list\n");
12022        return (-1);
12023    }
12024    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12025
12026    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12027        if (ifma->ifma_addr->sa_family != AF_LINK) {
12028            continue;
12029        }
12030
12031        mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
12032        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12033
12034        BLOGD(sc, DBG_LOAD,
12035              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12036              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12037              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5], mc_count);
12038        mc_mac++;
12039    }
12040
12041    p->mcast_list_len = mc_count;
12042
12043    return (0);
12044}
12045
12046static void
12047bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12048{
12049    struct ecore_mcast_list_elem *mc_mac =
12050        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12051                               struct ecore_mcast_list_elem,
12052                               link);
12053
12054    if (mc_mac) {
12055        /* only a single free as all mc_macs are in the same heap array */
12056        free(mc_mac, M_DEVBUF);
12057    }
12058}

12059static int
12060bxe_set_mc_list(struct bxe_softc *sc)
12061{
12062    struct ecore_mcast_ramrod_params rparam = { NULL };
12063    int rc = 0;
12064
12065    rparam.mcast_obj = &sc->mcast_obj;
12066
12067    BXE_MCAST_LOCK(sc);
12068
12069    /* first, clear all configured multicast MACs */
12070    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12071    if (rc < 0) {
12072        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12073        /* Manually backported parts of FreeBSD upstream r284470. */
12074        BXE_MCAST_UNLOCK(sc);
12075        return (rc);
12076    }
12077
12078    /* configure a new MACs list */
12079    rc = bxe_init_mcast_macs_list(sc, &rparam);
12080    if (rc) {
12081        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12082        BXE_MCAST_UNLOCK(sc);
12083        return (rc);
12084    }
12085
12086    /* Now add the new MACs */
12087    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12088    if (rc < 0) {
12089        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12090    }
12091
12092    bxe_free_mcast_macs_list(&rparam);
12093
12094    BXE_MCAST_UNLOCK(sc);
12095
12096    return (rc);
12097}
12098
12099static int
12100bxe_set_uc_list(struct bxe_softc *sc)
12101{
12102    if_t ifp = sc->ifp;
12103    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12104    struct ifaddr *ifa;
12105    unsigned long ramrod_flags = 0;
12106    int rc;
12107
12108#if __FreeBSD_version < 800000
12109    IF_ADDR_LOCK(ifp);
12110#else
12111    if_addr_rlock(ifp);
12112#endif
12113
12114    /* first schedule a cleanup of the old configuration */
12115    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12116    if (rc < 0) {
12117        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12118#if __FreeBSD_version < 800000
12119        IF_ADDR_UNLOCK(ifp);
12120#else
12121        if_addr_runlock(ifp);
12122#endif
12123        return (rc);
12124    }
12125
12126    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12127    while (ifa) {
12128        if (ifa->ifa_addr->sa_family != AF_LINK) {
12129            ifa = TAILQ_NEXT(ifa, ifa_link);
12130            continue;
12131        }
12132
12133        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12134                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12135        if (rc == -EEXIST) {
12136            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12137            /* do not treat adding same MAC as an error */
12138            rc = 0;
12139        } else if (rc < 0) {
12140            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12141#if __FreeBSD_version < 800000
12142            IF_ADDR_UNLOCK(ifp);
12143#else
12144            if_addr_runlock(ifp);
12145#endif
12146            return (rc);
12147        }
12148
12149        ifa = TAILQ_NEXT(ifa, ifa_link);
12150    }
12151
12152#if __FreeBSD_version < 800000
12153    IF_ADDR_UNLOCK(ifp);
12154#else
12155    if_addr_runlock(ifp);
12156#endif
12157
12158    /* Execute the pending commands */
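    /* passing a NULL MAC with RAMROD_CONT just flushes the commands queued above */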
12159    bit_set(&ramrod_flags, RAMROD_CONT);
12160    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12161                            ECORE_UC_LIST_MAC, &ramrod_flags));
12162}
12163
12164static void
12165bxe_set_rx_mode(struct bxe_softc *sc)
12166{
12167    if_t ifp = sc->ifp;
12168    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12169
12170    if (sc->state != BXE_STATE_OPEN) {
12171        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12172        return;
12173    }
12174
12175    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12176
12177    if (if_getflags(ifp) & IFF_PROMISC) {
12178        rx_mode = BXE_RX_MODE_PROMISC;
12179    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12180               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12181                CHIP_IS_E1(sc))) {
12182        rx_mode = BXE_RX_MODE_ALLMULTI;
12183    } else {
12184        if (IS_PF(sc)) {
12185            /* some multicasts */
12186            if (bxe_set_mc_list(sc) < 0) {
12187                rx_mode = BXE_RX_MODE_ALLMULTI;
12188            }
12189            if (bxe_set_uc_list(sc) < 0) {
12190                rx_mode = BXE_RX_MODE_PROMISC;
12191            }
12192        }
12193    }
12194
12195    sc->rx_mode = rx_mode;
12196
12197    /* schedule the rx_mode command */
12198    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12199        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12200        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12201        return;
12202    }
12203
12204    if (IS_PF(sc)) {
12205        bxe_set_storm_rx_mode(sc);
12206    }
12207}
12208
12209
12210/* update flags in shmem */
12211static void
12212bxe_update_drv_flags(struct bxe_softc *sc,
12213                     uint32_t         flags,
12214                     uint32_t         set)
12215{
12216    uint32_t drv_flags;
12217
12218    if (SHMEM2_HAS(sc, drv_flags)) {
12219        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12220        drv_flags = SHMEM2_RD(sc, drv_flags);
12221
12222        if (set) {
12223            SET_FLAGS(drv_flags, flags);
12224        } else {
12225            RESET_FLAGS(drv_flags, flags);
12226        }
12227
12228        SHMEM2_WR(sc, drv_flags, drv_flags);
12229        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12230
12231        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12232    }
12233}
12234
12235/* periodic timer callout routine, only runs when the interface is up */
12236
12237static void
12238bxe_periodic_callout_func(void *xsc)
12239{
12240    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12241    int i;
12242
12243    if (!BXE_CORE_TRYLOCK(sc)) {
12244        /* just bail and try again next time */
12245
12246        if ((sc->state == BXE_STATE_OPEN) &&
12247            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12248            /* schedule the next periodic callout */
12249            callout_reset(&sc->periodic_callout, hz,
12250                          bxe_periodic_callout_func, sc);
12251        }
12252
12253        return;
12254    }
12255
12256    if ((sc->state != BXE_STATE_OPEN) ||
12257        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12258        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12259        BXE_CORE_UNLOCK(sc);
12260        return;
12261    }
12262
12263
12264    /* Check for TX timeouts on any fastpath. */
12265    FOR_EACH_QUEUE(sc, i) {
12266        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12267            /* Ruh-Roh, chip was reset! */
12268            break;
12269        }
12270    }
12271
12272    if (!CHIP_REV_IS_SLOW(sc)) {
12273        /*
12274         * This barrier is needed to ensure ordering between the write to
12275         * sc->port.pmf in bxe_nic_load() or bxe_pmf_update() and the read
12276         * here.
12277         */
12278        mb();
12279        if (sc->port.pmf) {
12280            bxe_acquire_phy_lock(sc);
12281            elink_period_func(&sc->link_params, &sc->link_vars);
12282            bxe_release_phy_lock(sc);
12283        }
12284    }
12285
12286    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12287        int mb_idx = SC_FW_MB_IDX(sc);
12288        uint32_t drv_pulse;
12289        uint32_t mcp_pulse;
12290
12291        ++sc->fw_drv_pulse_wr_seq;
12292        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12293
12294        drv_pulse = sc->fw_drv_pulse_wr_seq;
12295        bxe_drv_pulse(sc);
12296
12297        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12298                     MCP_PULSE_SEQ_MASK);
12299
12300        /*
12301         * The delta between driver pulse and mcp response should
12302         * be 1 (before mcp response) or 0 (after mcp response).
12303         */
12304        if ((drv_pulse != mcp_pulse) &&
12305            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12306            /* someone lost a heartbeat... */
12307            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12308                  drv_pulse, mcp_pulse);
12309        }
12310    }
12311
12312    /* state is BXE_STATE_OPEN */
12313    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12314
12315    BXE_CORE_UNLOCK(sc);
12316
12317    if ((sc->state == BXE_STATE_OPEN) &&
12318        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12319        /* schedule the next periodic callout */
12320        callout_reset(&sc->periodic_callout, hz,
12321                      bxe_periodic_callout_func, sc);
12322    }
12323}
12324
12325static void
12326bxe_periodic_start(struct bxe_softc *sc)
12327{
12328    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12329    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12330}
12331
12332static void
12333bxe_periodic_stop(struct bxe_softc *sc)
12334{
12335    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12336    callout_drain(&sc->periodic_callout);
12337}
12338
12339/* start the controller */
12340static __noinline int
12341bxe_nic_load(struct bxe_softc *sc,
12342             int              load_mode)
12343{
12344    uint32_t val;
12345    int load_code = 0;
12346    int i, rc = 0;
12347
12348    BXE_CORE_LOCK_ASSERT(sc);
12349
12350    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12351
12352    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12353
12354    if (IS_PF(sc)) {
12355        /* must be called before memory allocation and HW init */
12356        bxe_ilt_set_info(sc);
12357    }
12358
12359    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12360
12361    bxe_set_fp_rx_buf_size(sc);
12362
12363    if (bxe_alloc_fp_buffers(sc) != 0) {
12364        BLOGE(sc, "Failed to allocate fastpath memory\n");
12365        sc->state = BXE_STATE_CLOSED;
12366        rc = ENOMEM;
12367        goto bxe_nic_load_error0;
12368    }
12369
12370    if (bxe_alloc_mem(sc) != 0) {
12371        sc->state = BXE_STATE_CLOSED;
12372        rc = ENOMEM;
12373        goto bxe_nic_load_error0;
12374    }
12375
12376    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12377        sc->state = BXE_STATE_CLOSED;
12378        rc = ENOMEM;
12379        goto bxe_nic_load_error0;
12380    }
12381
12382    if (IS_PF(sc)) {
12383        /* set pf load just before approaching the MCP */
12384        bxe_set_pf_load(sc);
12385
12386        /* if MCP exists send load request and analyze response */
12387        if (!BXE_NOMCP(sc)) {
12388            /* attempt to load pf */
12389            if (bxe_nic_load_request(sc, &load_code) != 0) {
12390                sc->state = BXE_STATE_CLOSED;
12391                rc = ENXIO;
12392                goto bxe_nic_load_error1;
12393            }
12394
12395            /* what did the MCP say? */
12396            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12397                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12398                sc->state = BXE_STATE_CLOSED;
12399                rc = ENXIO;
12400                goto bxe_nic_load_error2;
12401            }
12402        } else {
12403            BLOGI(sc, "Device has no MCP!\n");
12404            load_code = bxe_nic_load_no_mcp(sc);
12405        }
12406
12407        /* mark PMF if applicable */
12408        bxe_nic_load_pmf(sc, load_code);
12409
12410        /* Init Function state controlling object */
12411        bxe_init_func_obj(sc);
12412
12413        /* Initialize HW */
12414        if (bxe_init_hw(sc, load_code) != 0) {
12415            BLOGE(sc, "HW init failed\n");
12416            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12417            sc->state = BXE_STATE_CLOSED;
12418            rc = ENXIO;
12419            goto bxe_nic_load_error2;
12420        }
12421    }
12422
12423    /* set ALWAYS_ALIVE bit in shmem */
12424    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12425    bxe_drv_pulse(sc);
12426    sc->flags |= BXE_NO_PULSE;
12427
12428    /* attach interrupts */
12429    if (bxe_interrupt_attach(sc) != 0) {
12430        sc->state = BXE_STATE_CLOSED;
12431        rc = ENXIO;
12432        goto bxe_nic_load_error2;
12433    }
12434
12435    bxe_nic_init(sc, load_code);
12436
12437    /* Init per-function objects */
12438    if (IS_PF(sc)) {
12439        bxe_init_objs(sc);
12440        // XXX bxe_iov_nic_init(sc);
12441
12442        /* set AFEX default VLAN tag to an invalid value */
12443        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12444        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12445
12446        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12447        rc = bxe_func_start(sc);
12448        if (rc) {
12449            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12450            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12451            sc->state = BXE_STATE_ERROR;
12452            goto bxe_nic_load_error3;
12453        }
12454
12455        /* send LOAD_DONE command to MCP */
12456        if (!BXE_NOMCP(sc)) {
12457            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12458            if (!load_code) {
12459                BLOGE(sc, "MCP response failure, aborting\n");
12460                sc->state = BXE_STATE_ERROR;
12461                rc = ENXIO;
12462                goto bxe_nic_load_error3;
12463            }
12464        }
12465
12466        rc = bxe_setup_leading(sc);
12467        if (rc) {
12468            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12469            sc->state = BXE_STATE_ERROR;
12470            goto bxe_nic_load_error3;
12471        }
12472
12473        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12474            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12475            if (rc) {
12476                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12477                sc->state = BXE_STATE_ERROR;
12478                goto bxe_nic_load_error3;
12479            }
12480        }
12481
12482        rc = bxe_init_rss_pf(sc);
12483        if (rc) {
12484            BLOGE(sc, "PF RSS init failed\n");
12485            sc->state = BXE_STATE_ERROR;
12486            goto bxe_nic_load_error3;
12487        }
12488    }
12489    /* XXX VF */
12490
12491    /* now when Clients are configured we are ready to work */
12492    sc->state = BXE_STATE_OPEN;
12493
12494    /* Configure a ucast MAC */
12495    if (IS_PF(sc)) {
12496        rc = bxe_set_eth_mac(sc, TRUE);
12497    }
12498    if (rc) {
12499        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12500        sc->state = BXE_STATE_ERROR;
12501        goto bxe_nic_load_error3;
12502    }
12503
12504    if (sc->port.pmf) {
12505        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12506        if (rc) {
12507            sc->state = BXE_STATE_ERROR;
12508            goto bxe_nic_load_error3;
12509        }
12510    }
12511
12512    sc->link_params.feature_config_flags &=
12513        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12514
12515    /* start fast path */
12516
12517    /* Initialize Rx filter */
12518    bxe_set_rx_mode(sc);
12519
12520    /* start the Tx */
12521    switch (/* XXX load_mode */LOAD_OPEN) {
12522    case LOAD_NORMAL:
12523    case LOAD_OPEN:
12524        break;
12525
12526    case LOAD_DIAG:
12527    case LOAD_LOOPBACK_EXT:
12528        sc->state = BXE_STATE_DIAG;
12529        break;
12530
12531    default:
12532        break;
12533    }
12534
12535    if (sc->port.pmf) {
12536        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12537    } else {
12538        bxe_link_status_update(sc);
12539    }
12540
12541    /* start the periodic timer callout */
12542    bxe_periodic_start(sc);
12543
12544    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12545        /* mark driver is loaded in shmem2 */
12546        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12547        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12548                  (val |
12549                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12550                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12551    }
12552
12553    /* wait for all pending SP commands to complete */
12554    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12555        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12556        bxe_periodic_stop(sc);
12557        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12558        return (ENXIO);
12559    }
12560
12561    /* Tell the stack the driver is running! */
12562    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12563
12564    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12565
12566    return (0);
12567
12568bxe_nic_load_error3:
12569
12570    if (IS_PF(sc)) {
12571        bxe_int_disable_sync(sc, 1);
12572
12573        /* clean out queued objects */
12574        bxe_squeeze_objects(sc);
12575    }
12576
12577    bxe_interrupt_detach(sc);
12578
12579bxe_nic_load_error2:
12580
12581    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12582        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12583        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12584    }
12585
12586    sc->port.pmf = 0;
12587
12588bxe_nic_load_error1:
12589
12590    /* clear pf_load status, as it was already set */
12591    if (IS_PF(sc)) {
12592        bxe_clear_pf_load(sc);
12593    }
12594
12595bxe_nic_load_error0:
12596
12597    bxe_free_fw_stats_mem(sc);
12598    bxe_free_fp_buffers(sc);
12599    bxe_free_mem(sc);
12600
12601    return (rc);
12602}
12603
12604static int
12605bxe_init_locked(struct bxe_softc *sc)
12606{
12607    int other_engine = SC_PATH(sc) ? 0 : 1;
12608    uint8_t other_load_status, load_status;
12609    uint8_t global = FALSE;
12610    int rc;
12611
12612    BXE_CORE_LOCK_ASSERT(sc);
12613
12614    /* check if the driver is already running */
12615    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12616        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12617        return (0);
12618    }
12619
12620    bxe_set_power_state(sc, PCI_PM_D0);
12621
12622    /*
12623     * If parity occurred during the unload, then attentions and/or
12624     * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
12625     * loaded on the current engine to complete the recovery. Parity recovery
12626     * is only relevant for the PF driver.
12627     */
12628    if (IS_PF(sc)) {
12629        other_load_status = bxe_get_load_status(sc, other_engine);
12630        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12631
12632        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12633            bxe_chk_parity_attn(sc, &global, TRUE)) {
12634            do {
12635                /*
12636                 * If there are attentions and they are in global blocks, set
12637                 * the GLOBAL_RESET bit regardless of whether it is this
12638                 * function that will complete the recovery.
12639                 */
12640                if (global) {
12641                    bxe_set_reset_global(sc);
12642                }
12643
12644                /*
12645                 * Only the first function on the current engine should try
12646                 * to recover in open. In the case of attentions in global blocks,
12647                 * only the first function in the chip should try to recover.
12648                 */
12649                if ((!load_status && (!global || !other_load_status)) &&
12650                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12651                    BLOGI(sc, "Recovered during init\n");
12652                    break;
12653                }
12654
12655                /* recovery has failed... */
12656                bxe_set_power_state(sc, PCI_PM_D3hot);
12657                sc->recovery_state = BXE_RECOVERY_FAILED;
12658
12659                BLOGE(sc, "Recovery flow hasn't properly "
12660                          "completed yet, try again later. "
12661                          "If you still see this message after a "
12662                          "few retries then power cycle is required.\n");
12663
12664                rc = ENXIO;
12665                goto bxe_init_locked_done;
12666            } while (0);
12667        }
12668    }
12669
12670    sc->recovery_state = BXE_RECOVERY_DONE;
12671
12672    rc = bxe_nic_load(sc, LOAD_OPEN);
12673
12674bxe_init_locked_done:
12675
12676    if (rc) {
12677        /* Tell the stack the driver is NOT running! */
12678        BLOGE(sc, "Initialization failed, "
12679                  "stack notified driver is NOT running!\n");
12680        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12681    }
12682
12683    return (rc);
12684}
12685
12686static int
12687bxe_stop_locked(struct bxe_softc *sc)
12688{
12689    BXE_CORE_LOCK_ASSERT(sc);
12690    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12691}
12692
12693/*
12694 * Handles controller initialization when called from an unlocked routine.
12695 * ifconfig calls this function.
12696 *
12697 * Returns:
12698 *   void
12699 */
12700static void
12701bxe_init(void *xsc)
12702{
12703    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12704
12705    BXE_CORE_LOCK(sc);
12706    bxe_init_locked(sc);
12707    BXE_CORE_UNLOCK(sc);
12708}
12709
12710static int
12711bxe_init_ifnet(struct bxe_softc *sc)
12712{
12713    if_t ifp;
12714    int capabilities;
12715
12716    /* ifconfig entrypoint for media type/status reporting */
12717    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12718                 bxe_ifmedia_update,
12719                 bxe_ifmedia_status);
12720
12721    /* set the default interface values */
12722    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12723    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12724    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12725
12726    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12727    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12728
12729    /* allocate the ifnet structure */
12730    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12731        BLOGE(sc, "Interface allocation failed!\n");
12732        return (ENXIO);
12733    }
12734
12735    if_setsoftc(ifp, sc);
12736    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12737    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12738    if_setioctlfn(ifp, bxe_ioctl);
12739    if_setstartfn(ifp, bxe_tx_start);
12740    if_setgetcounterfn(ifp, bxe_get_counter);
12741#if __FreeBSD_version >= 901504
12742    if_settransmitfn(ifp, bxe_tx_mq_start);
12743    if_setqflushfn(ifp, bxe_mq_flush);
12744#endif
12745#ifdef FreeBSD8_0
12746    if_settimer(ifp, 0);
12747#endif
12748    if_setinitfn(ifp, bxe_init);
12749    if_setmtu(ifp, sc->mtu);
12750    if_sethwassist(ifp, (CSUM_IP      |
12751                        CSUM_TCP      |
12752                        CSUM_UDP      |
12753                        CSUM_TSO      |
12754                        CSUM_TCP_IPV6 |
12755                        CSUM_UDP_IPV6));
12756
12757    capabilities =
12758#if __FreeBSD_version < 700000
12759        (IFCAP_VLAN_MTU       |
12760         IFCAP_VLAN_HWTAGGING |
12761         IFCAP_HWCSUM         |
12762         IFCAP_JUMBO_MTU      |
12763         IFCAP_LRO);
12764#else
12765        (IFCAP_VLAN_MTU       |
12766         IFCAP_VLAN_HWTAGGING |
12767         IFCAP_VLAN_HWTSO     |
12768         IFCAP_VLAN_HWFILTER  |
12769         IFCAP_VLAN_HWCSUM    |
12770         IFCAP_HWCSUM         |
12771         IFCAP_JUMBO_MTU      |
12772         IFCAP_LRO            |
12773         IFCAP_TSO4           |
12774         IFCAP_TSO6           |
12775         IFCAP_WOL_MAGIC);
12776#endif
12777    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12778    if_setcapenable(ifp, if_getcapabilities(ifp));
12779    if_setbaudrate(ifp, IF_Gbps(10));
12780/* XXX */
12781    if_setsendqlen(ifp, sc->tx_ring_size);
12782    if_setsendqready(ifp);
12783/* XXX */
12784
12785    sc->ifp = ifp;
12786
12787    /* attach to the Ethernet interface list */
12788    ether_ifattach(ifp, sc->link_params.mac_addr);
12789
12790    return (0);
12791}
12792
12793static void
12794bxe_deallocate_bars(struct bxe_softc *sc)
12795{
12796    int i;
12797
12798    for (i = 0; i < MAX_BARS; i++) {
12799        if (sc->bar[i].resource != NULL) {
12800            bus_release_resource(sc->dev,
12801                                 SYS_RES_MEMORY,
12802                                 sc->bar[i].rid,
12803                                 sc->bar[i].resource);
12804            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12805                  i, PCIR_BAR(i));
12806        }
12807    }
12808}
12809
12810static int
12811bxe_allocate_bars(struct bxe_softc *sc)
12812{
12813    u_int flags;
12814    int i;
12815
12816    memset(sc->bar, 0, sizeof(sc->bar));
12817
12818    for (i = 0; i < MAX_BARS; i++) {
12819
12820        /* memory resources reside at BARs 0, 2, 4 */
12821        /* Run `pciconf -lb` to see mappings */
12822        if ((i != 0) && (i != 2) && (i != 4)) {
12823            continue;
12824        }
12825
12826        sc->bar[i].rid = PCIR_BAR(i);
12827
12828        flags = RF_ACTIVE;
12829        if (i == 0) {
12830            flags |= RF_SHAREABLE;
12831        }
12832
12833        if ((sc->bar[i].resource =
12834             bus_alloc_resource_any(sc->dev,
12835                                    SYS_RES_MEMORY,
12836                                    &sc->bar[i].rid,
12837                                    flags)) == NULL) {
12838            return (ENXIO);
12839        }
12840
12841        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12842        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12843        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12844
12845        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
12846              i, PCIR_BAR(i),
12847              rman_get_start(sc->bar[i].resource),
12848              rman_get_end(sc->bar[i].resource),
12849              rman_get_size(sc->bar[i].resource),
12850              (uintmax_t)sc->bar[i].kva);
12851    }
12852
12853    return (0);
12854}
12855
12856static void
12857bxe_get_function_num(struct bxe_softc *sc)
12858{
12859    uint32_t val = 0;
12860
12861    /*
12862     * Read the ME register to get the function number. The ME register
12863     * holds the relative-function number and absolute-function number. The
12864     * absolute-function number appears only in E2 and above. Before that
12865     * these bits always contained zero, therefore we cannot blindly use them.
12866     */
12867
12868    val = REG_RD(sc, BAR_ME_REGISTER);
12869
12870    sc->pfunc_rel =
12871        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12872    sc->path_id =
12873        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12874
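    /*
     * Illustrative example of the mapping below: relative function 1 on
     * path 1 becomes absolute function 3 in 4-port mode ((1 << 1) | 1) and
     * absolute function 1 in 2-port mode (1 | 1).
     */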
12875    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12876        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12877    } else {
12878        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12879    }
12880
12881    BLOGD(sc, DBG_LOAD,
12882          "Relative function %d, Absolute function %d, Path %d\n",
12883          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12884}
12885
12886static uint32_t
12887bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12888{
12889    uint32_t shmem2_size;
12890    uint32_t offset;
12891    uint32_t mf_cfg_offset_value;
12892
12893    /* Non 57712 */
12894    offset = (SHMEM_RD(sc, func_mb) +
12895              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
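    /*
     * On pre-57712 chips the MF config block follows the per-function
     * mailbox array in shmem, hence the default offset computed above.
     */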
12896
12897    /* 57712 plus */
12898    if (sc->devinfo.shmem2_base != 0) {
12899        shmem2_size = SHMEM2_RD(sc, size);
12900        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12901            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12902            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12903                offset = mf_cfg_offset_value;
12904            }
12905        }
12906    }
12907
12908    return (offset);
12909}
12910
12911static uint32_t
12912bxe_pcie_capability_read(struct bxe_softc *sc,
12913                         int    reg,
12914                         int    width)
12915{
12916    int pcie_reg;
12917
12918    /* ensure PCIe capability is enabled */
12919    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12920        if (pcie_reg != 0) {
12921            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12922            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12923        }
12924    }
12925
12926    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12927
12928    return (0);
12929}
12930
12931static uint8_t
12932bxe_is_pcie_pending(struct bxe_softc *sc)
12933{
12934    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12935            PCIM_EXP_STA_TRANSACTION_PND);
12936}
12937
12938/*
12939 * Walk the PCI capabilities list for the device to find what features are
12940 * supported. These capabilities may be enabled/disabled by firmware so it's
12941 * best to walk the list rather than make assumptions.
12942 */
12943static void
12944bxe_probe_pci_caps(struct bxe_softc *sc)
12945{
12946    uint16_t link_status;
12947    int reg;
12948
12949    /* check if PCI Power Management is enabled */
12950    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12951        if (reg != 0) {
12952            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12953
12954            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12955            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12956        }
12957    }
12958
12959    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12960
12961    /* handle PCIe 2.0 workarounds for 57710 */
12962    if (CHIP_IS_E1(sc)) {
12963        /* workaround for 57710 errata E4_57710_27462 */
12964        sc->devinfo.pcie_link_speed =
12965            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12966
12967        /* workaround for 57710 errata E4_57710_27488 */
12968        sc->devinfo.pcie_link_width =
12969            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12970        if (sc->devinfo.pcie_link_speed > 1) {
12971            sc->devinfo.pcie_link_width =
12972                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12973        }
12974    } else {
12975        sc->devinfo.pcie_link_speed =
12976            (link_status & PCIM_LINK_STA_SPEED);
12977        sc->devinfo.pcie_link_width =
12978            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12979    }
12980
12981    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12982          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12983
12984    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
12985    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
12986
12987    /* check if MSI capability is enabled */
12988    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12989        if (reg != 0) {
12990            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12991
12992            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12993            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12994        }
12995    }
12996
12997    /* check if MSI-X capability is enabled */
12998    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12999        if (reg != 0) {
13000            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13001
13002            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13003            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13004        }
13005    }
13006}
13007
13008static int
13009bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13010{
13011    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13012    uint32_t val;
13013
13014    /* get the outer vlan if we're in switch-dependent mode */
13015
13016    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13017    mf_info->ext_id = (uint16_t)val;
13018
13019    mf_info->multi_vnics_mode = 1;
13020
13021    if (!VALID_OVLAN(mf_info->ext_id)) {
13022        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13023        return (1);
13024    }
13025
13026    /* get the capabilities */
13027    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13028        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13029        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13030    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13031               FUNC_MF_CFG_PROTOCOL_FCOE) {
13032        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13033    } else {
13034        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13035    }
13036
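    /* 2 vnics per port in 4-port mode, 4 in 2-port mode (8 functions per path) */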
13037    mf_info->vnics_per_port =
13038        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13039
13040    return (0);
13041}
13042
13043static uint32_t
13044bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13045{
13046    uint32_t retval = 0;
13047    uint32_t val;
13048
13049    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13050
13051    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13052        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13053            retval |= MF_PROTO_SUPPORT_ETHERNET;
13054        }
13055        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13056            retval |= MF_PROTO_SUPPORT_ISCSI;
13057        }
13058        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13059            retval |= MF_PROTO_SUPPORT_FCOE;
13060        }
13061    }
13062
13063    return (retval);
13064}
13065
13066static int
13067bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13068{
13069    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13070    uint32_t val;
13071
13072    /*
13073     * There is no outer vlan if we're in switch-independent mode.
13074     * If the mac is valid then assume multi-function.
13075     */
13076
13077    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13078
13079    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13080
13081    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13082
13083    mf_info->vnics_per_port =
13084        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13085
13086    return (0);
13087}
13088
13089static int
13090bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13091{
13092    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13093    uint32_t e1hov_tag;
13094    uint32_t func_config;
13095    uint32_t niv_config;
13096
13097    mf_info->multi_vnics_mode = 1;
13098
13099    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13100    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13101    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13102
13103    mf_info->ext_id =
13104        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13105                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13106
13107    mf_info->default_vlan =
13108        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13109                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13110
13111    mf_info->niv_allowed_priorities =
13112        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13113                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13114
13115    mf_info->niv_default_cos =
13116        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13117                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13118
13119    mf_info->afex_vlan_mode =
13120        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13121         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13122
13123    mf_info->niv_mba_enabled =
13124        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13125         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13126
13127    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13128
13129    mf_info->vnics_per_port =
13130        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13131
13132    return (0);
13133}
13134
13135static int
13136bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13137{
13138    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13139    uint32_t mf_cfg1;
13140    uint32_t mf_cfg2;
13141    uint32_t ovlan1;
13142    uint32_t ovlan2;
13143    uint8_t i, j;
13144
13145    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13146          SC_PORT(sc));
13147    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13148          mf_info->mf_config[SC_VN(sc)]);
13149    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13150          mf_info->multi_vnics_mode);
13151    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13152          mf_info->vnics_per_port);
13153    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13154          mf_info->ext_id);
13155    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13156          mf_info->min_bw[0], mf_info->min_bw[1],
13157          mf_info->min_bw[2], mf_info->min_bw[3]);
13158    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13159          mf_info->max_bw[0], mf_info->max_bw[1],
13160          mf_info->max_bw[2], mf_info->max_bw[3]);
13161    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13162          sc->mac_addr_str);
13163
13164    /* various MF mode sanity checks... */
13165
13166    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13167        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13168              SC_PORT(sc));
13169        return (1);
13170    }
13171
13172    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13173        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13174              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13175        return (1);
13176    }
13177
13178    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13179        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13180        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13181            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13182                  SC_VN(sc), OVLAN(sc));
13183            return (1);
13184        }
13185
13186        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13187            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13188                  mf_info->multi_vnics_mode, OVLAN(sc));
13189            return (1);
13190        }
13191
13192        /*
13193         * Verify all functions are either MF or SF mode. If MF, make sure
13194         * that all non-hidden functions have a valid ovlan. If SF,
13195         * make sure that all non-hidden functions have an invalid ovlan.
13196         */
13197        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13198            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13199            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13200            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13201                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13202                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13203                BLOGE(sc, "mf_mode=SD function %d MF config "
13204                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13205                      i, mf_info->multi_vnics_mode, ovlan1);
13206                return (1);
13207            }
13208        }
13209
13210        /* Verify all funcs on the same port each have a different ovlan. */
13211        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13212            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13213            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13214            /* iterate from the next function on the port to the max func */
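            /*
             * Absolute function numbers alternate between the two ports of a
             * path, so same-port peers are i+2, i+4, etc. (hence the stride
             * of 2 below).
             */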
13215            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13216                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13217                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13218                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13219                    VALID_OVLAN(ovlan1) &&
13220                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13221                    VALID_OVLAN(ovlan2) &&
13222                    (ovlan1 == ovlan2)) {
13223                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13224                              "have the same ovlan (%d)\n",
13225                          i, j, ovlan1);
13226                    return (1);
13227                }
13228            }
13229        }
13230    } /* MULTI_FUNCTION_SD */
13231
13232    return (0);
13233}
13234
13235static int
13236bxe_get_mf_cfg_info(struct bxe_softc *sc)
13237{
13238    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13239    uint32_t val, mac_upper;
13240    uint8_t i, vnic;
13241
13242    /* initialize mf_info defaults */
13243    mf_info->vnics_per_port   = 1;
13244    mf_info->multi_vnics_mode = FALSE;
13245    mf_info->path_has_ovlan   = FALSE;
13246    mf_info->mf_mode          = SINGLE_FUNCTION;
13247
13248    if (!CHIP_IS_MF_CAP(sc)) {
13249        return (0);
13250    }
13251
13252    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13253        BLOGE(sc, "Invalid mf_cfg_base!\n");
13254        return (1);
13255    }
13256
13257    /* get the MF mode (switch dependent / independent / single-function) */
13258
13259    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13260
13261    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13262    {
13263    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13264
13265        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13266
13267        /* check for legal upper mac bytes */
13268        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13269            mf_info->mf_mode = MULTI_FUNCTION_SI;
13270        } else {
13271            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13272        }
13273
13274        break;
13275
13276    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13277    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13278
13279        /* get outer vlan configuration */
13280        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13281
13282        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13283            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13284            mf_info->mf_mode = MULTI_FUNCTION_SD;
13285        } else {
13286            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13287        }
13288
13289        break;
13290
13291    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13292
13293        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13294        return (0);
13295
13296    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13297
13298        /*
13299         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13300         * and the MAC address is valid.
13301         */
13302        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13303
13304        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13305            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13306            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13307        } else {
13308            BLOGE(sc, "Invalid config for AFEX mode\n");
13309        }
13310
13311        break;
13312
13313    default:
13314
13315        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13316              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13317
13318        return (1);
13319    }
13320
13321    /* set path mf_mode (which could be different from the function mf_mode) */
13322    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13323        mf_info->path_has_ovlan = TRUE;
13324    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13325        /*
13326         * Decide on the path multi vnics mode. If we're not in MF mode and
13327         * the chip is in 4-port mode, it is sufficient to check vnic-0 of
13328         * the other port on the same path.
13329         */
13330        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13331            uint8_t other_port = !(PORT_ID(sc) & 1);
13332            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13333
13334            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13335
13336            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13337        }
13338    }
13339
13340    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13341        /* invalid MF config */
13342        if (SC_VN(sc) >= 1) {
13343            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13344            return (1);
13345        }
13346
13347        return (0);
13348    }
13349
13350    /* get the MF configuration */
13351    mf_info->mf_config[SC_VN(sc)] =
13352        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13353
13354    switch(mf_info->mf_mode)
13355    {
13356    case MULTI_FUNCTION_SD:
13357
13358        bxe_get_shmem_mf_cfg_info_sd(sc);
13359        break;
13360
13361    case MULTI_FUNCTION_SI:
13362
13363        bxe_get_shmem_mf_cfg_info_si(sc);
13364        break;
13365
13366    case MULTI_FUNCTION_AFEX:
13367
13368        bxe_get_shmem_mf_cfg_info_niv(sc);
13369        break;
13370
13371    default:
13372
13373        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13374              mf_info->mf_mode);
13375        return (1);
13376    }
13377
13378    /* get the congestion management parameters */
13379
13380    vnic = 0;
13381    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13382        /* get min/max bw */
13383        val = MFCFG_RD(sc, func_mf_config[i].config);
13384        mf_info->min_bw[vnic] =
13385            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13386        mf_info->max_bw[vnic] =
13387            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13388        vnic++;
13389    }
13390
13391    return (bxe_check_valid_mf_cfg(sc));
13392}
13393
13394static int
13395bxe_get_shmem_info(struct bxe_softc *sc)
13396{
13397    int port;
13398    uint32_t mac_hi, mac_lo, val;
13399
13400    port = SC_PORT(sc);
13401    mac_hi = mac_lo = 0;
13402
13403    sc->link_params.sc   = sc;
13404    sc->link_params.port = port;
13405
13406    /* get the hardware config info */
13407    sc->devinfo.hw_config =
13408        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13409    sc->devinfo.hw_config2 =
13410        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13411
13412    sc->link_params.hw_led_mode =
13413        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13414         SHARED_HW_CFG_LED_MODE_SHIFT);
13415
13416    /* get the port feature config */
13417    sc->port.config =
13418        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13419
13420    /* get the link params */
13421    sc->link_params.speed_cap_mask[0] =
13422        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13423    sc->link_params.speed_cap_mask[1] =
13424        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13425
13426    /* get the lane config */
13427    sc->link_params.lane_config =
13428        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13429
13430    /* get the link config */
13431    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13432    sc->port.link_config[ELINK_INT_PHY] = val;
13433    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13434    sc->port.link_config[ELINK_EXT_PHY1] =
13435        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13436
13437    /* get the override preemphasis flag and enable it or turn it off */
13438    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13439    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13440        sc->link_params.feature_config_flags |=
13441            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13442    } else {
13443        sc->link_params.feature_config_flags &=
13444            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13445    }
13446
13447    /* get the initial value of the link params */
13448    sc->link_params.multi_phy_config =
13449        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13450
13451    /* get external phy info */
13452    sc->port.ext_phy_config =
13453        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13454
13455    /* get the multifunction configuration */
13456    bxe_get_mf_cfg_info(sc);
13457
13458    /* get the mac address */
13459    if (IS_MF(sc)) {
13460        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13461        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13462    } else {
13463        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13464        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13465    }
13466
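    /*
     * mac_upper carries the two most significant MAC bytes in its low 16
     * bits and mac_lower the remaining four; e.g. mac_hi=0x0000020a with
     * mac_lo=0x0b0c0d0e decodes to 02:0a:0b:0c:0d:0e below.
     */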
13467    if ((mac_lo == 0) && (mac_hi == 0)) {
13468        *sc->mac_addr_str = 0;
13469        BLOGE(sc, "No Ethernet address programmed!\n");
13470    } else {
13471        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13472        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13473        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13474        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13475        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13476        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13477        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13478                 "%02x:%02x:%02x:%02x:%02x:%02x",
13479                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13480                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13481                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13482        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13483    }
13484
13485    return (0);
13486}
13487
13488static void
13489bxe_get_tunable_params(struct bxe_softc *sc)
13490{
13491    /* sanity checks */
13492
13493    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13494        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13495        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13496        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13497        bxe_interrupt_mode = INTR_MODE_MSIX;
13498    }
13499
13500    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13501        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13502        bxe_queue_count = 0;
13503    }
13504
13505    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13506        if (bxe_max_rx_bufs == 0) {
13507            bxe_max_rx_bufs = RX_BD_USABLE;
13508        } else {
13509            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13510            bxe_max_rx_bufs = 2048;
13511        }
13512    }
13513
13514    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13515        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13516        bxe_hc_rx_ticks = 25;
13517    }
13518
13519    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13520        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13521        bxe_hc_tx_ticks = 50;
13522    }
13523
13524    if (bxe_max_aggregation_size == 0) {
13525        bxe_max_aggregation_size = TPA_AGG_SIZE;
13526    }
13527
13528    if (bxe_max_aggregation_size > 0xffff) {
13529        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13530              bxe_max_aggregation_size);
13531        bxe_max_aggregation_size = TPA_AGG_SIZE;
13532    }
13533
13534    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13535        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13536        bxe_mrrs = -1;
13537    }
13538
13539    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13540        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13541        bxe_autogreeen = 0;
13542    }
13543
13544    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13545        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13546        bxe_udp_rss = 0;
13547    }
13548
13549    /* pull in user settings */
13550
13551    sc->interrupt_mode       = bxe_interrupt_mode;
13552    sc->max_rx_bufs          = bxe_max_rx_bufs;
13553    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13554    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13555    sc->max_aggregation_size = bxe_max_aggregation_size;
13556    sc->mrrs                 = bxe_mrrs;
13557    sc->autogreeen           = bxe_autogreeen;
13558    sc->udp_rss              = bxe_udp_rss;
13559
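    /*
     * Queue count selection (summary of the logic below): INTx is limited
     * to a single queue; MSI/MSI-X default to one queue per CPU, capped at
     * MAX_RSS_CHAINS, unless bxe_queue_count overrides the CPU count.
     */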
13560    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13561        sc->num_queues = 1;
13562    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13563        sc->num_queues =
13564            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13565                MAX_RSS_CHAINS);
13566        if (sc->num_queues > mp_ncpus) {
13567            sc->num_queues = mp_ncpus;
13568        }
13569    }
13570
13571    BLOGD(sc, DBG_LOAD,
13572          "User Config: "
13573          "debug=0x%lx "
13574          "interrupt_mode=%d "
13575          "queue_count=%d "
13576          "hc_rx_ticks=%d "
13577          "hc_tx_ticks=%d "
13578          "rx_budget=%d "
13579          "max_aggregation_size=%d "
13580          "mrrs=%d "
13581          "autogreeen=%d "
13582          "udp_rss=%d\n",
13583          bxe_debug,
13584          sc->interrupt_mode,
13585          sc->num_queues,
13586          sc->hc_rx_ticks,
13587          sc->hc_tx_ticks,
13588          bxe_rx_budget,
13589          sc->max_aggregation_size,
13590          sc->mrrs,
13591          sc->autogreeen,
13592          sc->udp_rss);
13593}
13594
13595static int
13596bxe_media_detect(struct bxe_softc *sc)
13597{
13598    int port_type;
13599    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13600
13601    switch (sc->link_params.phy[phy_idx].media_type) {
13602    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13603    case ELINK_ETH_PHY_XFP_FIBER:
13604        BLOGI(sc, "Found 10Gb Fiber media.\n");
13605        sc->media = IFM_10G_SR;
13606        port_type = PORT_FIBRE;
13607        break;
13608    case ELINK_ETH_PHY_SFP_1G_FIBER:
13609        BLOGI(sc, "Found 1Gb Fiber media.\n");
13610        sc->media = IFM_1000_SX;
13611        port_type = PORT_FIBRE;
13612        break;
13613    case ELINK_ETH_PHY_KR:
13614    case ELINK_ETH_PHY_CX4:
13615        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13616        sc->media = IFM_10G_CX4;
13617        port_type = PORT_FIBRE;
13618        break;
13619    case ELINK_ETH_PHY_DA_TWINAX:
13620        BLOGI(sc, "Found 10Gb Twinax media.\n");
13621        sc->media = IFM_10G_TWINAX;
13622        port_type = PORT_DA;
13623        break;
13624    case ELINK_ETH_PHY_BASE_T:
13625        if (sc->link_params.speed_cap_mask[0] &
13626            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13627            BLOGI(sc, "Found 10GBase-T media.\n");
13628            sc->media = IFM_10G_T;
13629            port_type = PORT_TP;
13630        } else {
13631            BLOGI(sc, "Found 1000Base-T media.\n");
13632            sc->media = IFM_1000_T;
13633            port_type = PORT_TP;
13634        }
13635        break;
13636    case ELINK_ETH_PHY_NOT_PRESENT:
13637        BLOGI(sc, "Media not present.\n");
13638        sc->media = 0;
13639        port_type = PORT_OTHER;
13640        break;
13641    case ELINK_ETH_PHY_UNSPECIFIED:
13642    default:
13643        BLOGI(sc, "Unknown media!\n");
13644        sc->media = 0;
13645        port_type = PORT_OTHER;
13646        break;
13647    }
13648    return (port_type);
13649}
13650
13651#define GET_FIELD(value, fname)                     \
13652    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13653#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13654#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
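/*
 * Each valid IGU CAM entry encodes a function id and a vector number; the
 * helpers above extract those fields. A vector of 0 identifies the default
 * status block of the owning function (see bxe_get_igu_cam_info() below).
 */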
13655
13656static int
13657bxe_get_igu_cam_info(struct bxe_softc *sc)
13658{
13659    int pfid = SC_FUNC(sc);
13660    int igu_sb_id;
13661    uint32_t val;
13662    uint8_t fid, igu_sb_cnt = 0;
13663
13664    sc->igu_base_sb = 0xff;
13665
13666    if (CHIP_INT_MODE_IS_BC(sc)) {
13667        int vn = SC_VN(sc);
13668        igu_sb_cnt = sc->igu_sb_cnt;
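        /*
         * Backward-compatible mode uses a fixed layout: each function
         * (4-port) or vn (2-port) owns FP_SB_MAX_E1x consecutive fast-path
         * SBs, with the default SBs placed after all of them.
         */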
13669        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13670                           FP_SB_MAX_E1x);
13671        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13672                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13673        return (0);
13674    }
13675
13676    /* IGU in normal mode - read CAM */
13677    for (igu_sb_id = 0;
13678         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13679         igu_sb_id++) {
13680        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13681        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13682            continue;
13683        }
13684        fid = IGU_FID(val);
13685        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13686            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13687                continue;
13688            }
13689            if (IGU_VEC(val) == 0) {
13690                /* default status block */
13691                sc->igu_dsb_id = igu_sb_id;
13692            } else {
13693                if (sc->igu_base_sb == 0xff) {
13694                    sc->igu_base_sb = igu_sb_id;
13695                }
13696                igu_sb_cnt++;
13697            }
13698        }
13699    }
13700
13701    /*
13702     * Due to the new PF resource allocation by MFW T7.4 and above, it's
13703     * possible that the number of CAM entries will not equal the value
13704     * advertised in PCI. The driver should use the minimum of the two as
13705     * the actual status block count.
13706     */
13707    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13708
13709    if (igu_sb_cnt == 0) {
13710        BLOGE(sc, "CAM configuration error\n");
13711        return (-1);
13712    }
13713
13714    return (0);
13715}
13716
13717/*
13718 * Gather various information from the device config space, the device itself,
13719 * shmem, and the user input.
13720 */
13721static int
13722bxe_get_device_info(struct bxe_softc *sc)
13723{
13724    uint32_t val;
13725    int rc;
13726
13727    /* Get the data for the device */
13728    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13729    sc->devinfo.device_id    = pci_get_device(sc->dev);
13730    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13731    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13732
13733    /* get the chip revision (chip metal comes from pci config space) */
13734    sc->devinfo.chip_id     =
13735    sc->link_params.chip_id =
13736        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13737         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13738         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13739         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
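    /*
     * chip_id layout as assembled above (roughly): bits 31-16 chip number,
     * 15-12 silicon rev, 11-4 metal (read via PCI config), 3-0 bond id;
     * the BLOGD below prints the same breakdown.
     */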
13740
13741    /* force 57811 according to MISC register */
13742    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13743        if (CHIP_IS_57810(sc)) {
13744            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13745                                   (sc->devinfo.chip_id & 0x0000ffff));
13746        } else if (CHIP_IS_57810_MF(sc)) {
13747            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13748                                   (sc->devinfo.chip_id & 0x0000ffff));
13749        }
13750        sc->devinfo.chip_id |= 0x1;
13751    }
13752
13753    BLOGD(sc, DBG_LOAD,
13754          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13755          sc->devinfo.chip_id,
13756          ((sc->devinfo.chip_id >> 16) & 0xffff),
13757          ((sc->devinfo.chip_id >> 12) & 0xf),
13758          ((sc->devinfo.chip_id >>  4) & 0xff),
13759          ((sc->devinfo.chip_id >>  0) & 0xf));
13760
13761    val = (REG_RD(sc, 0x2874) & 0x55);
13762    if ((sc->devinfo.chip_id & 0x1) ||
13763        (CHIP_IS_E1(sc) && val) ||
13764        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13765        sc->flags |= BXE_ONE_PORT_FLAG;
13766        BLOGD(sc, DBG_LOAD, "single port device\n");
13767    }
13768
13769    /* set the doorbell size */
13770    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13771
13772    /* determine whether the device is in 2 port or 4 port mode */
13773    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13774    if (CHIP_IS_E2E3(sc)) {
13775        /*
13776         * Read port4mode_en_ovwr[0]:
13777         *   If 1, four port mode is in port4mode_en_ovwr[1].
13778         *   If 0, four port mode is in port4mode_en[0].
13779         */
13780        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13781        if (val & 1) {
13782            val = ((val >> 1) & 1);
13783        } else {
13784            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13785        }
13786
13787        sc->devinfo.chip_port_mode =
13788            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13789
13790        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13791    }
13792
13793    /* get the function and path info for the device */
13794    bxe_get_function_num(sc);
13795
13796    /* get the shared memory base address */
13797    sc->devinfo.shmem_base     =
13798    sc->link_params.shmem_base =
13799        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13800    sc->devinfo.shmem2_base =
13801        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13802                                  MISC_REG_GENERIC_CR_0));
13803
13804    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13805          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13806
13807    if (!sc->devinfo.shmem_base) {
13808        /* this should ONLY prevent upcoming shmem reads */
13809        BLOGI(sc, "MCP not active\n");
13810        sc->flags |= BXE_NO_MCP_FLAG;
13811        return (0);
13812    }
13813
13814    /* make sure the shared memory contents are valid */
13815    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13816    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13817        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13818        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13819        return (0);
13820    }
13821    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13822
13823    /* get the bootcode version */
13824    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13825    snprintf(sc->devinfo.bc_ver_str,
13826             sizeof(sc->devinfo.bc_ver_str),
13827             "%d.%d.%d",
13828             ((sc->devinfo.bc_ver >> 24) & 0xff),
13829             ((sc->devinfo.bc_ver >> 16) & 0xff),
13830             ((sc->devinfo.bc_ver >>  8) & 0xff));
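    /* e.g. a bc_rev of 0x070d0b00 is reported as bootcode version "7.13.11" */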
13831    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13832
13833    /* get the bootcode shmem address */
13834    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13835    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
13836
13837    /* clean indirect addresses as they're not used */
13838    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13839    if (IS_PF(sc)) {
13840        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13841        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13842        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13843        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13844        if (CHIP_IS_E1x(sc)) {
13845            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13846            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13847            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13848            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13849        }
13850
13851        /*
13852         * Enable internal target-read (in case we are probed after PF
13853         * FLR). Must be done prior to any BAR read access. Only for
13854         * 57712 and up
13855         */
13856        if (!CHIP_IS_E1x(sc)) {
13857            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13858        }
13859    }
13860
13861    /* get the nvram size */
13862    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13863    sc->devinfo.flash_size =
13864        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13865    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13866
13867    /* get PCI capabilites */
13868    bxe_probe_pci_caps(sc);
13869
13870    bxe_set_power_state(sc, PCI_PM_D0);
13871
13872    /* get various configuration parameters from shmem */
13873    bxe_get_shmem_info(sc);
13874
13875    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13876        val = pci_read_config(sc->dev,
13877                              (sc->devinfo.pcie_msix_cap_reg +
13878                               PCIR_MSIX_CTRL),
13879                              2);
13880        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13881    } else {
13882        sc->igu_sb_cnt = 1;
13883    }
13884
13885    sc->igu_base_addr = BAR_IGU_INTMEM;
13886
13887    /* initialize IGU parameters */
13888    if (CHIP_IS_E1x(sc)) {
13889        sc->devinfo.int_block = INT_BLOCK_HC;
13890        sc->igu_dsb_id = DEF_SB_IGU_ID;
13891        sc->igu_base_sb = 0;
13892    } else {
13893        sc->devinfo.int_block = INT_BLOCK_IGU;
13894
13895        /* do not allow device reset during IGU info processing */
13896        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13897
13898        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13899
13900        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13901            int tout = 5000;
13902
13903            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13904
13905            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13906            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13907            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13908
13909            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13910                tout--;
13911                DELAY(1000);
13912            }
13913
13914            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13915                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13916                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13917                return (-1);
13918            }
13919        }
13920
13921        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13922            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13923            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13924        } else {
13925            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13926        }
13927
13928        rc = bxe_get_igu_cam_info(sc);
13929
13930        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13931
13932        if (rc) {
13933            return (rc);
13934        }
13935    }
13936
13937    /*
13938     * Get base FW non-default (fast path) status block ID. This value is
13939     * used to initialize the fw_sb_id saved on the fp/queue structure to
13940     * determine the id used by the FW.
13941     */
13942    if (CHIP_IS_E1x(sc)) {
13943        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13944    } else {
13945        /*
13946         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13947         * the same queue are indicated on the same IGU SB). So we prefer
13948         * FW and IGU SBs to be the same value.
13949         */
13950        sc->base_fw_ndsb = sc->igu_base_sb;
13951    }
13952
13953    BLOGD(sc, DBG_LOAD,
13954          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13955          sc->igu_dsb_id, sc->igu_base_sb,
13956          sc->igu_sb_cnt, sc->base_fw_ndsb);
13957
13958    elink_phy_probe(&sc->link_params);
13959
13960    return (0);
13961}
13962
13963static void
13964bxe_link_settings_supported(struct bxe_softc *sc,
13965                            uint32_t         switch_cfg)
13966{
13967    uint32_t cfg_size = 0;
13968    uint32_t idx;
13969    uint8_t port = SC_PORT(sc);
13970
13971    /* aggregation of supported attributes of all external phys */
13972    sc->port.supported[0] = 0;
13973    sc->port.supported[1] = 0;
13974
13975    switch (sc->link_params.num_phys) {
13976    case 1:
13977        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13978        cfg_size = 1;
13979        break;
13980    case 2:
13981        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13982        cfg_size = 1;
13983        break;
13984    case 3:
13985        if (sc->link_params.multi_phy_config &
13986            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13987            sc->port.supported[1] =
13988                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13989            sc->port.supported[0] =
13990                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13991        } else {
13992            sc->port.supported[0] =
13993                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13994            sc->port.supported[1] =
13995                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13996        }
13997        cfg_size = 2;
13998        break;
13999    }
14000
14001    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14002        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14003              SHMEM_RD(sc,
14004                       dev_info.port_hw_config[port].external_phy_config),
14005              SHMEM_RD(sc,
14006                       dev_info.port_hw_config[port].external_phy_config2));
14007        return;
14008    }
14009
14010    if (CHIP_IS_E3(sc)) {
14011        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14012    } else {
14013        switch (switch_cfg) {
14014        case ELINK_SWITCH_CFG_1G:
14015            sc->port.phy_addr =
14016                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14017            break;
14018        case ELINK_SWITCH_CFG_10G:
14019            sc->port.phy_addr =
14020                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14021            break;
14022        default:
14023            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14024                  sc->port.link_config[0]);
14025            return;
14026        }
14027    }
14028
14029    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14030
14031    /* mask what we support according to speed_cap_mask per configuration */
14032    for (idx = 0; idx < cfg_size; idx++) {
14033        if (!(sc->link_params.speed_cap_mask[idx] &
14034              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14035            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14036        }
14037
14038        if (!(sc->link_params.speed_cap_mask[idx] &
14039              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14040            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14041        }
14042
14043        if (!(sc->link_params.speed_cap_mask[idx] &
14044              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14045            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14046        }
14047
14048        if (!(sc->link_params.speed_cap_mask[idx] &
14049              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14050            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14051        }
14052
14053        if (!(sc->link_params.speed_cap_mask[idx] &
14054              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14055            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14056        }
14057
14058        if (!(sc->link_params.speed_cap_mask[idx] &
14059              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14060            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14061        }
14062
14063        if (!(sc->link_params.speed_cap_mask[idx] &
14064              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14065            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14066        }
14067
14068        if (!(sc->link_params.speed_cap_mask[idx] &
14069              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14070            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14071        }
14072    }
14073
14074    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14075          sc->port.supported[0], sc->port.supported[1]);
14076    ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14077                   sc->port.supported[0], sc->port.supported[1]);
14078}
14079
14080static void
14081bxe_link_settings_requested(struct bxe_softc *sc)
14082{
14083    uint32_t link_config;
14084    uint32_t idx;
14085    uint32_t cfg_size = 0;
14086
14087    sc->port.advertising[0] = 0;
14088    sc->port.advertising[1] = 0;
14089
14090    switch (sc->link_params.num_phys) {
14091    case 1:
14092    case 2:
14093        cfg_size = 1;
14094        break;
14095    case 3:
14096        cfg_size = 2;
14097        break;
14098    }
14099
14100    for (idx = 0; idx < cfg_size; idx++) {
14101        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14102        link_config = sc->port.link_config[idx];
14103
14104        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14105        case PORT_FEATURE_LINK_SPEED_AUTO:
14106            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14107                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14108                sc->port.advertising[idx] |= sc->port.supported[idx];
14109                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14110                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14111                    sc->port.advertising[idx] |=
14112                        (ELINK_SUPPORTED_100baseT_Half |
14113                         ELINK_SUPPORTED_100baseT_Full);
14114            } else {
14115                /* force 10G, no AN */
14116                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14117                sc->port.advertising[idx] |=
14118                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14119                continue;
14120            }
14121            break;
14122
14123        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14124            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14125                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14126                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14127                                              ADVERTISED_TP);
14128            } else {
14129                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14130                          "speed_cap_mask=0x%08x\n",
14131                      link_config, sc->link_params.speed_cap_mask[idx]);
14132                return;
14133            }
14134            break;
14135
14136        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14137            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14138                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14139                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14140                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14141                                              ADVERTISED_TP);
14142                ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14143                               sc->link_params.req_duplex[idx]);
14144            } else {
14145                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14146                          "speed_cap_mask=0x%08x\n",
14147                      link_config, sc->link_params.speed_cap_mask[idx]);
14148                return;
14149            }
14150            break;
14151
14152        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14153            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14154                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14155                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14156                                              ADVERTISED_TP);
14157            } else {
14158                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14159                          "speed_cap_mask=0x%08x\n",
14160                      link_config, sc->link_params.speed_cap_mask[idx]);
14161                return;
14162            }
14163            break;
14164
14165        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14166            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14167                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14168                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14169                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14170                                              ADVERTISED_TP);
14171            } else {
14172                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14173                          "speed_cap_mask=0x%08x\n",
14174                      link_config, sc->link_params.speed_cap_mask[idx]);
14175                return;
14176            }
14177            break;
14178
14179        case PORT_FEATURE_LINK_SPEED_1G:
14180            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14181                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14182                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14183                                              ADVERTISED_TP);
14184            } else {
14185                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14186                          "speed_cap_mask=0x%08x\n",
14187                      link_config, sc->link_params.speed_cap_mask[idx]);
14188                return;
14189            }
14190            break;
14191
14192        case PORT_FEATURE_LINK_SPEED_2_5G:
14193            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14194                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14195                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14196                                              ADVERTISED_TP);
14197            } else {
14198                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14199                          "speed_cap_mask=0x%08x\n",
14200                      link_config, sc->link_params.speed_cap_mask[idx]);
14201                return;
14202            }
14203            break;
14204
14205        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14206            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14207                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14208                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14209                                              ADVERTISED_FIBRE);
14210            } else {
14211                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14212                          "speed_cap_mask=0x%08x\n",
14213                      link_config, sc->link_params.speed_cap_mask[idx]);
14214                return;
14215            }
14216            break;
14217
14218        case PORT_FEATURE_LINK_SPEED_20G:
14219            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14220            break;
14221
14222        default:
14223            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14224                      "speed_cap_mask=0x%08x\n",
14225                  link_config, sc->link_params.speed_cap_mask[idx]);
14226            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14227            sc->port.advertising[idx] = sc->port.supported[idx];
14228            break;
14229        }
14230
14231        sc->link_params.req_flow_ctrl[idx] =
14232            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14233
14234        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14235            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14236                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14237            } else {
14238                bxe_set_requested_fc(sc);
14239            }
14240        }
14241
14242        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14243                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14244              sc->link_params.req_line_speed[idx],
14245              sc->link_params.req_duplex[idx],
14246              sc->link_params.req_flow_ctrl[idx],
14247              sc->port.advertising[idx]);
14248        ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14249                           "advertising=0x%x\n",
14250                       sc->link_params.req_line_speed[idx],
14251                       sc->link_params.req_duplex[idx],
14252                       sc->port.advertising[idx]);
14253    }
14254}
14255
14256static void
14257bxe_get_phy_info(struct bxe_softc *sc)
14258{
14259    uint8_t port = SC_PORT(sc);
14260    uint32_t config = sc->port.config;
14261    uint32_t eee_mode;
14262
14263    /* shmem data already read in bxe_get_shmem_info() */
14264
14265    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14266                        "link_config0=0x%08x\n",
14267               sc->link_params.lane_config,
14268               sc->link_params.speed_cap_mask[0],
14269               sc->port.link_config[0]);
14270
14271
14272    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14273    bxe_link_settings_requested(sc);
14274
14275    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14276        sc->link_params.feature_config_flags |=
14277            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14278    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14279        sc->link_params.feature_config_flags &=
14280            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14281    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14282        sc->link_params.feature_config_flags |=
14283            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14284    }
14285
14286    /* configure link feature according to nvram value */
14287    eee_mode =
14288        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14289          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14290         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14291    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14292        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14293                                    ELINK_EEE_MODE_ENABLE_LPI |
14294                                    ELINK_EEE_MODE_OUTPUT_TIME);
14295    } else {
14296        sc->link_params.eee_mode = 0;
14297    }
14298
14299    /* get the media type */
14300    bxe_media_detect(sc);
14301    ELINK_DEBUG_P1(sc, "detected media type %d\n", sc->media);
14302}
14303
14304static void
14305bxe_get_params(struct bxe_softc *sc)
14306{
14307    /* get user tunable params */
14308    bxe_get_tunable_params(sc);
14309
14310    /* select the RX and TX ring sizes */
14311    sc->tx_ring_size = TX_BD_USABLE;
14312    sc->rx_ring_size = RX_BD_USABLE;
14313
14314    /* XXX disable WoL */
14315    sc->wol = 0;
14316}
14317
14318static void
14319bxe_set_modes_bitmap(struct bxe_softc *sc)
14320{
14321    uint32_t flags = 0;
14322
14323    if (CHIP_REV_IS_FPGA(sc)) {
14324        SET_FLAGS(flags, MODE_FPGA);
14325    } else if (CHIP_REV_IS_EMUL(sc)) {
14326        SET_FLAGS(flags, MODE_EMUL);
14327    } else {
14328        SET_FLAGS(flags, MODE_ASIC);
14329    }
14330
14331    if (CHIP_IS_MODE_4_PORT(sc)) {
14332        SET_FLAGS(flags, MODE_PORT4);
14333    } else {
14334        SET_FLAGS(flags, MODE_PORT2);
14335    }
14336
14337    if (CHIP_IS_E2(sc)) {
14338        SET_FLAGS(flags, MODE_E2);
14339    } else if (CHIP_IS_E3(sc)) {
14340        SET_FLAGS(flags, MODE_E3);
14341        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14342            SET_FLAGS(flags, MODE_E3_A0);
14343        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14344            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14345        }
14346    }
14347
14348    if (IS_MF(sc)) {
14349        SET_FLAGS(flags, MODE_MF);
14350        switch (sc->devinfo.mf_info.mf_mode) {
14351        case MULTI_FUNCTION_SD:
14352            SET_FLAGS(flags, MODE_MF_SD);
14353            break;
14354        case MULTI_FUNCTION_SI:
14355            SET_FLAGS(flags, MODE_MF_SI);
14356            break;
14357        case MULTI_FUNCTION_AFEX:
14358            SET_FLAGS(flags, MODE_MF_AFEX);
14359            break;
14360        }
14361    } else {
14362        SET_FLAGS(flags, MODE_SF);
14363    }
14364
14365#if defined(__LITTLE_ENDIAN)
14366    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14367#else /* __BIG_ENDIAN */
14368    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14369#endif
14370
14371    INIT_MODE_FLAGS(sc) = flags;
14372}
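/*
 * Example of the resulting bitmap (traced from the code above, illustration
 * only): an E3 B0 ASIC in 2-port, single-function mode on a little-endian
 * host ends up with
 *
 *     MODE_ASIC | MODE_PORT2 | MODE_E3 | MODE_E3_B0 | MODE_COS3 |
 *     MODE_SF | MODE_LITTLE_ENDIAN
 *
 * in INIT_MODE_FLAGS(sc).
 */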
14373
14374static int
14375bxe_alloc_hsi_mem(struct bxe_softc *sc)
14376{
14377    struct bxe_fastpath *fp;
14378    bus_addr_t busaddr;
14379    int max_agg_queues;
14380    int max_segments;
14381    bus_size_t max_size;
14382    bus_size_t max_seg_size;
14383    char buf[32];
14384    int rc;
14385    int i, j;
14386
14387    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14388
14389    /* allocate the parent bus DMA tag */
14390    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14391                            1,                        /* alignment */
14392                            0,                        /* boundary limit */
14393                            BUS_SPACE_MAXADDR,        /* restricted low */
14394                            BUS_SPACE_MAXADDR,        /* restricted hi */
14395                            NULL,                     /* addr filter() */
14396                            NULL,                     /* addr filter() arg */
14397                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14398                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14399                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14400                            0,                        /* flags */
14401                            NULL,                     /* lock() */
14402                            NULL,                     /* lock() arg */
14403                            &sc->parent_dma_tag);     /* returned dma tag */
14404    if (rc != 0) {
14405        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14406        return (1);
14407    }
14408
14409    /************************/
14410    /* DEFAULT STATUS BLOCK */
14411    /************************/
14412
14413    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14414                      &sc->def_sb_dma, "default status block") != 0) {
14415        /* XXX */
14416        bus_dma_tag_destroy(sc->parent_dma_tag);
14417        return (1);
14418    }
14419
14420    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14421
14422    /***************/
14423    /* EVENT QUEUE */
14424    /***************/
14425
14426    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14427                      &sc->eq_dma, "event queue") != 0) {
14428        /* XXX */
14429        bxe_dma_free(sc, &sc->def_sb_dma);
14430        sc->def_sb = NULL;
14431        bus_dma_tag_destroy(sc->parent_dma_tag);
14432        return (1);
14433    }
14434
14435    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14436
14437    /*************/
14438    /* SLOW PATH */
14439    /*************/
14440
14441    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14442                      &sc->sp_dma, "slow path") != 0) {
14443        /* XXX */
14444        bxe_dma_free(sc, &sc->eq_dma);
14445        sc->eq = NULL;
14446        bxe_dma_free(sc, &sc->def_sb_dma);
14447        sc->def_sb = NULL;
14448        bus_dma_tag_destroy(sc->parent_dma_tag);
14449        return (1);
14450    }
14451
14452    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14453
14454    /*******************/
14455    /* SLOW PATH QUEUE */
14456    /*******************/
14457
14458    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14459                      &sc->spq_dma, "slow path queue") != 0) {
14460        /* XXX */
14461        bxe_dma_free(sc, &sc->sp_dma);
14462        sc->sp = NULL;
14463        bxe_dma_free(sc, &sc->eq_dma);
14464        sc->eq = NULL;
14465        bxe_dma_free(sc, &sc->def_sb_dma);
14466        sc->def_sb = NULL;
14467        bus_dma_tag_destroy(sc->parent_dma_tag);
14468        return (1);
14469    }
14470
14471    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14472
14473    /***************************/
14474    /* FW DECOMPRESSION BUFFER */
14475    /***************************/
14476
14477    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14478                      "fw decompression buffer") != 0) {
14479        /* XXX */
14480        bxe_dma_free(sc, &sc->spq_dma);
14481        sc->spq = NULL;
14482        bxe_dma_free(sc, &sc->sp_dma);
14483        sc->sp = NULL;
14484        bxe_dma_free(sc, &sc->eq_dma);
14485        sc->eq = NULL;
14486        bxe_dma_free(sc, &sc->def_sb_dma);
14487        sc->def_sb = NULL;
14488        bus_dma_tag_destroy(sc->parent_dma_tag);
14489        return (1);
14490    }
14491
14492    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14493
14494    if ((sc->gz_strm =
14495         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14496        /* XXX */
14497        bxe_dma_free(sc, &sc->gz_buf_dma);
14498        sc->gz_buf = NULL;
14499        bxe_dma_free(sc, &sc->spq_dma);
14500        sc->spq = NULL;
14501        bxe_dma_free(sc, &sc->sp_dma);
14502        sc->sp = NULL;
14503        bxe_dma_free(sc, &sc->eq_dma);
14504        sc->eq = NULL;
14505        bxe_dma_free(sc, &sc->def_sb_dma);
14506        sc->def_sb = NULL;
14507        bus_dma_tag_destroy(sc->parent_dma_tag);
14508        return (1);
14509    }
14510
14511    /*************/
14512    /* FASTPATHS */
14513    /*************/
14514
14515    /* allocate DMA memory for each fastpath structure */
14516    for (i = 0; i < sc->num_queues; i++) {
14517        fp = &sc->fp[i];
14518        fp->sc    = sc;
14519        fp->index = i;
14520
14521        /*******************/
14522        /* FP STATUS BLOCK */
14523        /*******************/
14524
14525        snprintf(buf, sizeof(buf), "fp %d status block", i);
14526        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14527                          &fp->sb_dma, buf) != 0) {
14528            /* XXX unwind and free previous fastpath allocations */
14529            BLOGE(sc, "Failed to alloc %s\n", buf);
14530            return (1);
14531        } else {
14532            if (CHIP_IS_E2E3(sc)) {
14533                fp->status_block.e2_sb =
14534                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14535            } else {
14536                fp->status_block.e1x_sb =
14537                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14538            }
14539        }
14540
14541        /******************/
14542        /* FP TX BD CHAIN */
14543        /******************/
14544
14545        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14546        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14547                          &fp->tx_dma, buf) != 0) {
14548            /* XXX unwind and free previous fastpath allocations */
14549            BLOGE(sc, "Failed to alloc %s\n", buf);
14550            return (1);
14551        } else {
14552            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14553        }
14554
14555        /* link together the tx bd chain pages */
14556        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14557            /* index into the tx bd chain array to last entry per page */
14558            struct eth_tx_next_bd *tx_next_bd =
14559                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14560            /* point to the next page and wrap from last page */
14561            busaddr = (fp->tx_dma.paddr +
14562                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14563            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14564            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14565        }
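        /*
         * Illustration (not driver logic): the loop above turns the TX BD
         * pages into a ring. For j = 1 .. TX_BD_NUM_PAGES - 1 the last BD of
         * page (j - 1) holds the bus address of page j; on the final pass,
         * j % TX_BD_NUM_PAGES == 0, so the last page points back to page 0.
         * With a hypothetical TX_BD_NUM_PAGES of 4 and a page-aligned
         * fp->tx_dma.paddr of 0x10000 (assuming BCM_PAGE_SIZE == 4096 purely
         * for the arithmetic):
         *
         *     page 0 next_bd -> 0x11000 (page 1)
         *     page 1 next_bd -> 0x12000 (page 2)
         *     page 2 next_bd -> 0x13000 (page 3)
         *     page 3 next_bd -> 0x10000 (wraps to page 0)
         */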
14566
14567        /******************/
14568        /* FP RX BD CHAIN */
14569        /******************/
14570
14571        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14572        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14573                          &fp->rx_dma, buf) != 0) {
14574            /* XXX unwind and free previous fastpath allocations */
14575            BLOGE(sc, "Failed to alloc %s\n", buf);
14576            return (1);
14577        } else {
14578            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14579        }
14580
14581        /* link together the rx bd chain pages */
14582        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14583            /* index into the rx bd chain array to last entry per page */
14584            struct eth_rx_bd *rx_bd =
14585                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14586            /* point to the next page and wrap from last page */
14587            busaddr = (fp->rx_dma.paddr +
14588                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14589            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14590            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14591        }
14592
14593        /*******************/
14594        /* FP RX RCQ CHAIN */
14595        /*******************/
14596
14597        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14598        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14599                          &fp->rcq_dma, buf) != 0) {
14600            /* XXX unwind and free previous fastpath allocations */
14601            BLOGE(sc, "Failed to alloc %s\n", buf);
14602            return (1);
14603        } else {
14604            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14605        }
14606
14607        /* link together the rcq chain pages */
14608        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14609            /* index into the rcq chain array to last entry per page */
14610            struct eth_rx_cqe_next_page *rx_cqe_next =
14611                (struct eth_rx_cqe_next_page *)
14612                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14613            /* point to the next page and wrap from last page */
14614            busaddr = (fp->rcq_dma.paddr +
14615                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14616            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14617            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14618        }
14619
14620        /*******************/
14621        /* FP RX SGE CHAIN */
14622        /*******************/
14623
14624        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14625        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14626                          &fp->rx_sge_dma, buf) != 0) {
14627            /* XXX unwind and free previous fastpath allocations */
14628            BLOGE(sc, "Failed to alloc %s\n", buf);
14629            return (1);
14630        } else {
14631            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14632        }
14633
14634        /* link together the sge chain pages */
14635        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14636            /* index into the sge chain array to last entry per page */
14637            struct eth_rx_sge *rx_sge =
14638                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14639            /* point to the next page and wrap from last page */
14640            busaddr = (fp->rx_sge_dma.paddr +
14641                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14642            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14643            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14644        }
14645
14646        /***********************/
14647        /* FP TX MBUF DMA MAPS */
14648        /***********************/
14649
14650        /* set required sizes before mapping to conserve resources */
14651        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14652            max_size     = BXE_TSO_MAX_SIZE;
14653            max_segments = BXE_TSO_MAX_SEGMENTS;
14654            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14655        } else {
14656            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14657            max_segments = BXE_MAX_SEGMENTS;
14658            max_seg_size = MCLBYTES;
14659        }
14660
14661        /* create a dma tag for the tx mbufs */
14662        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14663                                1,                  /* alignment */
14664                                0,                  /* boundary limit */
14665                                BUS_SPACE_MAXADDR,  /* restricted low */
14666                                BUS_SPACE_MAXADDR,  /* restricted hi */
14667                                NULL,               /* addr filter() */
14668                                NULL,               /* addr filter() arg */
14669                                max_size,           /* max map size */
14670                                max_segments,       /* num discontinuous */
14671                                max_seg_size,       /* max seg size */
14672                                0,                  /* flags */
14673                                NULL,               /* lock() */
14674                                NULL,               /* lock() arg */
14675                                &fp->tx_mbuf_tag);  /* returned dma tag */
14676        if (rc != 0) {
14677            /* XXX unwind and free previous fastpath allocations */
14678            BLOGE(sc, "Failed to create dma tag for "
14679                      "'fp %d tx mbufs' (%d)\n", i, rc);
14680            return (1);
14681        }
14682
14683        /* create dma maps for each of the tx mbuf clusters */
14684        for (j = 0; j < TX_BD_TOTAL; j++) {
14685            if (bus_dmamap_create(fp->tx_mbuf_tag,
14686                                  BUS_DMA_NOWAIT,
14687                                  &fp->tx_mbuf_chain[j].m_map)) {
14688                /* XXX unwind and free previous fastpath allocations */
14689                BLOGE(sc, "Failed to create dma map for "
14690                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14691                return (1);
14692            }
14693        }
14694
14695        /***********************/
14696        /* FP RX MBUF DMA MAPS */
14697        /***********************/
14698
14699        /* create a dma tag for the rx mbufs */
14700        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14701                                1,                  /* alignment */
14702                                0,                  /* boundary limit */
14703                                BUS_SPACE_MAXADDR,  /* restricted low */
14704                                BUS_SPACE_MAXADDR,  /* restricted hi */
14705                                NULL,               /* addr filter() */
14706                                NULL,               /* addr filter() arg */
14707                                MJUM9BYTES,         /* max map size */
14708                                1,                  /* num discontinuous */
14709                                MJUM9BYTES,         /* max seg size */
14710                                0,                  /* flags */
14711                                NULL,               /* lock() */
14712                                NULL,               /* lock() arg */
14713                                &fp->rx_mbuf_tag);  /* returned dma tag */
14714        if (rc != 0) {
14715            /* XXX unwind and free previous fastpath allocations */
14716            BLOGE(sc, "Failed to create dma tag for "
14717                      "'fp %d rx mbufs' (%d)\n", i, rc);
14718            return (1);
14719        }
14720
14721        /* create dma maps for each of the rx mbuf clusters */
14722        for (j = 0; j < RX_BD_TOTAL; j++) {
14723            if (bus_dmamap_create(fp->rx_mbuf_tag,
14724                                  BUS_DMA_NOWAIT,
14725                                  &fp->rx_mbuf_chain[j].m_map)) {
14726                /* XXX unwind and free previous fastpath allocations */
14727                BLOGE(sc, "Failed to create dma map for "
14728                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14729                return (1);
14730            }
14731        }
14732
14733        /* create dma map for the spare rx mbuf cluster */
14734        if (bus_dmamap_create(fp->rx_mbuf_tag,
14735                              BUS_DMA_NOWAIT,
14736                              &fp->rx_mbuf_spare_map)) {
14737            /* XXX unwind and free previous fastpath allocations */
14738            BLOGE(sc, "Failed to create dma map for "
14739                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14740            return (1);
14741        }
14742
14743        /***************************/
14744        /* FP RX SGE MBUF DMA MAPS */
14745        /***************************/
14746
14747        /* create a dma tag for the rx sge mbufs */
14748        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14749                                1,                  /* alignment */
14750                                0,                  /* boundary limit */
14751                                BUS_SPACE_MAXADDR,  /* restricted low */
14752                                BUS_SPACE_MAXADDR,  /* restricted hi */
14753                                NULL,               /* addr filter() */
14754                                NULL,               /* addr filter() arg */
14755                                BCM_PAGE_SIZE,      /* max map size */
14756                                1,                  /* num discontinuous */
14757                                BCM_PAGE_SIZE,      /* max seg size */
14758                                0,                  /* flags */
14759                                NULL,               /* lock() */
14760                                NULL,               /* lock() arg */
14761                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14762        if (rc != 0) {
14763            /* XXX unwind and free previous fastpath allocations */
14764            BLOGE(sc, "Failed to create dma tag for "
14765                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14766            return (1);
14767        }
14768
14769        /* create dma maps for the rx sge mbuf clusters */
14770        for (j = 0; j < RX_SGE_TOTAL; j++) {
14771            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14772                                  BUS_DMA_NOWAIT,
14773                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14774                /* XXX unwind and free previous fastpath allocations */
14775                BLOGE(sc, "Failed to create dma map for "
14776                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14777                return (1);
14778            }
14779        }
14780
14781        /* create dma map for the spare rx sge mbuf cluster */
14782        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14783                              BUS_DMA_NOWAIT,
14784                              &fp->rx_sge_mbuf_spare_map)) {
14785            /* XXX unwind and free previous fastpath allocations */
14786            BLOGE(sc, "Failed to create dma map for "
14787                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14788            return (1);
14789        }
14790
14791        /***************************/
14792        /* FP RX TPA MBUF DMA MAPS */
14793        /***************************/
14794
14795        /* create dma maps for the rx tpa mbuf clusters */
14796        max_agg_queues = MAX_AGG_QS(sc);
14797
14798        for (j = 0; j < max_agg_queues; j++) {
14799            if (bus_dmamap_create(fp->rx_mbuf_tag,
14800                                  BUS_DMA_NOWAIT,
14801                                  &fp->rx_tpa_info[j].bd.m_map)) {
14802                /* XXX unwind and free previous fastpath allocations */
14803                BLOGE(sc, "Failed to create dma map for "
14804                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14805                return (1);
14806            }
14807        }
14808
14809        /* create dma map for the spare rx tpa mbuf cluster */
14810        if (bus_dmamap_create(fp->rx_mbuf_tag,
14811                              BUS_DMA_NOWAIT,
14812                              &fp->rx_tpa_info_mbuf_spare_map)) {
14813            /* XXX unwind and free previous fastpath allocations */
14814            BLOGE(sc, "Failed to create dma map for "
14815                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14816            return (1);
14817        }
14818
14819        bxe_init_sge_ring_bit_mask(fp);
14820    }
14821
14822    return (0);
14823}
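/*
 * Illustrative sketch only (not part of the driver): the XXX notes in the
 * allocation paths above ask for a single unwind instead of repeating the
 * frees in every error branch. Assuming bxe_free_hsi_mem() below is safe to
 * call on a partially initialized softc (it NULL-checks the parent tag, the
 * per-ring tags and the mbuf maps before destroying them), the error handling
 * could be centralized roughly like this:
 *
 *     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->eq_dma, "event queue") != 0)
 *         goto fail;
 *     ...
 * fail:
 *     bxe_free_hsi_mem(sc);
 *     return (1);
 */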
14824
14825static void
14826bxe_free_hsi_mem(struct bxe_softc *sc)
14827{
14828    struct bxe_fastpath *fp;
14829    int max_agg_queues;
14830    int i, j;
14831
14832    if (sc->parent_dma_tag == NULL) {
14833        return; /* assume nothing was allocated */
14834    }
14835
14836    for (i = 0; i < sc->num_queues; i++) {
14837        fp = &sc->fp[i];
14838
14839        /*******************/
14840        /* FP STATUS BLOCK */
14841        /*******************/
14842
14843        bxe_dma_free(sc, &fp->sb_dma);
14844        memset(&fp->status_block, 0, sizeof(fp->status_block));
14845
14846        /******************/
14847        /* FP TX BD CHAIN */
14848        /******************/
14849
14850        bxe_dma_free(sc, &fp->tx_dma);
14851        fp->tx_chain = NULL;
14852
14853        /******************/
14854        /* FP RX BD CHAIN */
14855        /******************/
14856
14857        bxe_dma_free(sc, &fp->rx_dma);
14858        fp->rx_chain = NULL;
14859
14860        /*******************/
14861        /* FP RX RCQ CHAIN */
14862        /*******************/
14863
14864        bxe_dma_free(sc, &fp->rcq_dma);
14865        fp->rcq_chain = NULL;
14866
14867        /*******************/
14868        /* FP RX SGE CHAIN */
14869        /*******************/
14870
14871        bxe_dma_free(sc, &fp->rx_sge_dma);
14872        fp->rx_sge_chain = NULL;
14873
14874        /***********************/
14875        /* FP TX MBUF DMA MAPS */
14876        /***********************/
14877
14878        if (fp->tx_mbuf_tag != NULL) {
14879            for (j = 0; j < TX_BD_TOTAL; j++) {
14880                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14881                    bus_dmamap_unload(fp->tx_mbuf_tag,
14882                                      fp->tx_mbuf_chain[j].m_map);
14883                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14884                                       fp->tx_mbuf_chain[j].m_map);
14885                }
14886            }
14887
14888            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14889            fp->tx_mbuf_tag = NULL;
14890        }
14891
14892        /***********************/
14893        /* FP RX MBUF DMA MAPS */
14894        /***********************/
14895
14896        if (fp->rx_mbuf_tag != NULL) {
14897            for (j = 0; j < RX_BD_TOTAL; j++) {
14898                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14899                    bus_dmamap_unload(fp->rx_mbuf_tag,
14900                                      fp->rx_mbuf_chain[j].m_map);
14901                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14902                                       fp->rx_mbuf_chain[j].m_map);
14903                }
14904            }
14905
14906            if (fp->rx_mbuf_spare_map != NULL) {
14907                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14908                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14909            }
14910
14911            /***************************/
14912            /* FP RX TPA MBUF DMA MAPS */
14913            /***************************/
14914
14915            max_agg_queues = MAX_AGG_QS(sc);
14916
14917            for (j = 0; j < max_agg_queues; j++) {
14918                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14919                    bus_dmamap_unload(fp->rx_mbuf_tag,
14920                                      fp->rx_tpa_info[j].bd.m_map);
14921                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14922                                       fp->rx_tpa_info[j].bd.m_map);
14923                }
14924            }
14925
14926            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14927                bus_dmamap_unload(fp->rx_mbuf_tag,
14928                                  fp->rx_tpa_info_mbuf_spare_map);
14929                bus_dmamap_destroy(fp->rx_mbuf_tag,
14930                                   fp->rx_tpa_info_mbuf_spare_map);
14931            }
14932
14933            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14934            fp->rx_mbuf_tag = NULL;
14935        }
14936
14937        /***************************/
14938        /* FP RX SGE MBUF DMA MAPS */
14939        /***************************/
14940
14941        if (fp->rx_sge_mbuf_tag != NULL) {
14942            for (j = 0; j < RX_SGE_TOTAL; j++) {
14943                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14944                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14945                                      fp->rx_sge_mbuf_chain[j].m_map);
14946                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14947                                       fp->rx_sge_mbuf_chain[j].m_map);
14948                }
14949            }
14950
14951            if (fp->rx_sge_mbuf_spare_map != NULL) {
14952                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14953                                  fp->rx_sge_mbuf_spare_map);
14954                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14955                                   fp->rx_sge_mbuf_spare_map);
14956            }
14957
14958            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14959            fp->rx_sge_mbuf_tag = NULL;
14960        }
14961    }
14962
14963    /***************************/
14964    /* FW DECOMPRESSION BUFFER */
14965    /***************************/
14966
14967    bxe_dma_free(sc, &sc->gz_buf_dma);
14968    sc->gz_buf = NULL;
14969    free(sc->gz_strm, M_DEVBUF);
14970    sc->gz_strm = NULL;
14971
14972    /*******************/
14973    /* SLOW PATH QUEUE */
14974    /*******************/
14975
14976    bxe_dma_free(sc, &sc->spq_dma);
14977    sc->spq = NULL;
14978
14979    /*************/
14980    /* SLOW PATH */
14981    /*************/
14982
14983    bxe_dma_free(sc, &sc->sp_dma);
14984    sc->sp = NULL;
14985
14986    /***************/
14987    /* EVENT QUEUE */
14988    /***************/
14989
14990    bxe_dma_free(sc, &sc->eq_dma);
14991    sc->eq = NULL;
14992
14993    /************************/
14994    /* DEFAULT STATUS BLOCK */
14995    /************************/
14996
14997    bxe_dma_free(sc, &sc->def_sb_dma);
14998    sc->def_sb = NULL;
14999
15000    bus_dma_tag_destroy(sc->parent_dma_tag);
15001    sc->parent_dma_tag = NULL;
15002}
15003
15004/*
15005 * A previous driver DMAE transaction may have occurred when the pre-boot
15006 * stage ended and the OS boot began. This would invalidate the addresses of
15007 * that transaction, causing the was-error bit to be set in the PCI and all
15008 * hw-to-host PCIe transactions to time out. If that happened, we want to
15009 * clear the interrupt which detected this from the pglueb and the was-done bit.
15010 */
15011static void
15012bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15013{
15014    uint32_t val;
15015
15016    if (!CHIP_IS_E1x(sc)) {
15017        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15018        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15019            BLOGD(sc, DBG_LOAD,
15020                  "Clearing 'was-error' bit that was set in pglueb\n");
15021            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15022        }
15023    }
15024}
15025
15026static int
15027bxe_prev_mcp_done(struct bxe_softc *sc)
15028{
15029    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15030                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15031    if (!rc) {
15032        BLOGE(sc, "MCP response failure, aborting\n");
15033        return (-1);
15034    }
15035
15036    return (0);
15037}
15038
15039static struct bxe_prev_list_node *
15040bxe_prev_path_get_entry(struct bxe_softc *sc)
15041{
15042    struct bxe_prev_list_node *tmp;
15043
15044    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15045        if ((sc->pcie_bus == tmp->bus) &&
15046            (sc->pcie_device == tmp->slot) &&
15047            (SC_PATH(sc) == tmp->path)) {
15048            return (tmp);
15049        }
15050    }
15051
15052    return (NULL);
15053}
15054
15055static uint8_t
15056bxe_prev_is_path_marked(struct bxe_softc *sc)
15057{
15058    struct bxe_prev_list_node *tmp;
15059    int rc = FALSE;
15060
15061    mtx_lock(&bxe_prev_mtx);
15062
15063    tmp = bxe_prev_path_get_entry(sc);
15064    if (tmp) {
15065        if (tmp->aer) {
15066            BLOGD(sc, DBG_LOAD,
15067                  "Path %d/%d/%d was marked by AER\n",
15068                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15069        } else {
15070            rc = TRUE;
15071            BLOGD(sc, DBG_LOAD,
15072                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15073                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15074        }
15075    }
15076
15077    mtx_unlock(&bxe_prev_mtx);
15078
15079    return (rc);
15080}
15081
15082static int
15083bxe_prev_mark_path(struct bxe_softc *sc,
15084                   uint8_t          after_undi)
15085{
15086    struct bxe_prev_list_node *tmp;
15087
15088    mtx_lock(&bxe_prev_mtx);
15089
15090    /* Check whether the entry for this path already exists */
15091    tmp = bxe_prev_path_get_entry(sc);
15092    if (tmp) {
15093        if (!tmp->aer) {
15094            BLOGD(sc, DBG_LOAD,
15095                  "Re-marking AER in path %d/%d/%d\n",
15096                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15097        } else {
15098            BLOGD(sc, DBG_LOAD,
15099                  "Removing AER indication from path %d/%d/%d\n",
15100                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15101            tmp->aer = 0;
15102        }
15103
15104        mtx_unlock(&bxe_prev_mtx);
15105        return (0);
15106    }
15107
15108    mtx_unlock(&bxe_prev_mtx);
15109
15110    /* Create an entry for this path and add it */
15111    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15112                 (M_NOWAIT | M_ZERO));
15113    if (!tmp) {
15114        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15115        return (-1);
15116    }
15117
15118    tmp->bus  = sc->pcie_bus;
15119    tmp->slot = sc->pcie_device;
15120    tmp->path = SC_PATH(sc);
15121    tmp->aer  = 0;
15122    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15123
15124    mtx_lock(&bxe_prev_mtx);
15125
15126    BLOGD(sc, DBG_LOAD,
15127          "Marked path %d/%d/%d - finished previous unload\n",
15128          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15129    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15130
15131    mtx_unlock(&bxe_prev_mtx);
15132
15133    return (0);
15134}
15135
15136static int
15137bxe_do_flr(struct bxe_softc *sc)
15138{
15139    int i;
15140
15141    /* only E2 and onwards support FLR */
15142    if (CHIP_IS_E1x(sc)) {
15143        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15144        return (-1);
15145    }
15146
15147    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15148    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15149        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15150              sc->devinfo.bc_ver);
15151        return (-1);
15152    }
15153
15154    /* Wait for Transaction Pending bit clean */
15155    for (i = 0; i < 4; i++) {
15156        if (i) {
15157            DELAY(((1 << (i - 1)) * 100) * 1000);
15158        }
15159
15160        if (!bxe_is_pcie_pending(sc)) {
15161            goto clear;
15162        }
15163    }
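    /*
     * Timing note (illustration): the loop above polls up to four times with
     * an exponential backoff of DELAY(((1 << (i - 1)) * 100) * 1000), i.e.
     * 100 ms, 200 ms and 400 ms before the second, third and fourth checks,
     * so a stuck Transaction Pending bit is given roughly 700 ms to clear.
     */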
15164
15165    BLOGE(sc, "PCIE transaction is not cleared, "
15166              "proceeding with reset anyway\n");
15167
15168clear:
15169
15170    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15171    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15172
15173    return (0);
15174}
15175
15176struct bxe_mac_vals {
15177    uint32_t xmac_addr;
15178    uint32_t xmac_val;
15179    uint32_t emac_addr;
15180    uint32_t emac_val;
15181    uint32_t umac_addr;
15182    uint32_t umac_val;
15183    uint32_t bmac_addr;
15184    uint32_t bmac_val[2];
15185};
15186
15187static void
15188bxe_prev_unload_close_mac(struct bxe_softc *sc,
15189                          struct bxe_mac_vals *vals)
15190{
15191    uint32_t val, base_addr, offset, mask, reset_reg;
15192    uint8_t mac_stopped = FALSE;
15193    uint8_t port = SC_PORT(sc);
15194    uint32_t wb_data[2];
15195
15196    /* reset addresses as they also mark which values were changed */
15197    vals->bmac_addr = 0;
15198    vals->umac_addr = 0;
15199    vals->xmac_addr = 0;
15200    vals->emac_addr = 0;
15201
15202    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15203
15204    if (!CHIP_IS_E3(sc)) {
15205        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15206        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15207        if ((mask & reset_reg) && val) {
15208            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15209            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15210                                    : NIG_REG_INGRESS_BMAC0_MEM;
15211            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15212                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15213
15214            /*
15215             * use rd/wr since we cannot use dmae. This is safe
15216             * since MCP won't access the bus due to the request
15217             * to unload, and no function on the path can be
15218             * loaded at this time.
15219             */
15220            wb_data[0] = REG_RD(sc, base_addr + offset);
15221            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15222            vals->bmac_addr = base_addr + offset;
15223            vals->bmac_val[0] = wb_data[0];
15224            vals->bmac_val[1] = wb_data[1];
15225            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15226            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15227            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15228        }
15229
15230        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15231        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15232        vals->emac_val = REG_RD(sc, vals->emac_addr);
15233        REG_WR(sc, vals->emac_addr, 0);
15234        mac_stopped = TRUE;
15235    } else {
15236        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15237            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15238            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15239            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15240            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15241            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15242            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15243            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15244            REG_WR(sc, vals->xmac_addr, 0);
15245            mac_stopped = TRUE;
15246        }
15247
15248        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15249        if (mask & reset_reg) {
15250            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15251            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15252            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15253            vals->umac_val = REG_RD(sc, vals->umac_addr);
15254            REG_WR(sc, vals->umac_addr, 0);
15255            mac_stopped = TRUE;
15256        }
15257    }
15258
15259    if (mac_stopped) {
15260        DELAY(20000);
15261    }
15262}
15263
15264#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15265#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15266#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15267#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
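/*
 * Worked example of the producer encoding above (illustration only): the
 * 32-bit UNDI producer word keeps the BD producer in the high 16 bits and
 * the RCQ producer in the low 16 bits. For val == 0x01230044:
 *
 *     BXE_PREV_UNDI_BD(val)              == 0x0123
 *     BXE_PREV_UNDI_RCQ(val)             == 0x0044
 *     BXE_PREV_UNDI_PROD(0x0044, 0x0123) == 0x01230044
 *
 * which is exactly what bxe_prev_unload_undi_inc() relies on when it bumps
 * both producers by 'inc'.
 */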
15268
15269static void
15270bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15271                         uint8_t          port,
15272                         uint8_t          inc)
15273{
15274    uint16_t rcq, bd;
15275    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15276
15277    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15278    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15279
15280    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15281    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15282
15283    BLOGD(sc, DBG_LOAD,
15284          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15285          port, bd, rcq);
15286}
15287
15288static int
15289bxe_prev_unload_common(struct bxe_softc *sc)
15290{
15291    uint32_t reset_reg, tmp_reg = 0, rc;
15292    uint8_t prev_undi = FALSE;
15293    struct bxe_mac_vals mac_vals;
15294    uint32_t timer_count = 1000;
15295    uint32_t prev_brb;
15296
15297    /*
15298     * It is possible a previous function received 'common' answer,
15299     * but hasn't loaded yet, therefore creating a scenario of
15300     * multiple functions receiving 'common' on the same path.
15301     */
15302    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15303
15304    memset(&mac_vals, 0, sizeof(mac_vals));
15305
15306    if (bxe_prev_is_path_marked(sc)) {
15307        return (bxe_prev_mcp_done(sc));
15308    }
15309
15310    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15311
15312    /* Reset should be performed after BRB is emptied */
15313    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15314        /* Close the MAC Rx to prevent BRB from filling up */
15315        bxe_prev_unload_close_mac(sc, &mac_vals);
15316
15317        /* close LLH filters towards the BRB */
15318        elink_set_rx_filter(&sc->link_params, 0);
15319
15320        /*
15321         * Check if the UNDI driver was previously loaded.
15322         * UNDI driver initializes CID offset for normal bell to 0x7
15323         */
15324        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15325            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15326            if (tmp_reg == 0x7) {
15327                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15328                prev_undi = TRUE;
15329                /* clear the UNDI indication */
15330                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15331                /* clear possible idle check errors */
15332                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15333            }
15334        }
15335
15336        /* wait until BRB is empty */
15337        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15338        while (timer_count) {
15339            prev_brb = tmp_reg;
15340
15341            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15342            if (!tmp_reg) {
15343                break;
15344            }
15345
15346            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15347
15348            /* reset timer as long as BRB actually gets emptied */
15349            if (prev_brb > tmp_reg) {
15350                timer_count = 1000;
15351            } else {
15352                timer_count--;
15353            }
15354
15355            /* If UNDI resides in memory, manually increment it */
15356            if (prev_undi) {
15357                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15358            }
15359
15360            DELAY(10);
15361        }
15362
15363        if (!timer_count) {
15364            BLOGE(sc, "Failed to empty BRB\n");
15365        }
15366    }
15367
15368    /* No packets are in the pipeline, path is ready for reset */
15369    bxe_reset_common(sc);
15370
15371    if (mac_vals.xmac_addr) {
15372        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15373    }
15374    if (mac_vals.umac_addr) {
15375        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15376    }
15377    if (mac_vals.emac_addr) {
15378        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15379    }
15380    if (mac_vals.bmac_addr) {
15381        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15382        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15383    }
15384
15385    rc = bxe_prev_mark_path(sc, prev_undi);
15386    if (rc) {
15387        bxe_prev_mcp_done(sc);
15388        return (rc);
15389    }
15390
15391    return (bxe_prev_mcp_done(sc));
15392}
15393
15394static int
15395bxe_prev_unload_uncommon(struct bxe_softc *sc)
15396{
15397    int rc;
15398
15399    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15400
15401    /* Test if previous unload process was already finished for this path */
15402    if (bxe_prev_is_path_marked(sc)) {
15403        return (bxe_prev_mcp_done(sc));
15404    }
15405
15406    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15407
15408    /*
15409     * If function has FLR capabilities, and existing FW version matches
15410     * the one required, then FLR will be sufficient to clean any residue
15411     * left by previous driver
15412     */
15413    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15414    if (!rc) {
15415        /* fw version is good */
15416        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15417        rc = bxe_do_flr(sc);
15418    }
15419
15420    if (!rc) {
15421        /* FLR was performed */
15422        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15423        return (0);
15424    }
15425
15426    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15427
15428    /* Close the MCP request, return failure */
15429    rc = bxe_prev_mcp_done(sc);
15430    if (!rc) {
15431        rc = BXE_PREV_WAIT_NEEDED;
15432    }
15433
15434    return (rc);
15435}
15436
15437static int
15438bxe_prev_unload(struct bxe_softc *sc)
15439{
15440    int time_counter = 10;
15441    uint32_t fw, hw_lock_reg, hw_lock_val;
15442    uint32_t rc = 0;
15443
15444    /*
15445     * Clear HW from errors which may have resulted from an interrupted
15446     * DMAE transaction.
15447     */
15448    bxe_prev_interrupted_dmae(sc);
15449
15450    /* Release previously held locks */
15451    hw_lock_reg =
15452        (SC_FUNC(sc) <= 5) ?
15453            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15454            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15455
15456    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15457    if (hw_lock_val) {
15458        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15459            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15460            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15461                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15462        }
15463        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15464        REG_WR(sc, hw_lock_reg, 0xffffffff);
15465    } else {
15466        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15467    }
15468
15469    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15470        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15471        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15472    }
15473
15474    do {
15475        /* Lock MCP using an unload request */
15476        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15477        if (!fw) {
15478            BLOGE(sc, "MCP response failure, aborting\n");
15479            rc = -1;
15480            break;
15481        }
15482
15483        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15484            rc = bxe_prev_unload_common(sc);
15485            break;
15486        }
15487
15488        /* non-common reply from MCP might require looping */
15489        rc = bxe_prev_unload_uncommon(sc);
15490        if (rc != BXE_PREV_WAIT_NEEDED) {
15491            break;
15492        }
15493
15494        DELAY(20000);
15495    } while (--time_counter);
15496
15497    if (!time_counter || rc) {
15498        BLOGE(sc, "Failed to unload previous driver!"
15499            " time_counter %d rc %d\n", time_counter, rc);
15500        rc = -1;
15501    }
15502
15503    return (rc);
15504}
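/*
 * Timing note (illustration): the unload-request loop above retries at most
 * time_counter = 10 times with a DELAY(20000) between attempts, i.e. roughly
 * 10 x 20 ms = 200 ms of waiting (plus the MCP command time) before giving
 * up on a previous driver that keeps answering with a non-common reply.
 */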
15505
15506void
15507bxe_dcbx_set_state(struct bxe_softc *sc,
15508                   uint8_t          dcb_on,
15509                   uint32_t         dcbx_enabled)
15510{
15511    if (!CHIP_IS_E1x(sc)) {
15512        sc->dcb_state = dcb_on;
15513        sc->dcbx_enabled = dcbx_enabled;
15514    } else {
15515        sc->dcb_state = FALSE;
15516        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15517    }
15518    BLOGD(sc, DBG_LOAD,
15519          "DCB state [%s:%s]\n",
15520          dcb_on ? "ON" : "OFF",
15521          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15522          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15523          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15524          "on-chip with negotiation" : "invalid");
15525}
15526
15527/* must be called after sriov-enable */
15528static int
15529bxe_set_qm_cid_count(struct bxe_softc *sc)
15530{
15531    int cid_count = BXE_L2_MAX_CID(sc);
15532
15533    if (IS_SRIOV(sc)) {
15534        cid_count += BXE_VF_CIDS;
15535    }
15536
15537    if (CNIC_SUPPORT(sc)) {
15538        cid_count += CNIC_CID_MAX;
15539    }
15540
15541    return (roundup(cid_count, QM_CID_ROUND));
15542}
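/*
 * Example (values hedged): with BXE_L2_MAX_CID(sc) returning, say, 150 L2
 * CIDs, no SR-IOV and CNIC support adding CNIC_CID_MAX, the sum is rounded
 * up to the next multiple of QM_CID_ROUND by roundup(). If QM_CID_ROUND
 * were 1024 (an assumption here, not taken from this file), a raw count of
 * 150 would yield 1024 and a count of 1100 would yield 2048.
 */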
15543
15544static void
15545bxe_init_multi_cos(struct bxe_softc *sc)
15546{
15547    int pri, cos;
15548
15549    uint32_t pri_map = 0; /* XXX change to user config */
15550
15551    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15552        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15553        if (cos < sc->max_cos) {
15554            sc->prio_to_cos[pri] = cos;
15555        } else {
15556            BLOGW(sc, "Invalid COS %d for priority %d "
15557                      "(max COS is %d), setting to 0\n",
15558                  cos, pri, (sc->max_cos - 1));
15559            sc->prio_to_cos[pri] = 0;
15560        }
15561    }
15562}
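/*
 * Worked example for the priority-to-CoS map above (illustration only):
 * pri_map packs one 4-bit CoS value per priority, priority 0 in the lowest
 * nibble. With pri_map = 0x00000021 and max_cos = 3:
 *
 *     pri 0 -> nibble 0 = 1 -> prio_to_cos[0] = 1
 *     pri 1 -> nibble 1 = 2 -> prio_to_cos[1] = 2
 *     pri 2..7 -> 0         -> prio_to_cos[n] = 0
 *
 * Any nibble >= max_cos would be rejected with the warning and forced to 0.
 */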
15563
15564static int
15565bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15566{
15567    struct bxe_softc *sc;
15568    int error, result;
15569
15570    result = 0;
15571    error = sysctl_handle_int(oidp, &result, 0, req);
15572
15573    if (error || !req->newptr) {
15574        return (error);
15575    }
15576
15577    if (result == 1) {
15578        uint32_t  temp;
15579        sc = (struct bxe_softc *)arg1;
15580
15581        BLOGI(sc, "... dumping driver state ...\n");
15582        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15583        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15584    }
15585
15586    return (error);
15587}
15588
15589static int
15590bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15591{
15592    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15593    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15594    uint32_t *offset;
15595    uint64_t value = 0;
15596    int index = (int)arg2;
15597
15598    if (index >= BXE_NUM_ETH_STATS) {
15599        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15600        return (-1);
15601    }
15602
15603    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15604
15605    switch (bxe_eth_stats_arr[index].size) {
15606    case 4:
15607        value = (uint64_t)*offset;
15608        break;
15609    case 8:
15610        value = HILO_U64(*offset, *(offset + 1));
15611        break;
15612    default:
15613        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15614              index, bxe_eth_stats_arr[index].size);
15615        return (-1);
15616    }
15617
15618    return (sysctl_handle_64(oidp, &value, 0, req));
15619}
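/*
 * Note on the 8-byte case above (illustration, assuming HILO_U64(hi, lo)
 * composes ((uint64_t)hi << 32) + lo as in the companion headers): a 64-bit
 * statistic is stored as two consecutive 32-bit words with the high word
 * first, so offset[0] = 0x00000001, offset[1] = 0x00000002 is reported to
 * sysctl as the single value 0x100000002.
 */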
15620
15621static int
15622bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15623{
15624    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15625    uint32_t *eth_stats;
15626    uint32_t *offset;
15627    uint64_t value = 0;
15628    uint32_t q_stat = (uint32_t)arg2;
15629    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15630    uint32_t index = (q_stat & 0xffff);
15631
15632    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15633
15634    if (index >= BXE_NUM_ETH_Q_STATS) {
15635        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15636        return (-1);
15637    }
15638
15639    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15640
15641    switch (bxe_eth_q_stats_arr[index].size) {
15642    case 4:
15643        value = (uint64_t)*offset;
15644        break;
15645    case 8:
15646        value = HILO_U64(*offset, *(offset + 1));
15647        break;
15648    default:
15649        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15650              index, bxe_eth_q_stats_arr[index].size);
15651        return (-1);
15652    }
15653
15654    return (sysctl_handle_64(oidp, &value, 0, req));
15655}
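/*
 * The per-queue handler above decodes arg2 as a packed selector: the fastpath
 * index lives in the high 16 bits and the statistic index in the low 16 bits.
 * For example (illustration only), q_stat = 0x00020005 selects statistic 5 of
 * queue 2.
 */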
15656
15657static void bxe_force_link_reset(struct bxe_softc *sc)
15658{
15659
15660        bxe_acquire_phy_lock(sc);
15661        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15662        bxe_release_phy_lock(sc);
15663}
15664
15665static int
15666bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15667{
15668        struct bxe_softc *sc = (struct bxe_softc *)arg1;
15669        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15670        int rc = 0;
15671        int error;
15672        int result;
15673
15674
15675        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15676
15677        if (error || !req->newptr) {
15678                return (error);
15679        }
15680        if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15681                BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n", sc->bxe_pause_param);
15682                sc->bxe_pause_param = 8;
15683        }
15684
15685        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15686
15687
15688        if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15689                BLOGW(sc, "Autoneg not supported, pause_param %d\n", sc->bxe_pause_param);
15690                return -EINVAL;
15691        }
15692
15693        if (IS_MF(sc))
15694                return (0);
15695        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15696        if (result & ELINK_FLOW_CTRL_RX)
15697                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15698
15699        if (result & ELINK_FLOW_CTRL_TX)
15700                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15701        if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15702                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15703
15704        if (result & 0x400) {
15705                if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15706                        sc->link_params.req_flow_ctrl[cfg_idx] =
15707                                ELINK_FLOW_CTRL_AUTO;
15708                }
15709                sc->link_params.req_fc_auto_adv = 0;
15710                if (result & ELINK_FLOW_CTRL_RX)
15711                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15712
15713                if (result & ELINK_FLOW_CTRL_TX)
15714                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15715                if (!sc->link_params.req_fc_auto_adv)
15716                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15717        }
15718        if (IS_PF(sc)) {
15719                if (sc->link_vars.link_up) {
15720                        bxe_stats_handle(sc, STATS_EVENT_STOP);
15721                }
15722                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15723                        bxe_force_link_reset(sc);
15724                        bxe_acquire_phy_lock(sc);
15725
15726                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15727
15728                        bxe_release_phy_lock(sc);
15729
15730                        bxe_calc_fc_adv(sc);
15731                }
15732        }
15733        return (rc);
15734}
15735
15736
15737static void
15738bxe_add_sysctls(struct bxe_softc *sc)
15739{
15740    struct sysctl_ctx_list *ctx;
15741    struct sysctl_oid_list *children;
15742    struct sysctl_oid *queue_top, *queue;
15743    struct sysctl_oid_list *queue_top_children, *queue_children;
15744    char queue_num_buf[32];
15745    uint32_t q_stat;
15746    int i, j;
15747
15748    ctx = device_get_sysctl_ctx(sc->dev);
15749    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15750
15751    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15752                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15753                      "version");
15754
15755    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15756             BCM_5710_FW_MAJOR_VERSION,
15757             BCM_5710_FW_MINOR_VERSION,
15758             BCM_5710_FW_REVISION_VERSION,
15759             BCM_5710_FW_ENGINEERING_VERSION);
15760
15761    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15762        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15763         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15764         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15765         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15766                                                                "Unknown"));
15767    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15768                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15769                    "multifunction vnics per port");
15770
15771    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15772        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15773         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15774         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15775                                              "???GT/s"),
15776        sc->devinfo.pcie_link_width);
15777
15778    sc->debug = bxe_debug;
15779
15780#if __FreeBSD_version >= 900000
15781    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15782                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15783                      "bootcode version");
15784    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15785                      CTLFLAG_RD, sc->fw_ver_str, 0,
15786                      "firmware version");
15787    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15788                      CTLFLAG_RD, sc->mf_mode_str, 0,
15789                      "multifunction mode");
15790    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15791                      CTLFLAG_RD, sc->mac_addr_str, 0,
15792                      "mac address");
15793    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15794                      CTLFLAG_RD, sc->pci_link_str, 0,
15795                      "pci link status");
15796    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15797                    CTLFLAG_RW, &sc->debug,
15798                    "debug logging mode");
15799#else
15800    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15801                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15802                      "bootcode version");
15803    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15804                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15805                      "firmware version");
15806    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15807                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15808                      "multifunction mode");
15809    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15810                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15811                      "mac address");
15812    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15813                      CTLFLAG_RD, &sc->pci_link_str, 0,
15814                      "pci link status");
15815    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15816                    CTLFLAG_RW, &sc->debug, 0,
15817                    "debug logging mode");
15818#endif /* #if __FreeBSD_version >= 900000 */
15819
15820    sc->trigger_grcdump = 0;
15821    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15822                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15823                   "trigger a grcdump; must be set before"
15824                   " collecting the grcdump data");
15825
15826    sc->grcdump_started = 0;
15827    sc->grcdump_done = 0;
15828    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15829                   CTLFLAG_RD, &sc->grcdump_done, 0,
15830                   "set by driver when grcdump is done");
15831
15832    sc->rx_budget = bxe_rx_budget;
15833    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15834                    CTLFLAG_RW, &sc->rx_budget, 0,
15835                    "rx processing budget");
15836
15837    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
15838                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15839                    bxe_sysctl_pauseparam, "IU",
15840                    "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
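    /*
     * Illustrative usage from userland (the unit number is system
     * dependent), e.g.:
     *   sysctl dev.bxe.0.pause_param=3   -> request both RX and TX pause
     *   sysctl dev.bxe.0.pause_param=4   -> let autoneg pick the settings
     */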
15841
15842
15843    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15844                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15845                    bxe_sysctl_state, "IU", "dump driver state");
15846
15847    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15848        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15849                        bxe_eth_stats_arr[i].string,
15850                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15851                        bxe_sysctl_eth_stat, "LU",
15852                        bxe_eth_stats_arr[i].string);
15853    }
15854
15855    /* add a new parent node for all queues "dev.bxe.#.queue" */
15856    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15857                                CTLFLAG_RD, NULL, "queue");
15858    queue_top_children = SYSCTL_CHILDREN(queue_top);
15859
15860    for (i = 0; i < sc->num_queues; i++) {
15861        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15862        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15863        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15864                                queue_num_buf, CTLFLAG_RD, NULL,
15865                                "single queue");
15866        queue_children = SYSCTL_CHILDREN(queue);
15867
15868        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
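            /* pack the queue index into the upper 16 bits and the stat
             * index into the lower 16 bits; the handler is expected to
             * decode arg2 back into (queue, stat) to find the counter */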
15869            q_stat = ((i << 16) | j);
15870            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15871                            bxe_eth_q_stats_arr[j].string,
15872                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15873                            bxe_sysctl_eth_q_stat, "LU",
15874                            bxe_eth_q_stats_arr[j].string);
15875        }
15876    }
15877}
15878
15879static int
15880bxe_alloc_buf_rings(struct bxe_softc *sc)
15881{
15882#if __FreeBSD_version >= 901504
15883
15884    int i;
15885    struct bxe_fastpath *fp;
15886
15887    for (i = 0; i < sc->num_queues; i++) {
15888
15889        fp = &sc->fp[i];
15890
15891        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15892                                   M_NOWAIT, &fp->tx_mtx);
15893        if (fp->tx_br == NULL)
15894            return (-1);
15895    }
15896#endif
15897    return (0);
15898}
15899
15900static void
15901bxe_free_buf_rings(struct bxe_softc *sc)
15902{
15903#if __FreeBSD_version >= 901504
15904
15905    int i;
15906    struct bxe_fastpath *fp;
15907
15908    for (i = 0; i < sc->num_queues; i++) {
15909
15910        fp = &sc->fp[i];
15911
15912        if (fp->tx_br) {
15913            buf_ring_free(fp->tx_br, M_DEVBUF);
15914            fp->tx_br = NULL;
15915        }
15916    }
15917
15918#endif
15919}
15920
15921static void
15922bxe_init_fp_mutexs(struct bxe_softc *sc)
15923{
15924    int i;
15925    struct bxe_fastpath *fp;
15926
15927    for (i = 0; i < sc->num_queues; i++) {
15928
15929        fp = &sc->fp[i];
15930
15931        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15932            "bxe%d_fp%d_tx_lock", sc->unit, i);
15933        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15934
15935        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15936            "bxe%d_fp%d_rx_lock", sc->unit, i);
15937        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15938    }
15939}
15940
15941static void
15942bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15943{
15944    int i;
15945    struct bxe_fastpath *fp;
15946
15947    for (i = 0; i < sc->num_queues; i++) {
15948
15949        fp = &sc->fp[i];
15950
15951        if (mtx_initialized(&fp->tx_mtx)) {
15952            mtx_destroy(&fp->tx_mtx);
15953        }
15954
15955        if (mtx_initialized(&fp->rx_mtx)) {
15956            mtx_destroy(&fp->rx_mtx);
15957        }
15958    }
15959}
15960
15961
15962/*
15963 * Device attach function.
15964 *
15965 * Allocates device resources, performs secondary chip identification, and
15966 * initializes driver instance variables. This function is called from driver
15967 * load after a successful probe.
15968 *
15969 * Returns:
15970 *   0 = Success, >0 = Failure
15971 */
15972static int
15973bxe_attach(device_t dev)
15974{
15975    struct bxe_softc *sc;
15976
15977    sc = device_get_softc(dev);
15978
15979    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15980
15981    sc->state = BXE_STATE_CLOSED;
15982
15983    sc->dev  = dev;
15984    sc->unit = device_get_unit(dev);
15985
15986    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15987
15988    sc->pcie_bus    = pci_get_bus(dev);
15989    sc->pcie_device = pci_get_slot(dev);
15990    sc->pcie_func   = pci_get_function(dev);
15991
15992    /* enable bus master capability */
15993    pci_enable_busmaster(dev);
15994
15995    /* get the BARs */
15996    if (bxe_allocate_bars(sc) != 0) {
15997        return (ENXIO);
15998    }
15999
16000    /* initialize the mutexes */
16001    bxe_init_mutexes(sc);
16002
16003    /* prepare the periodic callout */
16004    callout_init(&sc->periodic_callout, 0);
16005
16006    /* prepare the chip taskqueue */
16007    sc->chip_tq_flags = CHIP_TQ_NONE;
16008    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16009             "bxe%d_chip_tq", sc->unit);
16010    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16011    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16012                                   taskqueue_thread_enqueue,
16013                                   &sc->chip_tq);
16014    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16015                            "%s", sc->chip_tq_name);
16016
16017    /* get device info and set params */
16018    if (bxe_get_device_info(sc) != 0) {
16019        BLOGE(sc, "getting device info\n");
16020        bxe_deallocate_bars(sc);
16021        pci_disable_busmaster(dev);
16022        return (ENXIO);
16023    }
16024
16025    /* get final misc params */
16026    bxe_get_params(sc);
16027
16028    /* set the default MTU (changed via ifconfig) */
16029    sc->mtu = ETHERMTU;
16030
16031    bxe_set_modes_bitmap(sc);
16032
16033    /* XXX
16034     * If in AFEX mode and the function is configured for FCoE
16035     * then bail... no L2 allowed.
16036     */
16037
16038    /* get phy settings from shmem and 'and' against admin settings */
16039    bxe_get_phy_info(sc);
16040
16041    /* initialize the FreeBSD ifnet interface */
16042    if (bxe_init_ifnet(sc) != 0) {
16043        bxe_release_mutexes(sc);
16044        bxe_deallocate_bars(sc);
16045        pci_disable_busmaster(dev);
16046        return (ENXIO);
16047    }
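    /*
     * Note: each failure path below backs out whatever setup has completed
     * by that point (cdev, ifnet, interrupts, buf rings, ILT, mutexes, BARs,
     * bus mastering), in roughly reverse order, before returning ENXIO.
     */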
16048
16049    if (bxe_add_cdev(sc) != 0) {
16050        if (sc->ifp != NULL) {
16051            ether_ifdetach(sc->ifp);
16052        }
16053        ifmedia_removeall(&sc->ifmedia);
16054        bxe_release_mutexes(sc);
16055        bxe_deallocate_bars(sc);
16056        pci_disable_busmaster(dev);
16057        return (ENXIO);
16058    }
16059
16060    /* allocate device interrupts */
16061    if (bxe_interrupt_alloc(sc) != 0) {
16062        bxe_del_cdev(sc);
16063        if (sc->ifp != NULL) {
16064            ether_ifdetach(sc->ifp);
16065        }
16066        ifmedia_removeall(&sc->ifmedia);
16067        bxe_release_mutexes(sc);
16068        bxe_deallocate_bars(sc);
16069        pci_disable_busmaster(dev);
16070        return (ENXIO);
16071    }
16072
16073    bxe_init_fp_mutexs(sc);
16074
16075    if (bxe_alloc_buf_rings(sc) != 0) {
16076        bxe_free_buf_rings(sc);
16077        bxe_interrupt_free(sc);
16078        bxe_del_cdev(sc);
16079        if (sc->ifp != NULL) {
16080            ether_ifdetach(sc->ifp);
16081        }
16082        ifmedia_removeall(&sc->ifmedia);
16083        bxe_release_mutexes(sc);
16084        bxe_deallocate_bars(sc);
16085        pci_disable_busmaster(dev);
16086        return (ENXIO);
16087    }
16088
16089    /* allocate ilt */
16090    if (bxe_alloc_ilt_mem(sc) != 0) {
16091        bxe_free_buf_rings(sc);
16092        bxe_interrupt_free(sc);
16093        bxe_del_cdev(sc);
16094        if (sc->ifp != NULL) {
16095            ether_ifdetach(sc->ifp);
16096        }
16097        ifmedia_removeall(&sc->ifmedia);
16098        bxe_release_mutexes(sc);
16099        bxe_deallocate_bars(sc);
16100        pci_disable_busmaster(dev);
16101        return (ENXIO);
16102    }
16103
16104    /* allocate the host hardware/software hsi structures */
16105    if (bxe_alloc_hsi_mem(sc) != 0) {
16106        bxe_free_ilt_mem(sc);
16107        bxe_free_buf_rings(sc);
16108        bxe_interrupt_free(sc);
16109        bxe_del_cdev(sc);
16110        if (sc->ifp != NULL) {
16111            ether_ifdetach(sc->ifp);
16112        }
16113        ifmedia_removeall(&sc->ifmedia);
16114        bxe_release_mutexes(sc);
16115        bxe_deallocate_bars(sc);
16116        pci_disable_busmaster(dev);
16117        return (ENXIO);
16118    }
16119
16120    /* need to reset chip if UNDI was active */
16121    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16122        /* init fw_seq */
16123        sc->fw_seq =
16124            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16125             DRV_MSG_SEQ_NUMBER_MASK);
16126        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16127        bxe_prev_unload(sc);
16128    }
16129
16130#if 1
16131    /* XXX */
16132    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16133#else
16134    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16135        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16136        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16137        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16138        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16139        bxe_dcbx_init_params(sc);
16140    } else {
16141        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16142    }
16143#endif
16144
16145    /* calculate qm_cid_count */
16146    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16147    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16148
16149    sc->max_cos = 1;
16150    bxe_init_multi_cos(sc);
16151
16152    bxe_add_sysctls(sc);
16153
16154    return (0);
16155}
16156
16157/*
16158 * Device detach function.
16159 *
16160 * Stops the controller, resets the controller, and releases resources.
16161 *
16162 * Returns:
16163 *   0 = Success, >0 = Failure
16164 */
16165static int
16166bxe_detach(device_t dev)
16167{
16168    struct bxe_softc *sc;
16169    if_t ifp;
16170
16171    sc = device_get_softc(dev);
16172
16173    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16174
16175    ifp = sc->ifp;
16176    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16177        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16178        return (EBUSY);
16179    }
16180
16181    bxe_del_cdev(sc);
16182
16183    /* stop the periodic callout */
16184    bxe_periodic_stop(sc);
16185
16186    /* stop the chip taskqueue */
16187    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16188    if (sc->chip_tq) {
16189        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16190        taskqueue_free(sc->chip_tq);
16191        sc->chip_tq = NULL;
16192    }
16193
16194    /* stop and reset the controller if it was open */
16195    if (sc->state != BXE_STATE_CLOSED) {
16196        BXE_CORE_LOCK(sc);
16197        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16198        sc->state = BXE_STATE_DISABLED;
16199        BXE_CORE_UNLOCK(sc);
16200    }
16201
16202    /* release the network interface */
16203    if (ifp != NULL) {
16204        ether_ifdetach(ifp);
16205    }
16206    ifmedia_removeall(&sc->ifmedia);
16207
16208    /* XXX do the following based on driver state... */
16209
16210    /* free the host hardware/software hsi structures */
16211    bxe_free_hsi_mem(sc);
16212
16213    /* free ilt */
16214    bxe_free_ilt_mem(sc);
16215
16216    bxe_free_buf_rings(sc);
16217
16218    /* release the interrupts */
16219    bxe_interrupt_free(sc);
16220
16221    /* Release the mutexes */
16222    bxe_destroy_fp_mutexs(sc);
16223    bxe_release_mutexes(sc);
16224
16225
16226    /* Release the PCIe BAR mapped memory */
16227    bxe_deallocate_bars(sc);
16228
16229    /* Release the FreeBSD interface. */
16230    if (sc->ifp != NULL) {
16231        if_free(sc->ifp);
16232    }
16233
16234    pci_disable_busmaster(dev);
16235
16236    return (0);
16237}
16238
16239/*
16240 * Device shutdown function.
16241 *
16242 * Stops and resets the controller.
16243 *
16244 * Returns:
16245 *   0 = Success
16246 */
16247static int
16248bxe_shutdown(device_t dev)
16249{
16250    struct bxe_softc *sc;
16251
16252    sc = device_get_softc(dev);
16253
16254    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16255
16256    /* stop the periodic callout */
16257    bxe_periodic_stop(sc);
16258
16259    BXE_CORE_LOCK(sc);
16260    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16261    BXE_CORE_UNLOCK(sc);
16262
16263    return (0);
16264}
16265
16266void
16267bxe_igu_ack_sb(struct bxe_softc *sc,
16268               uint8_t          igu_sb_id,
16269               uint8_t          segment,
16270               uint16_t         index,
16271               uint8_t          op,
16272               uint8_t          update)
16273{
16274    uint32_t igu_addr = sc->igu_base_addr;
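    /* interrupt-ack command slots are laid out at an 8-byte stride, one per
     * status block, starting at IGU_CMD_INT_ACK_BASE */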
16275    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16276    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16277}
16278
16279static void
16280bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16281                     uint8_t          func,
16282                     uint8_t          idu_sb_id,
16283                     uint8_t          is_pf)
16284{
16285    uint32_t data, ctl, cnt = 100;
16286    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16287    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16288    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16289    uint32_t sb_bit =  1 << (idu_sb_id%32);
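    /* the cleanup-done indication is a single bit per status block, packed
     * 32 per register; hence the divide/modulo by 32 above */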
16290    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16291    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16292
16293    /* Not supported in BC mode */
16294    if (CHIP_INT_MODE_IS_BC(sc)) {
16295        return;
16296    }
16297
16298    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16299             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16300            IGU_REGULAR_CLEANUP_SET |
16301            IGU_REGULAR_BCLEANUP);
16302
16303    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16304           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16305           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16306
16307    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16308            data, igu_addr_data);
16309    REG_WR(sc, igu_addr_data, data);
16310
16311    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16312                      BUS_SPACE_BARRIER_WRITE);
16313    mb();
16314
16315    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16316            ctl, igu_addr_ctl);
16317    REG_WR(sc, igu_addr_ctl, ctl);
16318
16319    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16320                      BUS_SPACE_BARRIER_WRITE);
16321    mb();
16322
16323    /* wait for clean up to finish */
16324    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16325        DELAY(20000);
16326    }
16327
16328    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16329        BLOGD(sc, DBG_LOAD,
16330              "Unable to finish IGU cleanup: "
16331              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16332              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16333    }
16334}
16335
16336static void
16337bxe_igu_clear_sb(struct bxe_softc *sc,
16338                 uint8_t          idu_sb_id)
16339{
16340    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16341}
16342
16343
16344
16345
16346
16347
16348
16349/*******************/
16350/* ECORE CALLBACKS */
16351/*******************/
16352
16353static void
16354bxe_reset_common(struct bxe_softc *sc)
16355{
16356    uint32_t val = 0x1400;
16357
16358    /* reset_common */
16359    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16360
16361    if (CHIP_IS_E3(sc)) {
16362        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16363        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16364    }
16365
16366    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16367}
16368
16369static void
16370bxe_common_init_phy(struct bxe_softc *sc)
16371{
16372    uint32_t shmem_base[2];
16373    uint32_t shmem2_base[2];
16374
16375    /* Avoid common init in case MFW supports LFA */
16376    if (SHMEM2_RD(sc, size) >
16377        (uint32_t)offsetof(struct shmem2_region,
16378                           lfa_host_addr[SC_PORT(sc)])) {
16379        return;
16380    }
16381
16382    shmem_base[0]  = sc->devinfo.shmem_base;
16383    shmem2_base[0] = sc->devinfo.shmem2_base;
16384
16385    if (!CHIP_IS_E1x(sc)) {
16386        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16387        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16388    }
16389
16390    bxe_acquire_phy_lock(sc);
16391    elink_common_init_phy(sc, shmem_base, shmem2_base,
16392                          sc->devinfo.chip_id, 0);
16393    bxe_release_phy_lock(sc);
16394}
16395
16396static void
16397bxe_pf_disable(struct bxe_softc *sc)
16398{
16399    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16400
16401    val &= ~IGU_PF_CONF_FUNC_EN;
16402
16403    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16404    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16405    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16406}
16407
16408static void
16409bxe_init_pxp(struct bxe_softc *sc)
16410{
16411    uint16_t devctl;
16412    int r_order, w_order;
16413
16414    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16415
16416    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16417
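    /*
     * In the PCIe Device Control register the max payload size field lives
     * in bits 7:5 and the max read request size field in bits 14:12; both
     * encode a power-of-two multiple of 128 bytes.
     */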
16418    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16419
16420    if (sc->mrrs == -1) {
16421        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16422    } else {
16423        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16424        r_order = sc->mrrs;
16425    }
16426
16427    ecore_init_pxp_arb(sc, r_order, w_order);
16428}
16429
16430static uint32_t
16431bxe_get_pretend_reg(struct bxe_softc *sc)
16432{
16433    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16434    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
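    /* e.g. absolute function 3 yields PXP2_REG_PGL_PRETEND_FUNC_F3, assuming
     * the per-function pretend registers are evenly spaced, which is what
     * this stride computation relies on */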
16435    return (base + (SC_ABS_FUNC(sc)) * stride);
16436}
16437
16438/*
16439 * Called only on E1H or E2.
16440 * When pretending to be PF, the pretend value is the function number 0..7.
16441 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16442 * combination.
16443 */
16444static int
16445bxe_pretend_func(struct bxe_softc *sc,
16446                 uint16_t         pretend_func_val)
16447{
16448    uint32_t pretend_reg;
16449
16450    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16451        return (-1);
16452    }
16453
16454    /* get my own pretend register */
16455    pretend_reg = bxe_get_pretend_reg(sc);
16456    REG_WR(sc, pretend_reg, pretend_func_val);
16457    REG_RD(sc, pretend_reg);
16458    return (0);
16459}
16460
16461static void
16462bxe_iov_init_dmae(struct bxe_softc *sc)
16463{
16464    return;
16465}
16466
16467static void
16468bxe_iov_init_dq(struct bxe_softc *sc)
16469{
16470    return;
16471}
16472
16473/* send a NIG loopback debug packet */
16474static void
16475bxe_lb_pckt(struct bxe_softc *sc)
16476{
16477    uint32_t wb_write[3];
16478
16479    /* Ethernet source and destination addresses */
16480    wb_write[0] = 0x55555555;
16481    wb_write[1] = 0x55555555;
16482    wb_write[2] = 0x20;     /* SOP */
16483    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16484
16485    /* NON-IP protocol */
16486    wb_write[0] = 0x09000000;
16487    wb_write[1] = 0x55555555;
16488    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16489    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16490}
16491
16492/*
16493 * Some of the internal memories are not directly readable from the driver.
16494 * To test them we send debug packets.
16495 */
16496static int
16497bxe_int_mem_test(struct bxe_softc *sc)
16498{
16499    int factor;
16500    int count, i;
16501    uint32_t val = 0;
16502
16503    if (CHIP_REV_IS_FPGA(sc)) {
16504        factor = 120;
16505    } else if (CHIP_REV_IS_EMUL(sc)) {
16506        factor = 200;
16507    } else {
16508        factor = 1;
16509    }
16510
16511    /* disable inputs of parser neighbor blocks */
16512    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16513    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16514    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16515    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16516
16517    /*  write 0 to parser credits for CFC search request */
16518    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16519
16520    /* send Ethernet packet */
16521    bxe_lb_pckt(sc);
16522
16523    /* TODO: do we need to reset the NIG statistics? */
16524    /* Wait until NIG register shows 1 packet of size 0x10 */
16525    count = 1000 * factor;
16526    while (count) {
16527        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16528        val = *BXE_SP(sc, wb_data[0]);
16529        if (val == 0x10) {
16530            break;
16531        }
16532
16533        DELAY(10000);
16534        count--;
16535    }
16536
16537    if (val != 0x10) {
16538        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16539        return (-1);
16540    }
16541
16542    /* wait until PRS register shows 1 packet */
16543    count = (1000 * factor);
16544    while (count) {
16545        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16546        if (val == 1) {
16547            break;
16548        }
16549
16550        DELAY(10000);
16551        count--;
16552    }
16553
16554    if (val != 0x1) {
16555        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16556        return (-2);
16557    }
16558
16559    /* Reset and init BRB, PRS */
16560    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16561    DELAY(50000);
16562    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16563    DELAY(50000);
16564    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16565    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16566
16567    /* Disable inputs of parser neighbor blocks */
16568    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16569    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16570    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16571    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16572
16573    /* Write 0 to parser credits for CFC search request */
16574    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16575
16576    /* send 10 Ethernet packets */
16577    for (i = 0; i < 10; i++) {
16578        bxe_lb_pckt(sc);
16579    }
16580
16581    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16582    count = (1000 * factor);
16583    while (count) {
16584        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16585        val = *BXE_SP(sc, wb_data[0]);
16586        if (val == 0xb0) {
16587            break;
16588        }
16589
16590        DELAY(10000);
16591        count--;
16592    }
16593
16594    if (val != 0xb0) {
16595        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16596        return (-3);
16597    }
16598
16599    /* The PRS register should now show 2 packets */
16600    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16601    if (val != 2) {
16602        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16603    }
16604
16605    /* Write 1 to parser credits for CFC search request */
16606    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16607
16608    /* Wait until PRS register shows 3 packets */
16609    DELAY(10000 * factor);
16610
16611    /* The PRS register should now show 3 packets */
16612    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16613    if (val != 3) {
16614        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16615    }
16616
16617    /* clear NIG EOP FIFO */
16618    for (i = 0; i < 11; i++) {
16619        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16620    }
16621
16622    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16623    if (val != 1) {
16624        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16625        return (-4);
16626    }
16627
16628    /* Reset and init BRB, PRS, NIG */
16629    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16630    DELAY(50000);
16631    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16632    DELAY(50000);
16633    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16634    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16635    if (!CNIC_SUPPORT(sc)) {
16636        /* set NIC mode */
16637        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16638    }
16639
16640    /* Enable inputs of parser neighbor blocks */
16641    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16642    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16643    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16644    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16645
16646    return (0);
16647}
16648
16649static void
16650bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16651{
16652    int is_required;
16653    uint32_t val;
16654    int port;
16655
16656    is_required = 0;
16657    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16658           SHARED_HW_CFG_FAN_FAILURE_MASK);
16659
16660    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16661        is_required = 1;
16662    }
16663    /*
16664     * The fan failure mechanism is usually related to the PHY type since
16665     * the power consumption of the board is affected by the PHY. Currently,
16666     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16667     */
16668    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16669        for (port = PORT_0; port < PORT_MAX; port++) {
16670            is_required |= elink_fan_failure_det_req(sc,
16671                                                     sc->devinfo.shmem_base,
16672                                                     sc->devinfo.shmem2_base,
16673                                                     port);
16674        }
16675    }
16676
16677    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16678
16679    if (is_required == 0) {
16680        return;
16681    }
16682
16683    /* Fan failure is indicated by SPIO 5 */
16684    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16685
16686    /* set to active low mode */
16687    val = REG_RD(sc, MISC_REG_SPIO_INT);
16688    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16689    REG_WR(sc, MISC_REG_SPIO_INT, val);
16690
16691    /* enable interrupt to signal the IGU */
16692    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16693    val |= MISC_SPIO_SPIO5;
16694    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16695}
16696
16697static void
16698bxe_enable_blocks_attention(struct bxe_softc *sc)
16699{
16700    uint32_t val;
16701
16702    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16703    if (!CHIP_IS_E1x(sc)) {
16704        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16705    } else {
16706        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16707    }
16708    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16709    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16710    /*
16711     * mask read length error interrupts in brb for parser
16712     * (parsing unit and 'checksum and crc' unit)
16713     * these errors are legal (PU reads fixed length and CAC can cause
16714     * read length error on truncated packets)
16715     */
16716    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16717    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16718    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16719    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16720    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16721    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16722/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16723/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16724    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16725    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16726    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16727/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16728/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16729    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16730    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16731    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16732    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16733/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16734/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16735
16736    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16737           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16738           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16739    if (!CHIP_IS_E1x(sc)) {
16740        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16741                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16742    }
16743    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16744
16745    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16746    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16747    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16748/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16749
16750    if (!CHIP_IS_E1x(sc)) {
16751        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16752        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16753    }
16754
16755    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16756    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16757/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16758    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16759}
16760
16761/**
16762 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16763 *
16764 * @sc:     driver handle
16765 */
16766static int
16767bxe_init_hw_common(struct bxe_softc *sc)
16768{
16769    uint8_t abs_func_id;
16770    uint32_t val;
16771
16772    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16773          SC_ABS_FUNC(sc));
16774
16775    /*
16776     * take the RESET lock to protect undi_unload flow from accessing
16777     * registers while we are resetting the chip
16778     */
16779    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16780
16781    bxe_reset_common(sc);
16782
16783    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16784
16785    val = 0xfffc;
16786    if (CHIP_IS_E3(sc)) {
16787        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16788        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16789    }
16790
16791    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16792
16793    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16794
16795    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16796    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16797
16798    if (!CHIP_IS_E1x(sc)) {
16799        /*
16800         * In 4-port or 2-port mode we need to turn off master-enable for
16801         * everyone. After that we turn it back on for ourselves. So we disregard
16802         * multi-function and always disable all functions on the given path;
16803         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
16804         */
16805        for (abs_func_id = SC_PATH(sc);
16806             abs_func_id < (E2_FUNC_MAX * 2);
16807             abs_func_id += 2) {
16808            if (abs_func_id == SC_ABS_FUNC(sc)) {
16809                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16810                continue;
16811            }
16812
16813            bxe_pretend_func(sc, abs_func_id);
16814
16815            /* clear pf enable */
16816            bxe_pf_disable(sc);
16817
16818            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16819        }
16820    }
16821
16822    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16823
16824    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16825
16826    if (CHIP_IS_E1(sc)) {
16827        /*
16828         * enable HW interrupt from PXP on USDM overflow
16829         * bit 16 on INT_MASK_0
16830         */
16831        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16832    }
16833
16834    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16835    bxe_init_pxp(sc);
16836
16837#ifdef __BIG_ENDIAN
16838    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16839    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16840    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16841    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16842    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16843    /* make sure this value is 0 */
16844    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16845
16846    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16847    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16848    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16849    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16850    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16851#endif
16852
16853    ecore_ilt_init_page_size(sc, INITOP_SET);
16854
16855    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16856        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16857    }
16858
16859    /* let the HW do its magic... */
16860    DELAY(100000);
16861
16862    /* finish PXP init */
16863    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16864    if (val != 1) {
16865        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16866            val);
16867        return (-1);
16868    }
16869    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16870    if (val != 1) {
16871        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16872        return (-1);
16873    }
16874
16875    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16876
16877    /*
16878     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16879     * entries with value "0" and valid bit on. This needs to be done by the
16880     * first PF that is loaded in a path (i.e. common phase)
16881     */
16882    if (!CHIP_IS_E1x(sc)) {
16883/*
16884 * In E2 there is a bug in the timers block that can cause function 6 / 7
16885 * (i.e. vnic3) to start even if it is marked as "scan-off".
16886 * This occurs when a different function (func2,3) is being marked
16887 * as "scan-off". Real-life scenario for example: if a driver is being
16888 * load-unloaded while func6,7 are down. This will cause the timer to access
16889 * the ilt, translate to a logical address and send a request to read/write.
16890 * Since the ilt for the function that is down is not valid, this will cause
16891 * a translation error which is unrecoverable.
16892 * The Workaround is intended to make sure that when this happens nothing
16893 * fatal will occur. The workaround:
16894 *  1.  First PF driver which loads on a path will:
16895 *      a.  After taking the chip out of reset, by using pretend,
16896 *          it will write "0" to the following registers of
16897 *          the other vnics.
16898 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16899 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16900 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16901 *          And for itself it will write '1' to
16902 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16903 *          dmae-operations (writing to pram for example.)
16904 *          note: can be done for only function 6,7 but cleaner this
16905 *            way.
16906 *      b.  Write zero+valid to the entire ILT.
16907 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16908 *          VNIC3 (of that port). The range allocated will be the
16909 *          entire ILT. This is needed to prevent ILT range error.
16910 *  2.  Any PF driver load flow:
16911 *      a.  ILT update with the physical addresses of the allocated
16912 *          logical pages.
16913 *      b.  Wait 20msec. - note that this timeout is needed to make
16914 *          sure there are no requests in one of the PXP internal
16915 *          queues with "old" ILT addresses.
16916 *      c.  PF enable in the PGLC.
16917 *      d.  Clear the was_error of the PF in the PGLC. (could have
16918 *          occurred while driver was down)
16919 *      e.  PF enable in the CFC (WEAK + STRONG)
16920 *      f.  Timers scan enable
16921 *  3.  PF driver unload flow:
16922 *      a.  Clear the Timers scan_en.
16923 *      b.  Polling for scan_on=0 for that PF.
16924 *      c.  Clear the PF enable bit in the PXP.
16925 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16926 *      e.  Write zero+valid to all ILT entries (The valid bit must
16927 *          stay set)
16928 *      f.  If this is VNIC 3 of a port then also init
16929 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16930 *          to the last entry in the ILT.
16931 *
16932 *      Notes:
16933 *      Currently the PF error in the PGLC is non-recoverable.
16934 *      In the future there will be a recovery routine for this error.
16935 *      Currently attention is masked.
16936 *      Having an MCP lock on the load/unload process does not guarantee that
16937 *      there is no Timer disable during Func6/7 enable. This is because the
16938 *      Timers scan is currently being cleared by the MCP on FLR.
16939 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16940 *      there is error before clearing it. But the flow above is simpler and
16941 *      more general.
16942 *      All ILT entries are written by zero+valid and not just PF6/7
16943 *      ILT entries since in the future the ILT entries allocation for
16944 *      PF-s might be dynamic.
16945 */
16946        struct ilt_client_info ilt_cli;
16947        struct ecore_ilt ilt;
16948
16949        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16950        memset(&ilt, 0, sizeof(struct ecore_ilt));
16951
16952        /* initialize dummy TM client */
16953        ilt_cli.start      = 0;
16954        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16955        ilt_cli.client_num = ILT_CLIENT_TM;
16956
16957        /*
16958         * Step 1: set zeroes to all ilt page entries with valid bit on
16959         * Step 2: set the timers first/last ilt entry to point
16960         * to the entire range to prevent ILT range error for 3rd/4th
16961         * vnic (this code assumes existence of the vnic)
16962         *
16963         * both steps performed by call to ecore_ilt_client_init_op()
16964         * with dummy TM client
16965         *
16966         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16967         * and his brother are split registers
16968         */
16969
16970        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16971        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16972        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16973
16974        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16975        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16976        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16977    }
16978
16979    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16980    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16981
16982    if (!CHIP_IS_E1x(sc)) {
16983        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16984                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
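        /* emulation and FPGA platforms run far slower than real silicon, so
         * allow many more 200 msec polls for ATC init to complete below */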
16985
16986        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16987        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16988
16989        /* let the HW do its magic... */
16990        do {
16991            DELAY(200000);
16992            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16993        } while (factor-- && (val != 1));
16994
16995        if (val != 1) {
16996            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16997            return (-1);
16998        }
16999    }
17000
17001    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17002
17003    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17004
17005    bxe_iov_init_dmae(sc);
17006
17007    /* clean the DMAE memory */
17008    sc->dmae_ready = 1;
17009    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17010
17011    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17012
17013    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17014
17015    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17016
17017    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17018
17019    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17020    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17021    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17022    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17023
17024    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17025
17026    /* QM queues pointers table */
17027    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17028
17029    /* soft reset pulse */
17030    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17031    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17032
17033    if (CNIC_SUPPORT(sc))
17034        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17035
17036    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17037    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17038    if (!CHIP_REV_IS_SLOW(sc)) {
17039        /* enable hw interrupt from doorbell Q */
17040        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17041    }
17042
17043    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17044
17045    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17046    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17047
17048    if (!CHIP_IS_E1(sc)) {
17049        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17050    }
17051
17052    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17053        if (IS_MF_AFEX(sc)) {
17054            /*
17055             * configure that AFEX and VLAN headers must be
17056             * received in AFEX mode
17057             */
17058            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17059            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17060            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17061            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17062            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17063        } else {
17064            /*
17065             * Bit-map indicating which L2 hdrs may appear
17066             * after the basic Ethernet header
17067             */
17068            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17069                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17070        }
17071    }
17072
17073    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17074    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17075    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17076    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17077
17078    if (!CHIP_IS_E1x(sc)) {
17079        /* reset VFC memories */
17080        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17081               VFC_MEMORIES_RST_REG_CAM_RST |
17082               VFC_MEMORIES_RST_REG_RAM_RST);
17083        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17084               VFC_MEMORIES_RST_REG_CAM_RST |
17085               VFC_MEMORIES_RST_REG_RAM_RST);
17086
17087        DELAY(20000);
17088    }
17089
17090    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17091    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17092    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17093    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17094
17095    /* sync semi rtc */
17096    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17097           0x80000000);
17098    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17099           0x80000000);
17100
17101    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17102    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17103    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17104
17105    if (!CHIP_IS_E1x(sc)) {
17106        if (IS_MF_AFEX(sc)) {
17107            /*
17108             * configure that AFEX and VLAN headers must be
17109             * sent in AFEX mode
17110             */
17111            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17112            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17113            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17114            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17115            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17116        } else {
17117            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17118                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17119        }
17120    }
17121
17122    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17123
17124    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17125
17126    if (CNIC_SUPPORT(sc)) {
17127        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17128        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17129        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17130        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17131        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17132        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17133        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17134        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17135        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17136        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17137    }
17138    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17139
17140    if (sizeof(union cdu_context) != 1024) {
17141        /* we currently assume that a context is 1024 bytes */
17142        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17143              (long)sizeof(union cdu_context));
17144    }
17145
17146    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17147    val = (4 << 24) + (0 << 12) + 1024;
17148    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17149
17150    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17151
17152    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17153    /* enable context validation interrupt from CFC */
17154    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17155
17156    /* set the thresholds to prevent CFC/CDU race */
17157    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17158    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17159
17160    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17161        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17162    }
17163
17164    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17165    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17166
17167    /* Reset PCIE errors for debug */
17168    REG_WR(sc, 0x2814, 0xffffffff);
17169    REG_WR(sc, 0x3820, 0xffffffff);
17170
17171    if (!CHIP_IS_E1x(sc)) {
17172        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17173               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17174                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17175        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17176               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17177                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17178                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17179        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17180               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17181                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17182                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17183    }
17184
17185    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17186
17187    if (!CHIP_IS_E1(sc)) {
17188        /* in E3 this is done in the per-port section */
17189        if (!CHIP_IS_E3(sc))
17190            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17191    }
17192
17193    if (CHIP_IS_E1H(sc)) {
17194        /* not applicable for E2 (and above ...) */
17195        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17196    }
17197
17198    if (CHIP_REV_IS_SLOW(sc)) {
17199        DELAY(200000);
17200    }
17201
17202    /* finish CFC init */
17203    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17204    if (val != 1) {
17205        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17206        return (-1);
17207    }
17208    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17209    if (val != 1) {
17210        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17211        return (-1);
17212    }
17213    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17214    if (val != 1) {
17215        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17216        return (-1);
17217    }
17218    REG_WR(sc, CFC_REG_DEBUG0, 0);
17219
17220    if (CHIP_IS_E1(sc)) {
17221        /* read NIG statistic to see if this is our first up since powerup */
17222        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17223        val = *BXE_SP(sc, wb_data[0]);
17224
17225        /* do internal memory self test */
17226        if ((val == 0) && bxe_int_mem_test(sc)) {
17227            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17228            return (-1);
17229        }
17230    }
17231
17232    bxe_setup_fan_failure_detection(sc);
17233
17234    /* clear PXP2 attentions */
17235    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17236
17237    bxe_enable_blocks_attention(sc);
17238
17239    if (!CHIP_REV_IS_SLOW(sc)) {
17240        ecore_enable_blocks_parity(sc);
17241    }
17242
17243    if (!BXE_NOMCP(sc)) {
17244        if (CHIP_IS_E1x(sc)) {
17245            bxe_common_init_phy(sc);
17246        }
17247    }
17248
17249    return (0);
17250}
17251
17252/**
17253 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17254 *
17255 * @sc:     driver handle
17256 */
17257static int
17258bxe_init_hw_common_chip(struct bxe_softc *sc)
17259{
17260    int rc = bxe_init_hw_common(sc);
17261
17262    if (rc) {
17263        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17264        return (rc);
17265    }
17266
17267    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17268    if (!BXE_NOMCP(sc)) {
17269        bxe_common_init_phy(sc);
17270    }
17271
17272    return (0);
17273}
17274
17275static int
17276bxe_init_hw_port(struct bxe_softc *sc)
17277{
17278    int port = SC_PORT(sc);
17279    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17280    uint32_t low, high;
17281    uint32_t val;
17282
17283    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17284
17285    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17286
17287    ecore_init_block(sc, BLOCK_MISC, init_phase);
17288    ecore_init_block(sc, BLOCK_PXP, init_phase);
17289    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17290
17291    /*
17292     * Timers bug workaround: the common phase disables the pf_master bit
17293     * in PGLUE, so we need to enable it here before any DMAE accesses are
17294     * attempted. Therefore we manually add the enable-master in the
17295     * port phase (it also happens in the function phase).
17296     */
17297    if (!CHIP_IS_E1x(sc)) {
17298        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17299    }
17300
17301    ecore_init_block(sc, BLOCK_ATC, init_phase);
17302    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17303    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17304    ecore_init_block(sc, BLOCK_QM, init_phase);
17305
17306    ecore_init_block(sc, BLOCK_TCM, init_phase);
17307    ecore_init_block(sc, BLOCK_UCM, init_phase);
17308    ecore_init_block(sc, BLOCK_CCM, init_phase);
17309    ecore_init_block(sc, BLOCK_XCM, init_phase);
17310
17311    /* QM cid (connection) count */
17312    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17313
17314    if (CNIC_SUPPORT(sc)) {
17315        ecore_init_block(sc, BLOCK_TM, init_phase);
17316        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17317        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17318    }
17319
17320    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17321
17322    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17323
17324    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17325        if (IS_MF(sc)) {
17326            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17327        } else if (sc->mtu > 4096) {
17328            if (BXE_ONE_PORT(sc)) {
17329                low = 160;
17330            } else {
17331                val = sc->mtu;
17332                /* (24*1024 + val*4)/256 */
17333                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
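                /* e.g. an MTU of 9000 gives low = 96 + 140 + 1 = 237,
                 * i.e. ceil((24*1024 + 9000*4)/256) */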
17334            }
17335        } else {
17336            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17337        }
17338        high = (low + 56); /* 14*1024/256 */
17339        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17340        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17341    }
17342
17343    if (CHIP_IS_MODE_4_PORT(sc)) {
17344        REG_WR(sc, SC_PORT(sc) ?
17345               BRB1_REG_MAC_GUARANTIED_1 :
17346               BRB1_REG_MAC_GUARANTIED_0, 40);
17347    }
17348
17349    ecore_init_block(sc, BLOCK_PRS, init_phase);
17350    if (CHIP_IS_E3B0(sc)) {
17351        if (IS_MF_AFEX(sc)) {
17352            /* configure headers for AFEX mode */
17353            REG_WR(sc, SC_PORT(sc) ?
17354                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17355                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17356            REG_WR(sc, SC_PORT(sc) ?
17357                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17358                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17359            REG_WR(sc, SC_PORT(sc) ?
17360                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17361                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17362        } else {
17363            /* Ovlan exists only if we are in multi-function +
17364             * switch-dependent mode; in switch-independent mode there
17365             * are no ovlan headers
17366             */
17367            REG_WR(sc, SC_PORT(sc) ?
17368                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17369                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17370                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17371        }
17372    }
17373
17374    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17375    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17376    ecore_init_block(sc, BLOCK_USDM, init_phase);
17377    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17378
17379    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17380    ecore_init_block(sc, BLOCK_USEM, init_phase);
17381    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17382    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17383
17384    ecore_init_block(sc, BLOCK_UPB, init_phase);
17385    ecore_init_block(sc, BLOCK_XPB, init_phase);
17386
17387    ecore_init_block(sc, BLOCK_PBF, init_phase);
17388
17389    if (CHIP_IS_E1x(sc)) {
17390        /* configure PBF to work without PAUSE mtu 9000 */
17391        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17392
17393        /* update threshold */
17394        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17395        /* update init credit */
17396        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17397
17398        /* probe changes */
17399        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17400        DELAY(50);
17401        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17402    }
17403
17404    if (CNIC_SUPPORT(sc)) {
17405        ecore_init_block(sc, BLOCK_SRC, init_phase);
17406    }
17407
17408    ecore_init_block(sc, BLOCK_CDU, init_phase);
17409    ecore_init_block(sc, BLOCK_CFC, init_phase);
17410
17411    if (CHIP_IS_E1(sc)) {
17412        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17413        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17414    }
17415    ecore_init_block(sc, BLOCK_HC, init_phase);
17416
17417    ecore_init_block(sc, BLOCK_IGU, init_phase);
17418
17419    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17420    /* init aeu_mask_attn_func_0/1:
17421     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17422     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17423     *             bits 4-7 are used for "per vn group attention" */
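    /* 0x7 = bits 0-2 only; 0xF7 = bits 0-2 plus 4-7, with bit 3 left masked */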
17424    val = IS_MF(sc) ? 0xF7 : 0x7;
17425    /* Enable DCBX attention for all but E1 */
17426    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17427    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17428
17429    ecore_init_block(sc, BLOCK_NIG, init_phase);
17430
17431    if (!CHIP_IS_E1x(sc)) {
17432        /* Bit-map indicating which L2 hdrs may appear after the
17433         * basic Ethernet header
17434         */
17435        if (IS_MF_AFEX(sc)) {
17436            REG_WR(sc, SC_PORT(sc) ?
17437                   NIG_REG_P1_HDRS_AFTER_BASIC :
17438                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17439        } else {
17440            REG_WR(sc, SC_PORT(sc) ?
17441                   NIG_REG_P1_HDRS_AFTER_BASIC :
17442                   NIG_REG_P0_HDRS_AFTER_BASIC,
17443                   IS_MF_SD(sc) ? 7 : 6);
17444        }
17445
17446        if (CHIP_IS_E3(sc)) {
17447            REG_WR(sc, SC_PORT(sc) ?
17448                   NIG_REG_LLH1_MF_MODE :
17449                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17450        }
17451    }
17452    if (!CHIP_IS_E3(sc)) {
17453        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17454    }
17455
17456    if (!CHIP_IS_E1(sc)) {
17457        /* 0x2 disable mf_ov, 0x1 enable */
17458        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17459               (IS_MF_SD(sc) ? 0x1 : 0x2));
17460
17461        if (!CHIP_IS_E1x(sc)) {
17462            val = 0;
17463            switch (sc->devinfo.mf_info.mf_mode) {
17464            case MULTI_FUNCTION_SD:
17465                val = 1;
17466                break;
17467            case MULTI_FUNCTION_SI:
17468            case MULTI_FUNCTION_AFEX:
17469                val = 2;
17470                break;
17471            }
17472
17473            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17474                        NIG_REG_LLH0_CLS_TYPE), val);
17475        }
17476        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17477        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17478        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17479    }
17480
17481    /* If SPIO5 is set to generate interrupts, enable it for this port */
17482    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17483    if (val & MISC_SPIO_SPIO5) {
17484        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17485                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17486        val = REG_RD(sc, reg_addr);
17487        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17488        REG_WR(sc, reg_addr, val);
17489    }
17490
17491    return (0);
17492}
17493
17494static uint32_t
17495bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17496                       uint32_t         reg,
17497                       uint32_t         expected,
17498                       uint32_t         poll_count)
17499{
17500    uint32_t cur_cnt = poll_count;
17501    uint32_t val;
17502
17503    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17504        DELAY(FLR_WAIT_INTERVAL);
17505    }
17506
17507    return (val);
17508}
17509
17510static int
17511bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17512                              uint32_t         reg,
17513                              char             *msg,
17514                              uint32_t         poll_cnt)
17515{
17516    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17517
17518    if (val != 0) {
17519        BLOGE(sc, "%s usage count=%d\n", msg, val);
17520        return (1);
17521    }
17522
17523    return (0);
17524}
17525
17526/* Common routines with VF FLR cleanup */
17527static uint32_t
17528bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17529{
17530    /* adjust polling timeout */
17531    if (CHIP_REV_IS_EMUL(sc)) {
17532        return (FLR_POLL_CNT * 2000);
17533    }
17534
17535    if (CHIP_REV_IS_FPGA(sc)) {
17536        return (FLR_POLL_CNT * 120);
17537    }
17538
17539    return (FLR_POLL_CNT);
17540}
17541
17542static int
17543bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17544                           uint32_t         poll_cnt)
17545{
17546    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17547    if (bxe_flr_clnup_poll_hw_counter(sc,
17548                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17549                                      "CFC PF usage counter timed out",
17550                                      poll_cnt)) {
17551        return (1);
17552    }
17553
17554    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17555    if (bxe_flr_clnup_poll_hw_counter(sc,
17556                                      DORQ_REG_PF_USAGE_CNT,
17557                                      "DQ PF usage counter timed out",
17558                                      poll_cnt)) {
17559        return (1);
17560    }
17561
17562    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17563    if (bxe_flr_clnup_poll_hw_counter(sc,
17564                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17565                                      "QM PF usage counter timed out",
17566                                      poll_cnt)) {
17567        return (1);
17568    }
17569
17570    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17571    if (bxe_flr_clnup_poll_hw_counter(sc,
17572                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17573                                      "Timers VNIC usage counter timed out",
17574                                      poll_cnt)) {
17575        return (1);
17576    }
17577
17578    if (bxe_flr_clnup_poll_hw_counter(sc,
17579                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17580                                      "Timers NUM_SCANS usage counter timed out",
17581                                      poll_cnt)) {
17582        return (1);
17583    }
17584
17585    /* Wait DMAE PF usage counter to zero */
17586    if (bxe_flr_clnup_poll_hw_counter(sc,
17587                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17588                                      "DMAE command register timed out",
17589                                      poll_cnt)) {
17590        return (1);
17591    }
17592
17593    return (0);
17594}
17595
17596#define OP_GEN_PARAM(param)                                            \
17597    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17598#define OP_GEN_TYPE(type)                                           \
17599    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17600#define OP_GEN_AGG_VECT(index)                                             \
17601    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17602
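/*
 * The OP_GEN_* macros assemble the 32-bit command written to
 * XSDM_REG_OPERATION_GEN: the completion parameter, completion type and
 * aggregated vector index (the function being cleaned up) are OR'ed
 * together with the vector-valid bit (see bxe_send_final_clnup() below).
 */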
17603static int
17604bxe_send_final_clnup(struct bxe_softc *sc,
17605                     uint8_t          clnup_func,
17606                     uint32_t         poll_cnt)
17607{
17608    uint32_t op_gen_command = 0;
17609    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17610                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17611    int ret = 0;
17612
17613    if (REG_RD(sc, comp_addr)) {
17614        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17615        return (1);
17616    }
17617
17618    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17619    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17620    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17621    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17622
17623    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17624    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17625
17626    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17627        BLOGE(sc, "FW final cleanup did not succeed\n");
17628        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17629              (REG_RD(sc, comp_addr)));
17630        bxe_panic(sc, ("FLR cleanup failed\n"));
17631        return (1);
17632    }
17633
17634    /* Zero the completion for the next FLR */
17635    REG_WR(sc, comp_addr, 0);
17636
17637    return (ret);
17638}
17639
17640static void
17641bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17642                       struct pbf_pN_buf_regs *regs,
17643                       uint32_t               poll_count)
17644{
17645    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17646    uint32_t cur_cnt = poll_count;
17647
17648    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17649    crd = crd_start = REG_RD(sc, regs->crd);
17650    init_crd = REG_RD(sc, regs->init_crd);
17651
17652    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17653    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17654    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17655
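    /*
     * The freed-credit delta below is computed with a signed subtraction
     * cast back to unsigned, so the comparison stays correct even if the
     * hardware counter wraps around.
     */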
17656    while ((crd != init_crd) &&
17657           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17658            (init_crd - crd_start))) {
17659        if (cur_cnt--) {
17660            DELAY(FLR_WAIT_INTERVAL);
17661            crd = REG_RD(sc, regs->crd);
17662            crd_freed = REG_RD(sc, regs->crd_freed);
17663        } else {
17664            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17665            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17666            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17667            break;
17668        }
17669    }
17670
17671    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17672          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17673}
17674
17675static void
17676bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17677                       struct pbf_pN_cmd_regs *regs,
17678                       uint32_t               poll_count)
17679{
17680    uint32_t occup, to_free, freed, freed_start;
17681    uint32_t cur_cnt = poll_count;
17682
17683    occup = to_free = REG_RD(sc, regs->lines_occup);
17684    freed = freed_start = REG_RD(sc, regs->lines_freed);
17685
17686    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17687    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17688
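    /*
     * Same wrap-safe delta as in bxe_pbf_pN_buf_flushed(): compare the
     * number of freed lines against the lines that were occupied.
     */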
17689    while (occup &&
17690           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17691        if (cur_cnt--) {
17692            DELAY(FLR_WAIT_INTERVAL);
17693            occup = REG_RD(sc, regs->lines_occup);
17694            freed = REG_RD(sc, regs->lines_freed);
17695        } else {
17696            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17697            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : c:%x\n", regs->pN, occup);
17698            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : c:%x\n", regs->pN, freed);
17699            break;
17700        }
17701    }
17702
17703    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17704          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17705}
17706
17707static void
17708bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17709{
17710    struct pbf_pN_cmd_regs cmd_regs[] = {
17711        {0, (CHIP_IS_E3B0(sc)) ?
17712            PBF_REG_TQ_OCCUPANCY_Q0 :
17713            PBF_REG_P0_TQ_OCCUPANCY,
17714            (CHIP_IS_E3B0(sc)) ?
17715            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17716            PBF_REG_P0_TQ_LINES_FREED_CNT},
17717        {1, (CHIP_IS_E3B0(sc)) ?
17718            PBF_REG_TQ_OCCUPANCY_Q1 :
17719            PBF_REG_P1_TQ_OCCUPANCY,
17720            (CHIP_IS_E3B0(sc)) ?
17721            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17722            PBF_REG_P1_TQ_LINES_FREED_CNT},
17723        {4, (CHIP_IS_E3B0(sc)) ?
17724            PBF_REG_TQ_OCCUPANCY_LB_Q :
17725            PBF_REG_P4_TQ_OCCUPANCY,
17726            (CHIP_IS_E3B0(sc)) ?
17727            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17728            PBF_REG_P4_TQ_LINES_FREED_CNT}
17729    };
17730
17731    struct pbf_pN_buf_regs buf_regs[] = {
17732        {0, (CHIP_IS_E3B0(sc)) ?
17733            PBF_REG_INIT_CRD_Q0 :
17734            PBF_REG_P0_INIT_CRD ,
17735            (CHIP_IS_E3B0(sc)) ?
17736            PBF_REG_CREDIT_Q0 :
17737            PBF_REG_P0_CREDIT,
17738            (CHIP_IS_E3B0(sc)) ?
17739            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17740            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17741        {1, (CHIP_IS_E3B0(sc)) ?
17742            PBF_REG_INIT_CRD_Q1 :
17743            PBF_REG_P1_INIT_CRD,
17744            (CHIP_IS_E3B0(sc)) ?
17745            PBF_REG_CREDIT_Q1 :
17746            PBF_REG_P1_CREDIT,
17747            (CHIP_IS_E3B0(sc)) ?
17748            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17749            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17750        {4, (CHIP_IS_E3B0(sc)) ?
17751            PBF_REG_INIT_CRD_LB_Q :
17752            PBF_REG_P4_INIT_CRD,
17753            (CHIP_IS_E3B0(sc)) ?
17754            PBF_REG_CREDIT_LB_Q :
17755            PBF_REG_P4_CREDIT,
17756            (CHIP_IS_E3B0(sc)) ?
17757            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17758            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17759    };
17760
17761    int i;
17762
17763    /* Verify the command queues are flushed P0, P1, P4 */
17764    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17765        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17766    }
17767
17768    /* Verify the transmission buffers are flushed P0, P1, P4 */
17769    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17770        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17771    }
17772}
17773
17774static void
17775bxe_hw_enable_status(struct bxe_softc *sc)
17776{
17777    uint32_t val;
17778
17779    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17780    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17781
17782    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17783    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17784
17785    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17786    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17787
17788    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17789    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17790
17791    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17792    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17793
17794    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17795    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17796
17797    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17798    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17799
17800    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17801    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17802}
17803
17804static int
17805bxe_pf_flr_clnup(struct bxe_softc *sc)
17806{
17807    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17808
17809    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17810
17811    /* Re-enable PF target read access */
17812    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17813
17814    /* Poll HW usage counters */
17815    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17816    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17817        return (-1);
17818    }
17819
17820    /* Zero the igu 'trailing edge' and 'leading edge' */
17821
17822    /* Send the FW cleanup command */
17823    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17824        return (-1);
17825    }
17826
17827    /* ATC cleanup */
17828
17829    /* Verify TX hw is flushed */
17830    bxe_tx_hw_flushed(sc, poll_cnt);
17831
17832    /* Wait 100ms (not adjusted according to platform) */
17833    DELAY(100000);
17834
17835    /* Verify no pending pci transactions */
17836    if (bxe_is_pcie_pending(sc)) {
17837        BLOGE(sc, "PCIE Transactions still pending\n");
17838    }
17839
17840    /* Debug */
17841    bxe_hw_enable_status(sc);
17842
17843    /*
17844     * Master enable - needed because WB DMAE writes are performed before
17845     * this register gets re-initialized as part of the regular function init
17846     */
17847    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17848
17849    return (0);
17850}
17851
17852static int
17853bxe_init_hw_func(struct bxe_softc *sc)
17854{
17855    int port = SC_PORT(sc);
17856    int func = SC_FUNC(sc);
17857    int init_phase = PHASE_PF0 + func;
17858    struct ecore_ilt *ilt = sc->ilt;
17859    uint16_t cdu_ilt_start;
17860    uint32_t addr, val;
17861    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17862    int i, main_mem_width, rc;
17863
17864    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17865
17866    /* FLR cleanup */
17867    if (!CHIP_IS_E1x(sc)) {
17868        rc = bxe_pf_flr_clnup(sc);
17869        if (rc) {
17870            BLOGE(sc, "FLR cleanup failed!\n");
17871            // XXX bxe_fw_dump(sc);
17872            // XXX bxe_idle_chk(sc);
17873            return (rc);
17874        }
17875    }
17876
17877    /* set MSI reconfigure capability */
17878    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17879        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17880        val = REG_RD(sc, addr);
17881        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17882        REG_WR(sc, addr, val);
17883    }
17884
17885    ecore_init_block(sc, BLOCK_PXP, init_phase);
17886    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17887
17888    ilt = sc->ilt;
17889    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17890
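    /*
     * Map each L2 connection-context page into the CDU client's ILT lines
     * so the hardware can resolve CIDs to their host memory pages.
     */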
17891    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17892        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17893        ilt->lines[cdu_ilt_start + i].page_mapping =
17894            sc->context[i].vcxt_dma.paddr;
17895        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17896    }
17897    ecore_ilt_init_op(sc, INITOP_SET);
17898
17899    /* Set NIC mode */
17900    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17901    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17902
17903    if (!CHIP_IS_E1x(sc)) {
17904        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17905
17906        /* Turn on a single ISR mode in IGU if driver is going to use
17907         * INT#x or MSI
17908         */
17909        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17910            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17911        }
17912
17913        /*
17914         * Timers workaround bug: function init part.
17915         * Need to wait 20 msec after initializing the ILT to make sure
17916         * there are no requests left in any of the PXP internal queues
17917         * with "old" ILT addresses
17918         */
17919        DELAY(20000);
17920
17921        /*
17922         * Master enable - needed because WB DMAE writes are performed
17923         * before this register gets re-initialized as part of the regular
17924         * function init
17925         */
17926        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17927        /* Enable the function in IGU */
17928        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17929    }
17930
17931    sc->dmae_ready = 1;
17932
17933    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17934
17935    if (!CHIP_IS_E1x(sc))
17936        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17937
17938    ecore_init_block(sc, BLOCK_ATC, init_phase);
17939    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17940    ecore_init_block(sc, BLOCK_NIG, init_phase);
17941    ecore_init_block(sc, BLOCK_SRC, init_phase);
17942    ecore_init_block(sc, BLOCK_MISC, init_phase);
17943    ecore_init_block(sc, BLOCK_TCM, init_phase);
17944    ecore_init_block(sc, BLOCK_UCM, init_phase);
17945    ecore_init_block(sc, BLOCK_CCM, init_phase);
17946    ecore_init_block(sc, BLOCK_XCM, init_phase);
17947    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17948    ecore_init_block(sc, BLOCK_USEM, init_phase);
17949    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17950    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17951
17952    if (!CHIP_IS_E1x(sc))
17953        REG_WR(sc, QM_REG_PF_EN, 1);
17954
17955    if (!CHIP_IS_E1x(sc)) {
17956        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17957        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17958        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17959        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17960    }
17961    ecore_init_block(sc, BLOCK_QM, init_phase);
17962
17963    ecore_init_block(sc, BLOCK_TM, init_phase);
17964    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17965
17966    bxe_iov_init_dq(sc);
17967
17968    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17969    ecore_init_block(sc, BLOCK_PRS, init_phase);
17970    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17971    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17972    ecore_init_block(sc, BLOCK_USDM, init_phase);
17973    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17974    ecore_init_block(sc, BLOCK_UPB, init_phase);
17975    ecore_init_block(sc, BLOCK_XPB, init_phase);
17976    ecore_init_block(sc, BLOCK_PBF, init_phase);
17977    if (!CHIP_IS_E1x(sc))
17978        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17979
17980    ecore_init_block(sc, BLOCK_CDU, init_phase);
17981
17982    ecore_init_block(sc, BLOCK_CFC, init_phase);
17983
17984    if (!CHIP_IS_E1x(sc))
17985        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17986
17987    if (IS_MF(sc)) {
17988        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17989        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17990    }
17991
17992    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17993
17994    /* HC init per function */
17995    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17996        if (CHIP_IS_E1H(sc)) {
17997            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17998
17999            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18000            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18001        }
18002        ecore_init_block(sc, BLOCK_HC, init_phase);
18003
18004    } else {
18005        int num_segs, sb_idx, prod_offset;
18006
18007        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18008
18009        if (!CHIP_IS_E1x(sc)) {
18010            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18011            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18012        }
18013
18014        ecore_init_block(sc, BLOCK_IGU, init_phase);
18015
18016        if (!CHIP_IS_E1x(sc)) {
18017            int dsb_idx = 0;
18018            /**
18019             * Producer memory:
18020             * E2 mode: address 0-135 match to the mapping memory;
18021             * 136 - PF0 default prod; 137 - PF1 default prod;
18022             * 138 - PF2 default prod; 139 - PF3 default prod;
18023             * 140 - PF0 attn prod;    141 - PF1 attn prod;
18024             * 142 - PF2 attn prod;    143 - PF3 attn prod;
18025             * 144-147 reserved.
18026             *
18027             * E1.5 mode - In backward compatible mode;
18028             * for non default SB; each even line in the memory
18029             * holds the U producer and each odd line hold
18030             * holds the U producer and each odd line holds
18031             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18032             * producers are for the DSB for each PF.
18033             * Each PF has five segments: (the order inside each
18034             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18035             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18036             * 144-147 attn prods;
18037             */
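            /*
             * The loops below zero every producer entry owned by this
             * function and then send zero-value consumer updates so the
             * IGU and the driver start in sync.
             */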
18038            /* non-default-status-blocks */
18039            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18040                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18041            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18042                prod_offset = (sc->igu_base_sb + sb_idx) *
18043                    num_segs;
18044
18045                for (i = 0; i < num_segs; i++) {
18046                    addr = IGU_REG_PROD_CONS_MEMORY +
18047                            (prod_offset + i) * 4;
18048                    REG_WR(sc, addr, 0);
18049                }
18050                /* send consumer update with value 0 */
18051                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18052                           USTORM_ID, 0, IGU_INT_NOP, 1);
18053                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18054            }
18055
18056            /* default-status-blocks */
18057            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18058                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18059
18060            if (CHIP_IS_MODE_4_PORT(sc))
18061                dsb_idx = SC_FUNC(sc);
18062            else
18063                dsb_idx = SC_VN(sc);
18064
18065            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18066                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18067                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18068
18069            /*
18070             * igu prods come in chunks of E1HVN_MAX (4) -
18071             * it does not matter what the current chip mode is
18072             */
18073            for (i = 0; i < (num_segs * E1HVN_MAX);
18074                 i += E1HVN_MAX) {
18075                addr = IGU_REG_PROD_CONS_MEMORY +
18076                            (prod_offset + i)*4;
18077                REG_WR(sc, addr, 0);
18078            }
18079            /* send consumer update with 0 */
18080            if (CHIP_INT_MODE_IS_BC(sc)) {
18081                bxe_ack_sb(sc, sc->igu_dsb_id,
18082                           USTORM_ID, 0, IGU_INT_NOP, 1);
18083                bxe_ack_sb(sc, sc->igu_dsb_id,
18084                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18085                bxe_ack_sb(sc, sc->igu_dsb_id,
18086                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18087                bxe_ack_sb(sc, sc->igu_dsb_id,
18088                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18089                bxe_ack_sb(sc, sc->igu_dsb_id,
18090                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18091            } else {
18092                bxe_ack_sb(sc, sc->igu_dsb_id,
18093                           USTORM_ID, 0, IGU_INT_NOP, 1);
18094                bxe_ack_sb(sc, sc->igu_dsb_id,
18095                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18096            }
18097            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18098
18099            /* !!! these should become driver const once
18100               rf-tool supports split-68 const */
18101            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18102            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18103            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18104            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18105            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18106            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18107        }
18108    }
18109
18110    /* Reset PCIE errors for debug */
18111    REG_WR(sc, 0x2114, 0xffffffff);
18112    REG_WR(sc, 0x2120, 0xffffffff);
18113
18114    if (CHIP_IS_E1x(sc)) {
18115        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18116        main_mem_base = HC_REG_MAIN_MEMORY +
18117                SC_PORT(sc) * (main_mem_size * 4);
18118        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18119        main_mem_width = 8;
18120
18121        val = REG_RD(sc, main_mem_prty_clr);
18122        if (val) {
18123            BLOGD(sc, DBG_LOAD,
18124                  "Parity errors in HC block during function init (0x%x)!\n",
18125                  val);
18126        }
18127
18128        /* Clear "false" parity errors in MSI-X table */
18129        for (i = main_mem_base;
18130             i < main_mem_base + main_mem_size * 4;
18131             i += main_mem_width) {
18132            bxe_read_dmae(sc, i, main_mem_width / 4);
18133            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18134                           i, main_mem_width / 4);
18135        }
18136        /* Clear HC parity attention */
18137        REG_RD(sc, main_mem_prty_clr);
18138    }
18139
18140#if 1
18141    /* Enable STORMs SP logging */
18142    REG_WR8(sc, BAR_USTRORM_INTMEM +
18143           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18144    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18145           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18146    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18147           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18148    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18149           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18150#endif
18151
18152    elink_phy_probe(&sc->link_params);
18153
18154    return (0);
18155}
18156
18157static void
18158bxe_link_reset(struct bxe_softc *sc)
18159{
18160    if (!BXE_NOMCP(sc)) {
18161        bxe_acquire_phy_lock(sc);
18162        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18163        bxe_release_phy_lock(sc);
18164    } else {
18165        if (!CHIP_REV_IS_SLOW(sc)) {
18166            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18167        }
18168    }
18169}
18170
18171static void
18172bxe_reset_port(struct bxe_softc *sc)
18173{
18174    int port = SC_PORT(sc);
18175    uint32_t val;
18176
18177    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18178    /* reset physical Link */
18179    bxe_link_reset(sc);
18180
18181    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18182
18183    /* Do not receive packets into the BRB */
18184    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18185    /* Do not direct received packets that are not destined for the MCP to the BRB */
18186    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18187               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18188
18189    /* Configure AEU */
18190    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18191
18192    DELAY(100000);
18193
18194    /* Check for BRB port occupancy */
18195    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18196    if (val) {
18197        BLOGD(sc, DBG_LOAD,
18198              "BRB1 is not empty, %d blocks are occupied\n", val);
18199    }
18200
18201    /* TODO: Close Doorbell port? */
18202}
18203
18204static void
18205bxe_ilt_wr(struct bxe_softc *sc,
18206           uint32_t         index,
18207           bus_addr_t       addr)
18208{
18209    int reg;
18210    uint32_t wb_write[2];
18211
18212    if (CHIP_IS_E1(sc)) {
18213        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18214    } else {
18215        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18216    }
18217
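    /* An ILT entry is 64 bits; write it as two 32-bit words in one DMAE transaction */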
18218    wb_write[0] = ONCHIP_ADDR1(addr);
18219    wb_write[1] = ONCHIP_ADDR2(addr);
18220    REG_WR_DMAE(sc, reg, wb_write, 2);
18221}
18222
18223static void
18224bxe_clear_func_ilt(struct bxe_softc *sc,
18225                   uint32_t         func)
18226{
18227    uint32_t i, base = FUNC_ILT_BASE(func);
18228    for (i = base; i < base + ILT_PER_FUNC; i++) {
18229        bxe_ilt_wr(sc, i, 0);
18230    }
18231}
18232
18233static void
18234bxe_reset_func(struct bxe_softc *sc)
18235{
18236    struct bxe_fastpath *fp;
18237    int port = SC_PORT(sc);
18238    int func = SC_FUNC(sc);
18239    int i;
18240
18241    /* Disable the function in the FW */
18242    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18243    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18244    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18245    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18246
18247    /* FP SBs */
18248    FOR_EACH_ETH_QUEUE(sc, i) {
18249        fp = &sc->fp[i];
18250        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18251                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18252                SB_DISABLED);
18253    }
18254
18255    /* SP SB */
18256    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18257            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18258            SB_DISABLED);
18259
18260    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18261        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18262    }
18263
18264    /* Configure IGU */
18265    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18266        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18267        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18268    } else {
18269        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18270        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18271    }
18272
18273    if (CNIC_LOADED(sc)) {
18274        /* Disable Timer scan */
18275        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18276        /*
18277         * Wait for at least 10ms and up to 2 second for the timers
18278         * Wait for at least 10ms and up to 2 seconds for the timers
18279         */
18280        for (i = 0; i < 200; i++) {
18281            DELAY(10000);
18282            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18283                break;
18284        }
18285    }
18286
18287    /* Clear ILT */
18288    bxe_clear_func_ilt(sc, func);
18289
18290    /*
18291     * Timers workaround bug for E2: if this is vnic-3,
18292     * we need to set the entire ILT range for these timers.
18293     */
18294    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18295        struct ilt_client_info ilt_cli;
18296        /* use dummy TM client */
18297        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18298        ilt_cli.start = 0;
18299        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18300        ilt_cli.client_num = ILT_CLIENT_TM;
18301
18302        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18303    }
18304
18305    /* this assumes that reset_port() is called before reset_func() */
18306    if (!CHIP_IS_E1x(sc)) {
18307        bxe_pf_disable(sc);
18308    }
18309
18310    sc->dmae_ready = 0;
18311}
18312
18313static int
18314bxe_gunzip_init(struct bxe_softc *sc)
18315{
18316    return (0);
18317}
18318
18319static void
18320bxe_gunzip_end(struct bxe_softc *sc)
18321{
18322    return;
18323}
18324
18325static int
18326bxe_init_firmware(struct bxe_softc *sc)
18327{
18328    if (CHIP_IS_E1(sc)) {
18329        ecore_init_e1_firmware(sc);
18330        sc->iro_array = e1_iro_arr;
18331    } else if (CHIP_IS_E1H(sc)) {
18332        ecore_init_e1h_firmware(sc);
18333        sc->iro_array = e1h_iro_arr;
18334    } else if (!CHIP_IS_E1x(sc)) {
18335        ecore_init_e2_firmware(sc);
18336        sc->iro_array = e2_iro_arr;
18337    } else {
18338        BLOGE(sc, "Unsupported chip revision\n");
18339        return (-1);
18340    }
18341
18342    return (0);
18343}
18344
18345static void
18346bxe_release_firmware(struct bxe_softc *sc)
18347{
18348    /* Do nothing */
18349    return;
18350}
18351
18352static int
18353ecore_gunzip(struct bxe_softc *sc,
18354             const uint8_t    *zbuf,
18355             int              len)
18356{
18357    /* XXX : Implement... */
18358    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18359    return (FALSE);
18360}
18361
18362static void
18363ecore_reg_wr_ind(struct bxe_softc *sc,
18364                 uint32_t         addr,
18365                 uint32_t         val)
18366{
18367    bxe_reg_wr_ind(sc, addr, val);
18368}
18369
18370static void
18371ecore_write_dmae_phys_len(struct bxe_softc *sc,
18372                          bus_addr_t       phys_addr,
18373                          uint32_t         addr,
18374                          uint32_t         len)
18375{
18376    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18377}
18378
18379void
18380ecore_storm_memset_struct(struct bxe_softc *sc,
18381                          uint32_t         addr,
18382                          size_t           size,
18383                          uint32_t         *data)
18384{
18385    uint8_t i;
18386    for (i = 0; i < size/4; i++) {
18387        REG_WR(sc, addr + (i * 4), data[i]);
18388    }
18389}
18390
18391
18392/*
18393 * character device - ioctl interface definitions
18394 */
18395
18396
18397#include "bxe_dump.h"
18398#include "bxe_ioctl.h"
18399#include <sys/conf.h>
18400
18401static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18402                struct thread *td);
18403
18404static struct cdevsw bxe_cdevsw = {
18405    .d_version = D_VERSION,
18406    .d_ioctl = bxe_eioctl,
18407    .d_name = "bxecnic",
18408};
18409
18410#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
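/*
 * On E2/E3 the path is the low bit of the PCIe function number;
 * E1x devices are single-path, so path 0 is always used.
 */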
18411
18412
18413#define DUMP_ALL_PRESETS        0x1FFF
18414#define DUMP_MAX_PRESETS        13
18415#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18416#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18417#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18418#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18419#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18420
18421#define IS_REG_IN_PRESET(presets, idx)  \
18422                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
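/* e.g. preset 3 maps to bit 2 of the presets bitmap: (presets & (1 << 2)) */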
18423
18424
18425static int
18426bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18427{
18428    if (CHIP_IS_E1(sc))
18429        return dump_num_registers[0][preset-1];
18430    else if (CHIP_IS_E1H(sc))
18431        return dump_num_registers[1][preset-1];
18432    else if (CHIP_IS_E2(sc))
18433        return dump_num_registers[2][preset-1];
18434    else if (CHIP_IS_E3A0(sc))
18435        return dump_num_registers[3][preset-1];
18436    else if (CHIP_IS_E3B0(sc))
18437        return dump_num_registers[4][preset-1];
18438    else
18439        return 0;
18440}
18441
18442static int
18443bxe_get_total_regs_len32(struct bxe_softc *sc)
18444{
18445    uint32_t preset_idx;
18446    int regdump_len32 = 0;
18447
18448
18449    /* Calculate the total preset regs length */
18450    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18451        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18452    }
18453
18454    return regdump_len32;
18455}
18456
18457static const uint32_t *
18458__bxe_get_page_addr_ar(struct bxe_softc *sc)
18459{
18460    if (CHIP_IS_E2(sc))
18461        return page_vals_e2;
18462    else if (CHIP_IS_E3(sc))
18463        return page_vals_e3;
18464    else
18465        return NULL;
18466}
18467
18468static uint32_t
18469__bxe_get_page_reg_num(struct bxe_softc *sc)
18470{
18471    if (CHIP_IS_E2(sc))
18472        return PAGE_MODE_VALUES_E2;
18473    else if (CHIP_IS_E3(sc))
18474        return PAGE_MODE_VALUES_E3;
18475    else
18476        return 0;
18477}
18478
18479static const uint32_t *
18480__bxe_get_page_write_ar(struct bxe_softc *sc)
18481{
18482    if (CHIP_IS_E2(sc))
18483        return page_write_regs_e2;
18484    else if (CHIP_IS_E3(sc))
18485        return page_write_regs_e3;
18486    else
18487        return NULL;
18488}
18489
18490static uint32_t
18491__bxe_get_page_write_num(struct bxe_softc *sc)
18492{
18493    if (CHIP_IS_E2(sc))
18494        return PAGE_WRITE_REGS_E2;
18495    else if (CHIP_IS_E3(sc))
18496        return PAGE_WRITE_REGS_E3;
18497    else
18498        return 0;
18499}
18500
18501static const struct reg_addr *
18502__bxe_get_page_read_ar(struct bxe_softc *sc)
18503{
18504    if (CHIP_IS_E2(sc))
18505        return page_read_regs_e2;
18506    else if (CHIP_IS_E3(sc))
18507        return page_read_regs_e3;
18508    else
18509        return NULL;
18510}
18511
18512static uint32_t
18513__bxe_get_page_read_num(struct bxe_softc *sc)
18514{
18515    if (CHIP_IS_E2(sc))
18516        return PAGE_READ_REGS_E2;
18517    else if (CHIP_IS_E3(sc))
18518        return PAGE_READ_REGS_E3;
18519    else
18520        return 0;
18521}
18522
18523static bool
18524bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18525{
18526    if (CHIP_IS_E1(sc))
18527        return IS_E1_REG(reg_info->chips);
18528    else if (CHIP_IS_E1H(sc))
18529        return IS_E1H_REG(reg_info->chips);
18530    else if (CHIP_IS_E2(sc))
18531        return IS_E2_REG(reg_info->chips);
18532    else if (CHIP_IS_E3A0(sc))
18533        return IS_E3A0_REG(reg_info->chips);
18534    else if (CHIP_IS_E3B0(sc))
18535        return IS_E3B0_REG(reg_info->chips);
18536    else
18537        return 0;
18538}
18539
18540static bool
18541bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18542{
18543    if (CHIP_IS_E1(sc))
18544        return IS_E1_REG(wreg_info->chips);
18545    else if (CHIP_IS_E1H(sc))
18546        return IS_E1H_REG(wreg_info->chips);
18547    else if (CHIP_IS_E2(sc))
18548        return IS_E2_REG(wreg_info->chips);
18549    else if (CHIP_IS_E3A0(sc))
18550        return IS_E3A0_REG(wreg_info->chips);
18551    else if (CHIP_IS_E3B0(sc))
18552        return IS_E3B0_REG(wreg_info->chips);
18553    else
18554        return 0;
18555}
18556
18557/**
18558 * bxe_read_pages_regs - read "paged" registers
18559 *
18560 * @sc          device handle
18561 * @p           output buffer
18562 *
18563 * Reads "paged" memories: memories that may only be read by first writing to a
18564 * specific address ("write address") and then reading from a specific address
18565 * ("read address"). There may be more than one write address per "page" and
18566 * more than one read address per write address.
18567 */
18568static void
18569bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18570{
18571    uint32_t i, j, k, n;
18572
18573    /* addresses of the paged registers */
18574    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18575    /* number of paged registers */
18576    int num_pages = __bxe_get_page_reg_num(sc);
18577    /* write addresses */
18578    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18579    /* number of write addresses */
18580    int write_num = __bxe_get_page_write_num(sc);
18581    /* read addresses info */
18582    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18583    /* number of read addresses */
18584    int read_num = __bxe_get_page_read_num(sc);
18585    uint32_t addr, size;
18586
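    /*
     * For each page value, load every write-address register with that
     * value and then sample the selected read addresses; the page value
     * selects which bank of the paged memory is visible at the read address.
     */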
18587    for (i = 0; i < num_pages; i++) {
18588        for (j = 0; j < write_num; j++) {
18589            REG_WR(sc, write_addr[j], page_addr[i]);
18590
18591            for (k = 0; k < read_num; k++) {
18592                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18593                    size = read_addr[k].size;
18594                    for (n = 0; n < size; n++) {
18595                        addr = read_addr[k].addr + n*4;
18596                        *p++ = REG_RD(sc, addr);
18597                    }
18598                }
18599            }
18600        }
18601    }
18602    return;
18603}
18604
18605
18606static int
18607bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18608{
18609    uint32_t i, j, addr;
18610    const struct wreg_addr *wreg_addr_p = NULL;
18611
18612    if (CHIP_IS_E1(sc))
18613        wreg_addr_p = &wreg_addr_e1;
18614    else if (CHIP_IS_E1H(sc))
18615        wreg_addr_p = &wreg_addr_e1h;
18616    else if (CHIP_IS_E2(sc))
18617        wreg_addr_p = &wreg_addr_e2;
18618    else if (CHIP_IS_E3A0(sc))
18619        wreg_addr_p = &wreg_addr_e3;
18620    else if (CHIP_IS_E3B0(sc))
18621        wreg_addr_p = &wreg_addr_e3b0;
18622    else
18623        return (-1);
18624
18625    /* Read the idle_chk registers */
18626    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18627        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18628            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18629            for (j = 0; j < idle_reg_addrs[i].size; j++)
18630                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18631        }
18632    }
18633
18634    /* Read the regular registers */
18635    for (i = 0; i < REGS_COUNT; i++) {
18636        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18637            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18638            for (j = 0; j < reg_addrs[i].size; j++)
18639                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18640        }
18641    }
18642
18643    /* Read the CAM registers */
18644    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18645        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18646        for (i = 0; i < wreg_addr_p->size; i++) {
18647            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18648
18649            /* In case of wreg_addr register, read additional
18650               registers from read_regs array
18651             */
18652            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18653                addr = *(wreg_addr_p->read_regs);
18654                *p++ = REG_RD(sc, addr + j*4);
18655            }
18656        }
18657    }
18658
18659    /* Paged registers are supported in E2 & E3 only */
18660    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18661        /* Read "paged" registers */
18662        bxe_read_pages_regs(sc, p, preset);
18663    }
18664
18665    return 0;
18666}
18667
18668int
18669bxe_grc_dump(struct bxe_softc *sc)
18670{
18671    int rval = 0;
18672    uint32_t preset_idx;
18673    uint8_t *buf;
18674    uint32_t size;
18675    struct  dump_header *d_hdr;
18676    uint32_t i;
18677    uint32_t reg_val;
18678    uint32_t reg_addr;
18679    uint32_t cmd_offset;
18680    struct ecore_ilt *ilt = SC_ILT(sc);
18681    struct bxe_fastpath *fp;
18682    struct ilt_client_info *ilt_cli;
18683    int grc_dump_size;
18684
18685
18686    if (sc->grcdump_done || sc->grcdump_started)
18687        return (rval);
18688
18689    sc->grcdump_started = 1;
18690    BLOGI(sc, "Started collecting grcdump\n");
18691
18692    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18693                sizeof(struct  dump_header);
18694
18695    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18696
18697    if (sc->grc_dump == NULL) {
18698        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18699        return(ENOMEM);
18700    }
18701
18702
18703
18704    /* Disable parity attentions while dumping, since the dump may cause
18705     * false alarms by reading registers that were never written. Parity
18706     * attentions are re-enabled right after the dump.
18707     */
18708
18709    /* Disable parity on path 0 */
18710    bxe_pretend_func(sc, 0);
18711
18712    ecore_disable_blocks_parity(sc);
18713
18714    /* Disable parity on path 1 */
18715    bxe_pretend_func(sc, 1);
18716    ecore_disable_blocks_parity(sc);
18717
18718    /* Return to current function */
18719    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18720
18721    buf = sc->grc_dump;
18722    d_hdr = sc->grc_dump;
18723
18724    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18725    d_hdr->version = BNX2X_DUMP_VERSION;
18726    d_hdr->preset = DUMP_ALL_PRESETS;
18727
18728    if (CHIP_IS_E1(sc)) {
18729        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18730    } else if (CHIP_IS_E1H(sc)) {
18731        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18732    } else if (CHIP_IS_E2(sc)) {
18733        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18734                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18735    } else if (CHIP_IS_E3A0(sc)) {
18736        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18737                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18738    } else if (CHIP_IS_E3B0(sc)) {
18739        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18740                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18741    }
18742
18743    buf += sizeof(struct  dump_header);
18744
18745    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18746
18747        /* Skip presets with IOR */
18748        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18749            (preset_idx == 11))
18750            continue;
18751
18752        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18753
18754        if (rval)
18755            break;
18756
18757        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18758
18759        buf += size;
18760    }
18761
18762    bxe_pretend_func(sc, 0);
18763    ecore_clear_blocks_parity(sc);
18764    ecore_enable_blocks_parity(sc);
18765
18766    bxe_pretend_func(sc, 1);
18767    ecore_clear_blocks_parity(sc);
18768    ecore_enable_blocks_parity(sc);
18769
18770    /* Return to current function */
18771    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18772
18773
18774
18775    if(sc->state == BXE_STATE_OPEN) {
18776        if(sc->fw_stats_req  != NULL) {
18777    		BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18778        			(uintmax_t)sc->fw_stats_req_mapping,
18779        			(uintmax_t)sc->fw_stats_data_mapping,
18780        			sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18781		}
18782		if(sc->def_sb != NULL) {
18783			BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18784        			(void *)sc->def_sb_dma.paddr, sc->def_sb,
18785        			sizeof(struct host_sp_status_block));
18786		}
18787		if(sc->eq_dma.vaddr != NULL) {
18788    		BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18789        			(uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18790		}
18791		if(sc->sp_dma.vaddr != NULL) {
18792    		BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18793        			(uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18794        			sizeof(struct bxe_slowpath));
18795		}
18796		if(sc->spq_dma.vaddr != NULL) {
18797    		BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18798        			(uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18799		}
18800		if(sc->gz_buf_dma.vaddr != NULL) {
18801    		BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18802        			(uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18803        			FW_BUF_SIZE);
18804		}
18805    	for (i = 0; i < sc->num_queues; i++) {
18806        	fp = &sc->fp[i];
18807			if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
18808                        fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
18809                        fp->rx_sge_dma.vaddr != NULL) {
18810
18811				BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18812            			(uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18813            			sizeof(union bxe_host_hc_status_block));
18814				BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18815            			(uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18816            			(BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18817        		BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18818            			(uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18819            			(BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18820        		BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18821            			(uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18822            			(BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18823        		BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18824            			(uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18825            			(BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18826    		}
18827		}
18828		if(ilt != NULL ) {
18829    		ilt_cli = &ilt->clients[1];
18830			if(ilt->lines != NULL) {
18831    		for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18832        		BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18833            			(uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18834            			((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18835    		}
18836			}
18837		}
18838
18839
18840    	cmd_offset = DMAE_REG_CMD_MEM;
18841    	for (i = 0; i < 224; i++) {
18842        	reg_addr = (cmd_offset +(i * 4));
18843        	reg_val = REG_RD(sc, reg_addr);
18844        	BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
18845            			reg_addr, reg_val);
18846    	}
18847	}
18848
18849    BLOGI(sc, "Collection of grcdump done\n");
18850    sc->grcdump_done = 1;
18851    return(rval);
18852}
18853
18854static int
18855bxe_add_cdev(struct bxe_softc *sc)
18856{
18857    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18858
18859    if (sc->eeprom == NULL) {
18860        BLOGW(sc, "Unable to allocate eeprom buffer\n");
18861        return (-1);
18862    }
18863
18864    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18865                            sc->ifp->if_dunit,
18866                            UID_ROOT,
18867                            GID_WHEEL,
18868                            0600,
18869                            "%s",
18870                            if_name(sc->ifp));
18871
18872    if (sc->ioctl_dev == NULL) {
18873        free(sc->eeprom, M_DEVBUF);
18874        sc->eeprom = NULL;
18875        return (-1);
18876    }
18877
18878    sc->ioctl_dev->si_drv1 = sc;
18879
18880    return (0);
18881}
18882
18883static void
18884bxe_del_cdev(struct bxe_softc *sc)
18885{
18886    if (sc->ioctl_dev != NULL)
18887        destroy_dev(sc->ioctl_dev);
18888
18889    if (sc->eeprom != NULL) {
18890        free(sc->eeprom, M_DEVBUF);
18891        sc->eeprom = NULL;
18892    }
18893    sc->ioctl_dev = NULL;
18894
18895    return;
18896}
18897
18898static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18899{
18900
18901    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
18902        return FALSE;
18903
18904    return TRUE;
18905}
18906
18907
18908static int
18909bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18910{
18911    int rval = 0;
18912
18913    if(!bxe_is_nvram_accessible(sc)) {
18914        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18915        return (-EAGAIN);
18916    }
18917    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18918
18919
18920    return (rval);
18921}
18922
18923static int
18924bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18925{
18926    int rval = 0;
18927
18928    if(!bxe_is_nvram_accessible(sc)) {
18929        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18930        return (-EAGAIN);
18931    }
18932    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18933
18934    return (rval);
18935}
18936
18937static int
18938bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18939{
18940    int rval = 0;
18941
18942    switch (eeprom->eeprom_cmd) {
18943
18944    case BXE_EEPROM_CMD_SET_EEPROM:
18945
18946        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18947                       eeprom->eeprom_data_len);
18948
18949        if (rval)
18950            break;
18951
18952        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18953                       eeprom->eeprom_data_len);
18954        break;
18955
18956    case BXE_EEPROM_CMD_GET_EEPROM:
18957
18958        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18959                       eeprom->eeprom_data_len);
18960
18961        if (rval) {
18962            break;
18963        }
18964
18965        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18966                       eeprom->eeprom_data_len);
18967        break;
18968
18969    default:
18970            rval = EINVAL;
18971            break;
18972    }
18973
18974    if (rval) {
18975        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
18976    }
18977
18978    return (rval);
18979}
18980
18981static int
18982bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18983{
18984    uint32_t ext_phy_config;
18985    int port = SC_PORT(sc);
18986    int cfg_idx = bxe_get_link_cfg_idx(sc);
18987
18988    dev_p->supported = sc->port.supported[cfg_idx] |
18989            (sc->port.supported[cfg_idx ^ 1] &
18990            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
18991    dev_p->advertising = sc->port.advertising[cfg_idx];
18992    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
18993        ELINK_ETH_PHY_SFP_1G_FIBER) {
18994        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
18995        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
18996    }
18997    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
18998        !(sc->flags & BXE_MF_FUNC_DIS)) {
18999        dev_p->duplex = sc->link_vars.duplex;
19000        if (IS_MF(sc) && !BXE_NOMCP(sc))
19001            dev_p->speed = bxe_get_mf_speed(sc);
19002        else
19003            dev_p->speed = sc->link_vars.line_speed;
19004    } else {
19005        dev_p->duplex = DUPLEX_UNKNOWN;
19006        dev_p->speed = SPEED_UNKNOWN;
19007    }
19008
19009    dev_p->port = bxe_media_detect(sc);
19010
19011    ext_phy_config = SHMEM_RD(sc,
19012                         dev_info.port_hw_config[port].external_phy_config);
19013    if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19014        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19015        dev_p->phy_address = sc->port.phy_addr;
19016    else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19017            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19018        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19019            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19020        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19021    else
19022        dev_p->phy_address = 0;
19023
19024    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19025        dev_p->autoneg = AUTONEG_ENABLE;
19026    else
19027        dev_p->autoneg = AUTONEG_DISABLE;
19028
19029
19030    return (0);
19031}
19032
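/*
 * Top-level dispatcher for the private ioctls issued against the /dev/bxe%d
 * node: GRC dumps, driver/firmware info, link settings, raw register and
 * PCI config access, the permanent MAC address and EEPROM read/write.
 */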
19033static int
19034bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19035        struct thread *td)
19036{
19037    struct bxe_softc    *sc;
19038    int                 rval = 0;
19039    device_t            pci_dev;
19040    bxe_grcdump_t       *dump = NULL;
19041    int                 grc_dump_size;
19042    bxe_drvinfo_t       *drv_infop = NULL;
19043    bxe_dev_setting_t   *dev_p;
19044    bxe_dev_setting_t   dev_set;
19045    bxe_get_regs_t      *reg_p;
19046    bxe_reg_rdw_t       *reg_rdw_p;
19047    bxe_pcicfg_rdw_t    *cfg_rdw_p;
19048    bxe_perm_mac_addr_t *mac_addr_p;
19049
19050
19051    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19052        return ENXIO;
19053
19054    pci_dev = sc->dev;
19055
19056    dump = (bxe_grcdump_t *)data;
19057
19058    switch(cmd) {
19059
19060        case BXE_GRC_DUMP_SIZE:
19061            dump->pci_func = sc->pcie_func;
19062            dump->grcdump_size =
19063                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19064                     sizeof(struct dump_header);
19065            break;
19066
19067        case BXE_GRC_DUMP:
19068
19069            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19070                                sizeof(struct dump_header);
19071            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19072                (dump->grcdump_size < grc_dump_size)) {
19073                rval = EINVAL;
19074                break;
19075            }
19076
19077            if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19078                (!sc->grcdump_started)) {
19079                rval =  bxe_grc_dump(sc);
19080            }
19081
19082            if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19083                (sc->grc_dump != NULL))  {
19084                dump->grcdump_dwords = grc_dump_size >> 2;
19085                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19086                free(sc->grc_dump, M_DEVBUF);
19087                sc->grc_dump = NULL;
19088                sc->grcdump_started = 0;
19089                sc->grcdump_done = 0;
19090            }
19091
19092            break;
19093
19094        case BXE_DRV_INFO:
19095            drv_infop = (bxe_drvinfo_t *)data;
19096            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19097            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19098                BXE_DRIVER_VERSION);
19099            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19100                sc->devinfo.bc_ver_str);
19101            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19102                "%s", sc->fw_ver_str);
19103            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19104            drv_infop->reg_dump_len =
19105                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19106                    + sizeof(struct dump_header);
19107            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19108                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19109            break;
19110
19111        case BXE_DEV_SETTING:
19112            dev_p = (bxe_dev_setting_t *)data;
19113            bxe_get_settings(sc, &dev_set);
19114            dev_p->supported = dev_set.supported;
19115            dev_p->advertising = dev_set.advertising;
19116            dev_p->speed = dev_set.speed;
19117            dev_p->duplex = dev_set.duplex;
19118            dev_p->port = dev_set.port;
19119            dev_p->phy_address = dev_set.phy_address;
19120            dev_p->autoneg = dev_set.autoneg;
19121
19122            break;
19123
19124        case BXE_GET_REGS:
19125
19126            reg_p = (bxe_get_regs_t *)data;
19127            grc_dump_size = reg_p->reg_buf_len;
19128
19129            if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19130                bxe_grc_dump(sc);
19131            }
19132            if((sc->grcdump_done) && (sc->grcdump_started) &&
19133                (sc->grc_dump != NULL))  {
19134                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19135                free(sc->grc_dump, M_DEVBUF);
19136                sc->grc_dump = NULL;
19137                sc->grcdump_started = 0;
19138                sc->grcdump_done = 0;
19139            }
19140
19141            break;
19142
19143        case BXE_RDW_REG:
19144            reg_rdw_p = (bxe_reg_rdw_t *)data;
19145            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19146                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19147                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19148
19149            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19150                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19151                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19152
19153            break;
19154
19155        case BXE_RDW_PCICFG:
19156            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19157            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19158
19159                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19160                                         cfg_rdw_p->cfg_width);
19161
19162            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19163                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19164                            cfg_rdw_p->cfg_width);
19165            } else {
19166                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19167            }
19168            break;
19169
19170        case BXE_MAC_ADDR:
19171            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19172            snprintf(mac_addr_p->mac_addr_str, sizeof(mac_addr_p->mac_addr_str),
19173                "%s", sc->mac_addr_str);
19174            break;
19175
19176        case BXE_EEPROM:
19177            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19178            break;
19179
19180
19181        default:
19182            break;
19183    }
19184
19185    return (rval);
19186}
19187
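/*
 * Illustrative sketch only (not part of the driver): a management tool
 * reaches bxe_eioctl() through the character device created above, named
 * after the interface (e.g. /dev/bxe0).  Assuming the bxe_grcdump_t layout
 * and BXE_* command codes are visible to userland via the driver's ioctl
 * header (header name and error handling are omitted here and are
 * assumptions), fetching a GRC dump would look roughly like:
 *
 *     bxe_grcdump_t dump = { 0 };
 *     int fd = open("/dev/bxe0", O_RDWR);
 *
 *     ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);       // driver fills grcdump_size
 *     dump.grcdump = malloc(dump.grcdump_size);  // caller-supplied buffer
 *     ioctl(fd, BXE_GRC_DUMP, &dump);            // driver copyout()s the dump
 *
 * The BXE_GRC_DUMP request only succeeds if a dump has been armed beforehand
 * (sc->trigger_grcdump) and the supplied buffer is at least as large as the
 * size reported by BXE_GRC_DUMP_SIZE, as checked in the handler above.
 */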