bxe.c revision 283274
1204431Sraj/*-
2204431Sraj * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3204431Sraj *
4204431Sraj * Redistribution and use in source and binary forms, with or without
5204431Sraj * modification, are permitted provided that the following conditions
6204431Sraj * are met:
7204431Sraj *
8204431Sraj * 1. Redistributions of source code must retain the above copyright
9204431Sraj *    notice, this list of conditions and the following disclaimer.
10204431Sraj * 2. Redistributions in binary form must reproduce the above copyright
11204431Sraj *    notice, this list of conditions and the following disclaimer in the
12204431Sraj *    documentation and/or other materials provided with the distribution.
13204431Sraj *
14204431Sraj * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
15204431Sraj * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16204431Sraj * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17204431Sraj * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18204431Sraj * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19204431Sraj * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20204431Sraj * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21204431Sraj * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22204431Sraj * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23204431Sraj * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24204431Sraj * THE POSSIBILITY OF SUCH DAMAGE.
25204431Sraj */
26204431Sraj
27204431Sraj#include <sys/cdefs.h>
28204431Sraj__FBSDID("$FreeBSD: head/sys/dev/bxe/bxe.c 283274 2015-05-22 01:44:07Z davidcs $");
29204431Sraj
30204431Sraj#define BXE_DRIVER_VERSION "1.78.79"
31204431Sraj
32204431Sraj#include "bxe.h"
33204431Sraj#include "ecore_sp.h"
34204431Sraj#include "ecore_init.h"
35204431Sraj#include "ecore_init_ops.h"
36204431Sraj
37204431Sraj#include "57710_int_offsets.h"
38204431Sraj#include "57711_int_offsets.h"
39204431Sraj#include "57712_int_offsets.h"
40204431Sraj
41204431Sraj/*
42204431Sraj * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43204431Sraj * explicitly here for older kernels that don't include this changeset.
44204431Sraj */
45204431Sraj#ifndef CTLTYPE_U64
46204431Sraj#define CTLTYPE_U64      CTLTYPE_QUAD
47204431Sraj#define sysctl_handle_64 sysctl_handle_quad
48204431Sraj#endif
49204431Sraj
50204431Sraj/*
51204431Sraj * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52204431Sraj * here as zero(0) for older kernels that don't include this changeset
53204431Sraj * thereby masking the functionality.
54204431Sraj */
55204431Sraj#ifndef CSUM_TCP_IPV6
56204431Sraj#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this at pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127#if 0
128    {
129        BRCM_VENDORID,
130        CHIP_NUM_57712_VF,
131        PCI_ANY_ID, PCI_ANY_ID,
132        "QLogic NetXtreme II BCM57712 VF 10GbE"
133    },
134#endif
135    {
136        BRCM_VENDORID,
137        CHIP_NUM_57800,
138        PCI_ANY_ID, PCI_ANY_ID,
139        "QLogic NetXtreme II BCM57800 10GbE"
140    },
141    {
142        BRCM_VENDORID,
143        CHIP_NUM_57800_MF,
144        PCI_ANY_ID, PCI_ANY_ID,
145        "QLogic NetXtreme II BCM57800 MF 10GbE"
146    },
147#if 0
148    {
149        BRCM_VENDORID,
150        CHIP_NUM_57800_VF,
151        PCI_ANY_ID, PCI_ANY_ID,
152        "QLogic NetXtreme II BCM57800 VF 10GbE"
153    },
154#endif
155    {
156        BRCM_VENDORID,
157        CHIP_NUM_57810,
158        PCI_ANY_ID, PCI_ANY_ID,
159        "QLogic NetXtreme II BCM57810 10GbE"
160    },
161    {
162        BRCM_VENDORID,
163        CHIP_NUM_57810_MF,
164        PCI_ANY_ID, PCI_ANY_ID,
165        "QLogic NetXtreme II BCM57810 MF 10GbE"
166    },
167#if 0
168    {
169        BRCM_VENDORID,
170        CHIP_NUM_57810_VF,
171        PCI_ANY_ID, PCI_ANY_ID,
172        "QLogic NetXtreme II BCM57810 VF 10GbE"
173    },
174#endif
175    {
176        BRCM_VENDORID,
177        CHIP_NUM_57811,
178        PCI_ANY_ID, PCI_ANY_ID,
179        "QLogic NetXtreme II BCM57811 10GbE"
180    },
181    {
182        BRCM_VENDORID,
183        CHIP_NUM_57811_MF,
184        PCI_ANY_ID, PCI_ANY_ID,
185        "QLogic NetXtreme II BCM57811 MF 10GbE"
186    },
187#if 0
188    {
189        BRCM_VENDORID,
190        CHIP_NUM_57811_VF,
191        PCI_ANY_ID, PCI_ANY_ID,
192        "QLogic NetXtreme II BCM57811 VF 10GbE"
193    },
194#endif
195    {
196        BRCM_VENDORID,
197        CHIP_NUM_57840_4_10,
198        PCI_ANY_ID, PCI_ANY_ID,
199        "QLogic NetXtreme II BCM57840 4x10GbE"
200    },
201#if 0
202    {
203        BRCM_VENDORID,
204        CHIP_NUM_57840_2_20,
205        PCI_ANY_ID, PCI_ANY_ID,
206        "QLogic NetXtreme II BCM57840 2x20GbE"
207    },
208#endif
209    {
210        BRCM_VENDORID,
211        CHIP_NUM_57840_MF,
212        PCI_ANY_ID, PCI_ANY_ID,
213        "QLogic NetXtreme II BCM57840 MF 10GbE"
214    },
215#if 0
216    {
217        BRCM_VENDORID,
218        CHIP_NUM_57840_VF,
219        PCI_ANY_ID, PCI_ANY_ID,
220        "QLogic NetXtreme II BCM57840 VF 10GbE"
221    },
222#endif
223    {
224        0, 0, 0, 0, NULL
225    }
226};
227
228MALLOC_DECLARE(M_BXE_ILT);
229MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
230
231/*
232 * FreeBSD device entry points.
233 */
234static int bxe_probe(device_t);
235static int bxe_attach(device_t);
236static int bxe_detach(device_t);
237static int bxe_shutdown(device_t);
238
239/*
240 * FreeBSD KLD module/device interface event handler method.
241 */
242static device_method_t bxe_methods[] = {
243    /* Device interface (device_if.h) */
244    DEVMETHOD(device_probe,     bxe_probe),
245    DEVMETHOD(device_attach,    bxe_attach),
246    DEVMETHOD(device_detach,    bxe_detach),
247    DEVMETHOD(device_shutdown,  bxe_shutdown),
248#if 0
249    DEVMETHOD(device_suspend,   bxe_suspend),
250    DEVMETHOD(device_resume,    bxe_resume),
251#endif
252    /* Bus interface (bus_if.h) */
253    DEVMETHOD(bus_print_child,  bus_generic_print_child),
254    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
255    KOBJMETHOD_END
256};
257
258/*
259 * FreeBSD KLD Module data declaration
260 */
261static driver_t bxe_driver = {
262    "bxe",                   /* module name */
263    bxe_methods,             /* event handler */
264    sizeof(struct bxe_softc) /* extra data */
265};
266
267/*
268 * FreeBSD dev class is needed to manage dev instances and
269 * to associate with a bus type
270 */
271static devclass_t bxe_devclass;
272
273MODULE_DEPEND(bxe, pci, 1, 1, 1);
274MODULE_DEPEND(bxe, ether, 1, 1, 1);
275DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
276
277/* resources needed for unloading a previously loaded device */
278
279#define BXE_PREV_WAIT_NEEDED 1
280struct mtx bxe_prev_mtx;
281MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
282struct bxe_prev_list_node {
283    LIST_ENTRY(bxe_prev_list_node) node;
284    uint8_t bus;
285    uint8_t slot;
286    uint8_t path;
287    uint8_t aer; /* XXX automatic error recovery */
288    uint8_t undi;
289};
290static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
291
292static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
293
294/* Tunable device values... */
295
296SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
297
298/* Debug */
299unsigned long bxe_debug = 0;
300SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
301             &bxe_debug, 0, "Debug logging mode");
302
303/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
304static int bxe_interrupt_mode = INTR_MODE_MSIX;
305SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
306           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
307
308/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
309static int bxe_queue_count = 4;
310SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
311           &bxe_queue_count, 0, "Multi-Queue queue count");
312
313/* max number of buffers per queue (default RX_BD_USABLE) */
314static int bxe_max_rx_bufs = 0;
315SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
316           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
317
318/* Host interrupt coalescing RX tick timer (usecs) */
319static int bxe_hc_rx_ticks = 25;
320SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
321           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
322
323/* Host interrupt coalescing TX tick timer (usecs) */
324static int bxe_hc_tx_ticks = 50;
325SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
326           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
327
328/* Maximum number of Rx packets to process at a time */
329static int bxe_rx_budget = 0xffffffff;
330SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
331           &bxe_rx_budget, 0, "Rx processing budget");
332
333/* Maximum LRO aggregation size */
334static int bxe_max_aggregation_size = 0;
335SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
336           &bxe_max_aggregation_size, 0, "max aggregation size");
337
338/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
339static int bxe_mrrs = -1;
340SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
341           &bxe_mrrs, 0, "PCIe maximum read request size");
342
343/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
344static int bxe_autogreeen = 0;
345SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
346           &bxe_autogreeen, 0, "AutoGrEEEn support");
347
348/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
349static int bxe_udp_rss = 0;
350SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
351           &bxe_udp_rss, 0, "UDP RSS support");
352
353
354#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
355
356#define STATS_OFFSET32(stat_name)                   \
357    (offsetof(struct bxe_eth_stats, stat_name) / 4)
358
359#define Q_STATS_OFFSET32(stat_name)                   \
360    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
361
362static const struct {
363    uint32_t offset;
364    uint32_t size;
365    uint32_t flags;
366#define STATS_FLAGS_PORT  1
367#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
368#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
369    char string[STAT_NAME_LEN];
370} bxe_eth_stats_arr[] = {
371    { STATS_OFFSET32(total_bytes_received_hi),
372                8, STATS_FLAGS_BOTH, "rx_bytes" },
373    { STATS_OFFSET32(error_bytes_received_hi),
374                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
375    { STATS_OFFSET32(total_unicast_packets_received_hi),
376                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
377    { STATS_OFFSET32(total_multicast_packets_received_hi),
378                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
379    { STATS_OFFSET32(total_broadcast_packets_received_hi),
380                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
381    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
382                8, STATS_FLAGS_PORT, "rx_crc_errors" },
383    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
384                8, STATS_FLAGS_PORT, "rx_align_errors" },
385    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
386                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
387    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
388                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
389    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
390                8, STATS_FLAGS_PORT, "rx_fragments" },
391    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
392                8, STATS_FLAGS_PORT, "rx_jabbers" },
393    { STATS_OFFSET32(no_buff_discard_hi),
394                8, STATS_FLAGS_BOTH, "rx_discards" },
395    { STATS_OFFSET32(mac_filter_discard),
396                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
397    { STATS_OFFSET32(mf_tag_discard),
398                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
399    { STATS_OFFSET32(pfc_frames_received_hi),
400                8, STATS_FLAGS_PORT, "pfc_frames_received" },
401    { STATS_OFFSET32(pfc_frames_sent_hi),
402                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
403    { STATS_OFFSET32(brb_drop_hi),
404                8, STATS_FLAGS_PORT, "rx_brb_discard" },
405    { STATS_OFFSET32(brb_truncate_hi),
406                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
407    { STATS_OFFSET32(pause_frames_received_hi),
408                8, STATS_FLAGS_PORT, "rx_pause_frames" },
409    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
410                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
411    { STATS_OFFSET32(nig_timer_max),
412                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
413    { STATS_OFFSET32(total_bytes_transmitted_hi),
414                8, STATS_FLAGS_BOTH, "tx_bytes" },
415    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
416                8, STATS_FLAGS_PORT, "tx_error_bytes" },
417    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
418                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
419    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
420                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
421    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
422                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
423    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
424                8, STATS_FLAGS_PORT, "tx_mac_errors" },
425    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
426                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
427    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
428                8, STATS_FLAGS_PORT, "tx_single_collisions" },
429    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
430                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
431    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
432                8, STATS_FLAGS_PORT, "tx_deferred" },
433    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
434                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
435    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
436                8, STATS_FLAGS_PORT, "tx_late_collisions" },
437    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
438                8, STATS_FLAGS_PORT, "tx_total_collisions" },
439    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
440                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
441    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
442                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
443    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
444                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
445    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
446                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
447    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
448                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
449    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
450                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
451    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
452                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
453    { STATS_OFFSET32(pause_frames_sent_hi),
454                8, STATS_FLAGS_PORT, "tx_pause_frames" },
455    { STATS_OFFSET32(total_tpa_aggregations_hi),
456                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
457    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
458                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
459    { STATS_OFFSET32(total_tpa_bytes_hi),
460                8, STATS_FLAGS_FUNC, "tpa_bytes"},
461#if 0
462    { STATS_OFFSET32(recoverable_error),
463                4, STATS_FLAGS_FUNC, "recoverable_errors" },
464    { STATS_OFFSET32(unrecoverable_error),
465                4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
466#endif
467    { STATS_OFFSET32(eee_tx_lpi),
468                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
469    { STATS_OFFSET32(rx_calls),
470                4, STATS_FLAGS_FUNC, "rx_calls"},
471    { STATS_OFFSET32(rx_pkts),
472                4, STATS_FLAGS_FUNC, "rx_pkts"},
473    { STATS_OFFSET32(rx_tpa_pkts),
474                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
475    { STATS_OFFSET32(rx_jumbo_sge_pkts),
476                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
477    { STATS_OFFSET32(rx_soft_errors),
478                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
479    { STATS_OFFSET32(rx_hw_csum_errors),
480                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
481    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
482                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
483    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
484                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
485    { STATS_OFFSET32(rx_budget_reached),
486                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
487    { STATS_OFFSET32(tx_pkts),
488                4, STATS_FLAGS_FUNC, "tx_pkts"},
489    { STATS_OFFSET32(tx_soft_errors),
490                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
491    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
492                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
493    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
494                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
495    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
496                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
497    { STATS_OFFSET32(tx_ofld_frames_lso),
498                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
499    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
500                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
501    { STATS_OFFSET32(tx_encap_failures),
502                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
503    { STATS_OFFSET32(tx_hw_queue_full),
504                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
505    { STATS_OFFSET32(tx_hw_max_queue_depth),
506                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
507    { STATS_OFFSET32(tx_dma_mapping_failure),
508                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
509    { STATS_OFFSET32(tx_max_drbr_queue_depth),
510                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
511    { STATS_OFFSET32(tx_window_violation_std),
512                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
513    { STATS_OFFSET32(tx_window_violation_tso),
514                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
515#if 0
516    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
517                4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"},
518    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
519                4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"},
520#endif
521    { STATS_OFFSET32(tx_chain_lost_mbuf),
522                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
523    { STATS_OFFSET32(tx_frames_deferred),
524                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
525    { STATS_OFFSET32(tx_queue_xoff),
526                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
527    { STATS_OFFSET32(mbuf_defrag_attempts),
528                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
529    { STATS_OFFSET32(mbuf_defrag_failures),
530                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
531    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
532                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
533    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
534                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
535    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
536                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
537    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
538                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
539    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
540                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
541    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
542                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
543    { STATS_OFFSET32(mbuf_alloc_tx),
544                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
545    { STATS_OFFSET32(mbuf_alloc_rx),
546                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
547    { STATS_OFFSET32(mbuf_alloc_sge),
548                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
549    { STATS_OFFSET32(mbuf_alloc_tpa),
550                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
551};
552
553static const struct {
554    uint32_t offset;
555    uint32_t size;
556    char string[STAT_NAME_LEN];
557} bxe_eth_q_stats_arr[] = {
558    { Q_STATS_OFFSET32(total_bytes_received_hi),
559                8, "rx_bytes" },
560    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
561                8, "rx_ucast_packets" },
562    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
563                8, "rx_mcast_packets" },
564    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
565                8, "rx_bcast_packets" },
566    { Q_STATS_OFFSET32(no_buff_discard_hi),
567                8, "rx_discards" },
568    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
569                8, "tx_bytes" },
570    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
571                8, "tx_ucast_packets" },
572    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
573                8, "tx_mcast_packets" },
574    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
575                8, "tx_bcast_packets" },
576    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
577                8, "tpa_aggregations" },
578    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
579                8, "tpa_aggregated_frames"},
580    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
581                8, "tpa_bytes"},
582    { Q_STATS_OFFSET32(rx_calls),
583                4, "rx_calls"},
584    { Q_STATS_OFFSET32(rx_pkts),
585                4, "rx_pkts"},
586    { Q_STATS_OFFSET32(rx_tpa_pkts),
587                4, "rx_tpa_pkts"},
588    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
589                4, "rx_jumbo_sge_pkts"},
590    { Q_STATS_OFFSET32(rx_soft_errors),
591                4, "rx_soft_errors"},
592    { Q_STATS_OFFSET32(rx_hw_csum_errors),
593                4, "rx_hw_csum_errors"},
594    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
595                4, "rx_ofld_frames_csum_ip"},
596    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
597                4, "rx_ofld_frames_csum_tcp_udp"},
598    { Q_STATS_OFFSET32(rx_budget_reached),
599                4, "rx_budget_reached"},
600    { Q_STATS_OFFSET32(tx_pkts),
601                4, "tx_pkts"},
602    { Q_STATS_OFFSET32(tx_soft_errors),
603                4, "tx_soft_errors"},
604    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
605                4, "tx_ofld_frames_csum_ip"},
606    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
607                4, "tx_ofld_frames_csum_tcp"},
608    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
609                4, "tx_ofld_frames_csum_udp"},
610    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
611                4, "tx_ofld_frames_lso"},
612    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
613                4, "tx_ofld_frames_lso_hdr_splits"},
614    { Q_STATS_OFFSET32(tx_encap_failures),
615                4, "tx_encap_failures"},
616    { Q_STATS_OFFSET32(tx_hw_queue_full),
617                4, "tx_hw_queue_full"},
618    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
619                4, "tx_hw_max_queue_depth"},
620    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
621                4, "tx_dma_mapping_failure"},
622    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
623                4, "tx_max_drbr_queue_depth"},
624    { Q_STATS_OFFSET32(tx_window_violation_std),
625                4, "tx_window_violation_std"},
626    { Q_STATS_OFFSET32(tx_window_violation_tso),
627                4, "tx_window_violation_tso"},
628#if 0
629    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
630                4, "tx_unsupported_tso_request_ipv6"},
631    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
632                4, "tx_unsupported_tso_request_not_tcp"},
633#endif
634    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
635                4, "tx_chain_lost_mbuf"},
636    { Q_STATS_OFFSET32(tx_frames_deferred),
637                4, "tx_frames_deferred"},
638    { Q_STATS_OFFSET32(tx_queue_xoff),
639                4, "tx_queue_xoff"},
640    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
641                4, "mbuf_defrag_attempts"},
642    { Q_STATS_OFFSET32(mbuf_defrag_failures),
643                4, "mbuf_defrag_failures"},
644    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
645                4, "mbuf_rx_bd_alloc_failed"},
646    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
647                4, "mbuf_rx_bd_mapping_failed"},
648    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
649                4, "mbuf_rx_tpa_alloc_failed"},
650    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
651                4, "mbuf_rx_tpa_mapping_failed"},
652    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
653                4, "mbuf_rx_sge_alloc_failed"},
654    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
655                4, "mbuf_rx_sge_mapping_failed"},
656    { Q_STATS_OFFSET32(mbuf_alloc_tx),
657                4, "mbuf_alloc_tx"},
658    { Q_STATS_OFFSET32(mbuf_alloc_rx),
659                4, "mbuf_alloc_rx"},
660    { Q_STATS_OFFSET32(mbuf_alloc_sge),
661                4, "mbuf_alloc_sge"},
662    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
663                4, "mbuf_alloc_tpa"}
664};
665
666#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
667#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
668
669
670static void    bxe_cmng_fns_init(struct bxe_softc *sc,
671                                 uint8_t          read_cfg,
672                                 uint8_t          cmng_type);
673static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
674static void    storm_memset_cmng(struct bxe_softc *sc,
675                                 struct cmng_init *cmng,
676                                 uint8_t          port);
677static void    bxe_set_reset_global(struct bxe_softc *sc);
678static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
679static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
680                                 int              engine);
681static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
682static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
683                                   uint8_t          *global,
684                                   uint8_t          print);
685static void    bxe_int_disable(struct bxe_softc *sc);
686static int     bxe_release_leader_lock(struct bxe_softc *sc);
687static void    bxe_pf_disable(struct bxe_softc *sc);
688static void    bxe_free_fp_buffers(struct bxe_softc *sc);
689static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
690                                      struct bxe_fastpath *fp,
691                                      uint16_t            rx_bd_prod,
692                                      uint16_t            rx_cq_prod,
693                                      uint16_t            rx_sge_prod);
694static void    bxe_link_report_locked(struct bxe_softc *sc);
695static void    bxe_link_report(struct bxe_softc *sc);
696static void    bxe_link_status_update(struct bxe_softc *sc);
697static void    bxe_periodic_callout_func(void *xsc);
698static void    bxe_periodic_start(struct bxe_softc *sc);
699static void    bxe_periodic_stop(struct bxe_softc *sc);
700static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
701                                    uint16_t prev_index,
702                                    uint16_t index);
703static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
704                                     int                 queue);
705static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
706                                     uint16_t            index);
707static uint8_t bxe_txeof(struct bxe_softc *sc,
708                         struct bxe_fastpath *fp);
709static void    bxe_task_fp(struct bxe_fastpath *fp);
710static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
711                                     struct mbuf      *m,
712                                     uint8_t          contents);
713static int     bxe_alloc_mem(struct bxe_softc *sc);
714static void    bxe_free_mem(struct bxe_softc *sc);
715static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
716static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
717static int     bxe_interrupt_attach(struct bxe_softc *sc);
718static void    bxe_interrupt_detach(struct bxe_softc *sc);
719static void    bxe_set_rx_mode(struct bxe_softc *sc);
720static int     bxe_init_locked(struct bxe_softc *sc);
721static int     bxe_stop_locked(struct bxe_softc *sc);
722static __noinline int bxe_nic_load(struct bxe_softc *sc,
723                                   int              load_mode);
724static __noinline int bxe_nic_unload(struct bxe_softc *sc,
725                                     uint32_t         unload_mode,
726                                     uint8_t          keep_link);
727
728static void bxe_handle_sp_tq(void *context, int pending);
729static void bxe_handle_rx_mode_tq(void *context, int pending);
730static void bxe_handle_fp_tq(void *context, int pending);
731
732
733/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
734uint32_t
735calc_crc32(uint8_t  *crc32_packet,
736           uint32_t crc32_length,
737           uint32_t crc32_seed,
738           uint8_t  complement)
739{
740   uint32_t byte         = 0;
741   uint32_t bit          = 0;
742   uint8_t  msb          = 0;
743   uint32_t temp         = 0;
744   uint32_t shft         = 0;
745   uint8_t  current_byte = 0;
746   uint32_t crc32_result = crc32_seed;
747   const uint32_t CRC32_POLY = 0x1edc6f41;
748
749   if ((crc32_packet == NULL) ||
750       (crc32_length == 0) ||
751       ((crc32_length % 8) != 0))
752    {
753        return (crc32_result);
754    }
755
756    for (byte = 0; byte < crc32_length; byte = byte + 1)
757    {
758        current_byte = crc32_packet[byte];
759        for (bit = 0; bit < 8; bit = bit + 1)
760        {
761            /* msb = crc32_result[31]; */
762            msb = (uint8_t)(crc32_result >> 31);
763
764            crc32_result = crc32_result << 1;
765
766            /* it (msb != current_byte[bit]) */
767            if (msb != (0x1 & (current_byte >> bit)))
768            {
769                crc32_result = crc32_result ^ CRC32_POLY;
770                /* crc32_result[0] = 1 */
771                crc32_result |= 1;
772            }
773        }
774    }
775
776    /* Last step is to:
777     * 1. "mirror" every bit
778     * 2. swap the 4 bytes
779     * 3. complement each bit
780     */
781
782    /* Mirror */
783    temp = crc32_result;
784    shft = sizeof(crc32_result) * 8 - 1;
785
786    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
787    {
788        temp <<= 1;
789        temp |= crc32_result & 1;
790        shft-- ;
791    }
792
793    /* temp[31-bit] = crc32_result[bit] */
794    temp <<= shft;
795
796    /* Swap */
797    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
798    {
799        uint32_t t0, t1, t2, t3;
800        t0 = (0x000000ff & (temp >> 24));
801        t1 = (0x0000ff00 & (temp >> 8));
802        t2 = (0x00ff0000 & (temp << 8));
803        t3 = (0xff000000 & (temp << 24));
804        crc32_result = t0 | t1 | t2 | t3;
805    }
806
807    /* Complement */
808    if (complement)
809    {
810        crc32_result = ~crc32_result;
811    }
812
813    return (crc32_result);
814}
815
816int
817bxe_test_bit(int                    nr,
818             volatile unsigned long *addr)
819{
820    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
821}
822
823void
824bxe_set_bit(unsigned int           nr,
825            volatile unsigned long *addr)
826{
827    atomic_set_acq_long(addr, (1 << nr));
828}
829
830void
831bxe_clear_bit(int                    nr,
832              volatile unsigned long *addr)
833{
834    atomic_clear_acq_long(addr, (1 << nr));
835}
836
837int
838bxe_test_and_set_bit(int                    nr,
839                       volatile unsigned long *addr)
840{
841    unsigned long x;
842    nr = (1 << nr);
843    do {
844        x = *addr;
845    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
846    // if (x & nr) bit_was_set; else bit_was_not_set;
847    return (x & nr);
848}
849
850int
851bxe_test_and_clear_bit(int                    nr,
852                       volatile unsigned long *addr)
853{
854    unsigned long x;
855    nr = (1 << nr);
856    do {
857        x = *addr;
858    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
859    // if (x & nr) bit_was_set; else bit_was_not_set;
860    return (x & nr);
861}
862
863int
864bxe_cmpxchg(volatile int *addr,
865            int          old,
866            int          new)
867{
868    int x;
869    do {
870        x = *addr;
871    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
872    return (x);
873}
874
875/*
876 * Get DMA memory from the OS.
877 *
878 * Validates that the OS has provided DMA buffers in response to a
879 * bus_dmamap_load call and saves the physical address of those buffers.
880 * When the callback is used the OS will return 0 for the mapping function
881 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
882 * failures back to the caller.
883 *
884 * Returns:
885 *   Nothing.
886 */
887static void
888bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
889{
890    struct bxe_dma *dma = arg;
891
892    if (error) {
893        dma->paddr = 0;
894        dma->nseg  = 0;
895        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
896    } else {
897        dma->paddr = segs->ds_addr;
898        dma->nseg  = nseg;
899#if 0
900        BLOGD(dma->sc, DBG_LOAD,
901              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
902              dma->msg, dma->vaddr, (void *)dma->paddr,
903              dma->nseg, dma->size);
904#endif
905    }
906}
907
908/*
909 * Allocate a block of memory and map it for DMA. No partial completions
910 * allowed and release any resources acquired if we can't acquire all
911 * resources.
912 *
913 * Returns:
914 *   0 = Success, !0 = Failure
915 */
916int
917bxe_dma_alloc(struct bxe_softc *sc,
918              bus_size_t       size,
919              struct bxe_dma   *dma,
920              const char       *msg)
921{
922    int rc;
923
924    if (dma->size > 0) {
925        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
926              (unsigned long)dma->size);
927        return (1);
928    }
929
930    memset(dma, 0, sizeof(*dma)); /* sanity */
931    dma->sc   = sc;
932    dma->size = size;
933    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
934
935    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
936                            BCM_PAGE_SIZE,      /* alignment */
937                            0,                  /* boundary limit */
938                            BUS_SPACE_MAXADDR,  /* restricted low */
939                            BUS_SPACE_MAXADDR,  /* restricted hi */
940                            NULL,               /* addr filter() */
941                            NULL,               /* addr filter() arg */
942                            size,               /* max map size */
943                            1,                  /* num discontinuous */
944                            size,               /* max seg size */
945                            BUS_DMA_ALLOCNOW,   /* flags */
946                            NULL,               /* lock() */
947                            NULL,               /* lock() arg */
948                            &dma->tag);         /* returned dma tag */
949    if (rc != 0) {
950        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
951        memset(dma, 0, sizeof(*dma));
952        return (1);
953    }
954
955    rc = bus_dmamem_alloc(dma->tag,
956                          (void **)&dma->vaddr,
957                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
958                          &dma->map);
959    if (rc != 0) {
960        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
961        bus_dma_tag_destroy(dma->tag);
962        memset(dma, 0, sizeof(*dma));
963        return (1);
964    }
965
966    rc = bus_dmamap_load(dma->tag,
967                         dma->map,
968                         dma->vaddr,
969                         size,
970                         bxe_dma_map_addr, /* BLOGD in here */
971                         dma,
972                         BUS_DMA_NOWAIT);
973    if (rc != 0) {
974        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
975        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
976        bus_dma_tag_destroy(dma->tag);
977        memset(dma, 0, sizeof(*dma));
978        return (1);
979    }
980
981    return (0);
982}
983
984void
985bxe_dma_free(struct bxe_softc *sc,
986             struct bxe_dma   *dma)
987{
988    if (dma->size > 0) {
989#if 0
990        BLOGD(sc, DBG_LOAD,
991              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
992              dma->msg, dma->vaddr, (void *)dma->paddr,
993              dma->nseg, dma->size);
994#endif
995
996        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
997
998        bus_dmamap_sync(dma->tag, dma->map,
999                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
1000        bus_dmamap_unload(dma->tag, dma->map);
1001        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1002        bus_dma_tag_destroy(dma->tag);
1003    }
1004
1005    memset(dma, 0, sizeof(*dma));
1006}
1007
1008/*
1009 * These indirect read and write routines are only during init.
1010 * The locking is handled by the MCP.
1011 */
1012
1013void
1014bxe_reg_wr_ind(struct bxe_softc *sc,
1015               uint32_t         addr,
1016               uint32_t         val)
1017{
1018    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1019    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
1020    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1021}
1022
1023uint32_t
1024bxe_reg_rd_ind(struct bxe_softc *sc,
1025               uint32_t         addr)
1026{
1027    uint32_t val;
1028
1029    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1030    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1031    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1032
1033    return (val);
1034}
1035
1036#if 0
1037void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
1038{
1039    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;
1040
1041    switch (dmae->opcode & DMAE_COMMAND_DST) {
1042    case DMAE_CMD_DST_PCI:
1043        if (src_type == DMAE_CMD_SRC_PCI)
1044            DP(msglvl, "DMAE: opcode 0x%08x\n"
1045               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
1046               "comp_addr [%x:%08x], comp_val 0x%08x\n",
1047               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
1048               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
1049               dmae->comp_addr_hi, dmae->comp_addr_lo,
1050               dmae->comp_val);
1051        else
1052            DP(msglvl, "DMAE: opcode 0x%08x\n"
1053               "src [%08x], len [%d*4], dst [%x:%08x]\n"
1054               "comp_addr [%x:%08x], comp_val 0x%08x\n",
1055               dmae->opcode, dmae->src_addr_lo >> 2,
1056               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
1057               dmae->comp_addr_hi, dmae->comp_addr_lo,
1058               dmae->comp_val);
1059        break;
1060    case DMAE_CMD_DST_GRC:
1061        if (src_type == DMAE_CMD_SRC_PCI)
1062            DP(msglvl, "DMAE: opcode 0x%08x\n"
1063               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
1064               "comp_addr [%x:%08x], comp_val 0x%08x\n",
1065               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
1066               dmae->len, dmae->dst_addr_lo >> 2,
1067               dmae->comp_addr_hi, dmae->comp_addr_lo,
1068               dmae->comp_val);
1069        else
1070            DP(msglvl, "DMAE: opcode 0x%08x\n"
1071               "src [%08x], len [%d*4], dst [%08x]\n"
1072               "comp_addr [%x:%08x], comp_val 0x%08x\n",
1073               dmae->opcode, dmae->src_addr_lo >> 2,
1074               dmae->len, dmae->dst_addr_lo >> 2,
1075               dmae->comp_addr_hi, dmae->comp_addr_lo,
1076               dmae->comp_val);
1077        break;
1078    default:
1079        if (src_type == DMAE_CMD_SRC_PCI)
1080            DP(msglvl, "DMAE: opcode 0x%08x\n"
1081               "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
1082               "comp_addr [%x:%08x]  comp_val 0x%08x\n",
1083               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
1084               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
1085               dmae->comp_val);
1086        else
1087            DP(msglvl, "DMAE: opcode 0x%08x\n"
1088               "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
1089               "comp_addr [%x:%08x]  comp_val 0x%08x\n",
1090               dmae->opcode, dmae->src_addr_lo >> 2,
1091               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
1092               dmae->comp_val);
1093        break;
1094    }
1095
1096}
1097#endif
1098
1099static int
1100bxe_acquire_hw_lock(struct bxe_softc *sc,
1101                    uint32_t         resource)
1102{
1103    uint32_t lock_status;
1104    uint32_t resource_bit = (1 << resource);
1105    int func = SC_FUNC(sc);
1106    uint32_t hw_lock_control_reg;
1107    int cnt;
1108
1109    /* validate the resource is within range */
1110    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1111        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
1112        return (-1);
1113    }
1114
1115    if (func <= 5) {
1116        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1117    } else {
1118        hw_lock_control_reg =
1119                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1120    }
1121
1122    /* validate the resource is not already taken */
1123    lock_status = REG_RD(sc, hw_lock_control_reg);
1124    if (lock_status & resource_bit) {
1125        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
1126              lock_status, resource_bit);
1127        return (-1);
1128    }
1129
1130    /* try every 5ms for 5 seconds */
1131    for (cnt = 0; cnt < 1000; cnt++) {
1132        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1133        lock_status = REG_RD(sc, hw_lock_control_reg);
1134        if (lock_status & resource_bit) {
1135            return (0);
1136        }
1137        DELAY(5000);
1138    }
1139
1140    BLOGE(sc, "Resource lock timeout!\n");
1141    return (-1);
1142}
1143
1144static int
1145bxe_release_hw_lock(struct bxe_softc *sc,
1146                    uint32_t         resource)
1147{
1148    uint32_t lock_status;
1149    uint32_t resource_bit = (1 << resource);
1150    int func = SC_FUNC(sc);
1151    uint32_t hw_lock_control_reg;
1152
1153    /* validate the resource is within range */
1154    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1155        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
1156        return (-1);
1157    }
1158
1159    if (func <= 5) {
1160        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1161    } else {
1162        hw_lock_control_reg =
1163                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1164    }
1165
1166    /* validate the resource is currently taken */
1167    lock_status = REG_RD(sc, hw_lock_control_reg);
1168    if (!(lock_status & resource_bit)) {
1169        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
1170              lock_status, resource_bit);
1171        return (-1);
1172    }
1173
1174    REG_WR(sc, hw_lock_control_reg, resource_bit);
1175    return (0);
1176}
1177
1178/*
1179 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1180 * had we done things the other way around, if two pfs from the same port
1181 * would attempt to access nvram at the same time, we could run into a
1182 * scenario such as:
1183 * pf A takes the port lock.
1184 * pf B succeeds in taking the same lock since they are from the same port.
1185 * pf A takes the per pf misc lock. Performs eeprom access.
1186 * pf A finishes. Unlocks the per pf misc lock.
1187 * Pf B takes the lock and proceeds to perform it's own access.
1188 * pf A unlocks the per port lock, while pf B is still working (!).
1189 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1190 * access corrupted by pf B).*
1191 */
1192static int
1193bxe_acquire_nvram_lock(struct bxe_softc *sc)
1194{
1195    int port = SC_PORT(sc);
1196    int count, i;
1197    uint32_t val = 0;
1198
1199    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1200    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1201
1202    /* adjust timeout for emulation/FPGA */
1203    count = NVRAM_TIMEOUT_COUNT;
1204    if (CHIP_REV_IS_SLOW(sc)) {
1205        count *= 100;
1206    }
1207
1208    /* request access to nvram interface */
1209    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1210           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1211
1212    for (i = 0; i < count*10; i++) {
1213        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1214        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1215            break;
1216        }
1217
1218        DELAY(5);
1219    }
1220
1221    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1222        BLOGE(sc, "Cannot get access to nvram interface\n");
1223        return (-1);
1224    }
1225
1226    return (0);
1227}
1228
1229static int
1230bxe_release_nvram_lock(struct bxe_softc *sc)
1231{
1232    int port = SC_PORT(sc);
1233    int count, i;
1234    uint32_t val = 0;
1235
1236    /* adjust timeout for emulation/FPGA */
1237    count = NVRAM_TIMEOUT_COUNT;
1238    if (CHIP_REV_IS_SLOW(sc)) {
1239        count *= 100;
1240    }
1241
1242    /* relinquish nvram interface */
1243    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1244           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1245
1246    for (i = 0; i < count*10; i++) {
1247        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1248        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1249            break;
1250        }
1251
1252        DELAY(5);
1253    }
1254
1255    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1256        BLOGE(sc, "Cannot free access to nvram interface\n");
1257        return (-1);
1258    }
1259
1260    /* release HW lock: protect against other PFs in PF Direct Assignment */
1261    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1262
1263    return (0);
1264}
1265
1266static void
1267bxe_enable_nvram_access(struct bxe_softc *sc)
1268{
1269    uint32_t val;
1270
1271    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1272
1273    /* enable both bits, even on read */
1274    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1275           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1276}
1277
1278static void
1279bxe_disable_nvram_access(struct bxe_softc *sc)
1280{
1281    uint32_t val;
1282
1283    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1284
1285    /* disable both bits, even after read */
1286    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1287           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1288                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1289}
1290
1291static int
1292bxe_nvram_read_dword(struct bxe_softc *sc,
1293                     uint32_t         offset,
1294                     uint32_t         *ret_val,
1295                     uint32_t         cmd_flags)
1296{
1297    int count, i, rc;
1298    uint32_t val;
1299
1300    /* build the command word */
1301    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1302
1303    /* need to clear DONE bit separately */
1304    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1305
1306    /* address of the NVRAM to read from */
1307    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1308           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1309
1310    /* issue a read command */
1311    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1312
1313    /* adjust timeout for emulation/FPGA */
1314    count = NVRAM_TIMEOUT_COUNT;
1315    if (CHIP_REV_IS_SLOW(sc)) {
1316        count *= 100;
1317    }
1318
1319    /* wait for completion */
1320    *ret_val = 0;
1321    rc = -1;
1322    for (i = 0; i < count; i++) {
1323        DELAY(5);
1324        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1325
1326        if (val & MCPR_NVM_COMMAND_DONE) {
1327            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1328            /* we read nvram data in cpu order
1329             * but ethtool sees it as an array of bytes
1330             * converting to big-endian will do the work
1331             */
1332            *ret_val = htobe32(val);
1333            rc = 0;
1334            break;
1335        }
1336    }
1337
1338    if (rc == -1) {
1339        BLOGE(sc, "nvram read timeout expired\n");
1340    }
1341
1342    return (rc);
1343}
1344
1345static int
1346bxe_nvram_read(struct bxe_softc *sc,
1347               uint32_t         offset,
1348               uint8_t          *ret_buf,
1349               int              buf_size)
1350{
1351    uint32_t cmd_flags;
1352    uint32_t val;
1353    int rc;
1354
1355    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1356        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1357              offset, buf_size);
1358        return (-1);
1359    }
1360
1361    if ((offset + buf_size) > sc->devinfo.flash_size) {
1362        BLOGE(sc, "Invalid parameter, "
1363                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1364              offset, buf_size, sc->devinfo.flash_size);
1365        return (-1);
1366    }
1367
1368    /* request access to nvram interface */
1369    rc = bxe_acquire_nvram_lock(sc);
1370    if (rc) {
1371        return (rc);
1372    }
1373
1374    /* enable access to nvram interface */
1375    bxe_enable_nvram_access(sc);
1376
1377    /* read the first word(s) */
1378    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1379    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1380        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1381        memcpy(ret_buf, &val, 4);
1382
1383        /* advance to the next dword */
1384        offset += sizeof(uint32_t);
1385        ret_buf += sizeof(uint32_t);
1386        buf_size -= sizeof(uint32_t);
1387        cmd_flags = 0;
1388    }
1389
1390    if (rc == 0) {
1391        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1392        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1393        memcpy(ret_buf, &val, 4);
1394    }
1395
1396    /* disable access to nvram interface */
1397    bxe_disable_nvram_access(sc);
1398    bxe_release_nvram_lock(sc);
1399
1400    return (rc);
1401}
1402
1403static int
1404bxe_nvram_write_dword(struct bxe_softc *sc,
1405                      uint32_t         offset,
1406                      uint32_t         val,
1407                      uint32_t         cmd_flags)
1408{
1409    int count, i, rc;
1410
1411    /* build the command word */
1412    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1413
1414    /* need to clear DONE bit separately */
1415    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1416
1417    /* write the data */
1418    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1419
1420    /* address of the NVRAM to write to */
1421    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1422           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1423
1424    /* issue the write command */
1425    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1426
1427    /* adjust timeout for emulation/FPGA */
1428    count = NVRAM_TIMEOUT_COUNT;
1429    if (CHIP_REV_IS_SLOW(sc)) {
1430        count *= 100;
1431    }
1432
1433    /* wait for completion */
1434    rc = -1;
1435    for (i = 0; i < count; i++) {
1436        DELAY(5);
1437        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1438        if (val & MCPR_NVM_COMMAND_DONE) {
1439            rc = 0;
1440            break;
1441        }
1442    }
1443
1444    if (rc == -1) {
1445        BLOGE(sc, "nvram write timeout expired\n");
1446    }
1447
1448    return (rc);
1449}
1450
1451#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1452
1453static int
1454bxe_nvram_write1(struct bxe_softc *sc,
1455                 uint32_t         offset,
1456                 uint8_t          *data_buf,
1457                 int              buf_size)
1458{
1459    uint32_t cmd_flags;
1460    uint32_t align_offset;
1461    uint32_t val;
1462    int rc;
1463
1464    if ((offset + buf_size) > sc->devinfo.flash_size) {
1465        BLOGE(sc, "Invalid parameter, "
1466                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1467              offset, buf_size, sc->devinfo.flash_size);
1468        return (-1);
1469    }
1470
1471    /* request access to nvram interface */
1472    rc = bxe_acquire_nvram_lock(sc);
1473    if (rc) {
1474        return (rc);
1475    }
1476
1477    /* enable access to nvram interface */
1478    bxe_enable_nvram_access(sc);
1479
1480    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1481    align_offset = (offset & ~0x03);
1482    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1483
1484    if (rc == 0) {
1485        val &= ~(0xff << BYTE_OFFSET(offset));
1486        val |= (*data_buf << BYTE_OFFSET(offset));
1487
1488        /* nvram data is returned as an array of bytes
1489         * convert it back to cpu order
1490         */
1491        val = be32toh(val);
1492
1493        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1494    }
1495
1496    /* disable access to nvram interface */
1497    bxe_disable_nvram_access(sc);
1498    bxe_release_nvram_lock(sc);
1499
1500    return (rc);
1501}
1502
1503static int
1504bxe_nvram_write(struct bxe_softc *sc,
1505                uint32_t         offset,
1506                uint8_t          *data_buf,
1507                int              buf_size)
1508{
1509    uint32_t cmd_flags;
1510    uint32_t val;
1511    uint32_t written_so_far;
1512    int rc;
1513
1514    if (buf_size == 1) {
1515        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1516    }
1517
1518    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1519        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1520              offset, buf_size);
1521        return (-1);
1522    }
1523
1524    if (buf_size == 0) {
1525        return (0); /* nothing to do */
1526    }
1527
1528    if ((offset + buf_size) > sc->devinfo.flash_size) {
1529        BLOGE(sc, "Invalid parameter, "
1530                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1531              offset, buf_size, sc->devinfo.flash_size);
1532        return (-1);
1533    }
1534
1535    /* request access to nvram interface */
1536    rc = bxe_acquire_nvram_lock(sc);
1537    if (rc) {
1538        return (rc);
1539    }
1540
1541    /* enable access to nvram interface */
1542    bxe_enable_nvram_access(sc);
1543
1544    written_so_far = 0;
1545    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1546    while ((written_so_far < buf_size) && (rc == 0)) {
1547        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1548            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1549        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1550            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1551        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1552            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1553        }
1554
1555        memcpy(&val, data_buf, 4);
1556
1557        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1558
1559        /* advance to the next dword */
1560        offset += sizeof(uint32_t);
1561        data_buf += sizeof(uint32_t);
1562        written_so_far += sizeof(uint32_t);
1563        cmd_flags = 0;
1564    }
1565
1566    /* disable access to nvram interface */
1567    bxe_disable_nvram_access(sc);
1568    bxe_release_nvram_lock(sc);
1569
1570    return (rc);
1571}
1572
1573/* copy command into DMAE command memory and set DMAE command Go */
1574void
1575bxe_post_dmae(struct bxe_softc    *sc,
1576              struct dmae_command *dmae,
1577              int                 idx)
1578{
1579    uint32_t cmd_offset;
1580    int i;
1581
1582    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
1583    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
1584        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1585    }
1586
1587    REG_WR(sc, dmae_reg_go_c[idx], 1);
1588}
1589
1590uint32_t
1591bxe_dmae_opcode_add_comp(uint32_t opcode,
1592                         uint8_t  comp_type)
1593{
1594    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
1595                      DMAE_COMMAND_C_TYPE_ENABLE));
1596}
1597
1598uint32_t
1599bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1600{
1601    return (opcode & ~DMAE_COMMAND_SRC_RESET);
1602}
1603
1604uint32_t
1605bxe_dmae_opcode(struct bxe_softc *sc,
1606                uint8_t          src_type,
1607                uint8_t          dst_type,
1608                uint8_t          with_comp,
1609                uint8_t          comp_type)
1610{
1611    uint32_t opcode = 0;
1612
1613    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
1614               (dst_type << DMAE_COMMAND_DST_SHIFT));
1615
1616    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);
1617
1618    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1619
1620    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
1621               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));
1622
1623    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
1624
1625#ifdef __BIG_ENDIAN
1626    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1627#else
1628    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1629#endif
1630
1631    if (with_comp) {
1632        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1633    }
1634
1635    return (opcode);
1636}
1637
1638static void
1639bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1640                        struct dmae_command *dmae,
1641                        uint8_t             src_type,
1642                        uint8_t             dst_type)
1643{
1644    memset(dmae, 0, sizeof(struct dmae_command));
1645
1646    /* set the opcode */
1647    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1648                                   TRUE, DMAE_COMP_PCI);
1649
1650    /* fill in the completion parameters */
1651    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1652    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1653    dmae->comp_val     = DMAE_COMP_VAL;
1654}
1655
1656/* issue a DMAE command over the init channel and wait for completion */
1657static int
1658bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1659                         struct dmae_command *dmae)
1660{
1661    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1662    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1663
1664    BXE_DMAE_LOCK(sc);
1665
1666    /* reset completion */
1667    *wb_comp = 0;
1668
1669    /* post the command on the channel used for initializations */
1670    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1671
1672    /* wait for completion */
1673    DELAY(5);
1674
1675    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1676        if (!timeout ||
1677            (sc->recovery_state != BXE_RECOVERY_DONE &&
1678             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1679            BLOGE(sc, "DMAE timeout!\n");
1680            BXE_DMAE_UNLOCK(sc);
1681            return (DMAE_TIMEOUT);
1682        }
1683
1684        timeout--;
1685        DELAY(50);
1686    }
1687
1688    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1689        BLOGE(sc, "DMAE PCI error!\n");
1690        BXE_DMAE_UNLOCK(sc);
1691        return (DMAE_PCI_ERROR);
1692    }
1693
1694    BXE_DMAE_UNLOCK(sc);
1695    return (0);
1696}
1697
1698void
1699bxe_read_dmae(struct bxe_softc *sc,
1700              uint32_t         src_addr,
1701              uint32_t         len32)
1702{
1703    struct dmae_command dmae;
1704    uint32_t *data;
1705    int i, rc;
1706
1707    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1708
1709    if (!sc->dmae_ready) {
1710        data = BXE_SP(sc, wb_data[0]);
1711
1712        for (i = 0; i < len32; i++) {
1713            data[i] = (CHIP_IS_E1(sc)) ?
1714                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1715                          REG_RD(sc, (src_addr + (i * 4)));
1716        }
1717
1718        return;
1719    }
1720
1721    /* set opcode and fixed command fields */
1722    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1723
1724    /* fill in addresses and len */
1725    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1726    dmae.src_addr_hi = 0;
1727    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1728    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1729    dmae.len         = len32;
1730
1731    /* issue the command and wait for completion */
1732    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1733        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1734    };
1735}
1736
1737void
1738bxe_write_dmae(struct bxe_softc *sc,
1739               bus_addr_t       dma_addr,
1740               uint32_t         dst_addr,
1741               uint32_t         len32)
1742{
1743    struct dmae_command dmae;
1744    int rc;
1745
1746    if (!sc->dmae_ready) {
1747        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1748
1749        if (CHIP_IS_E1(sc)) {
1750            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1751        } else {
1752            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1753        }
1754
1755        return;
1756    }
1757
1758    /* set opcode and fixed command fields */
1759    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1760
1761    /* fill in addresses and len */
1762    dmae.src_addr_lo = U64_LO(dma_addr);
1763    dmae.src_addr_hi = U64_HI(dma_addr);
1764    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1765    dmae.dst_addr_hi = 0;
1766    dmae.len         = len32;
1767
1768    /* issue the command and wait for completion */
1769    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1770        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1771    }
1772}
1773
1774void
1775bxe_write_dmae_phys_len(struct bxe_softc *sc,
1776                        bus_addr_t       phys_addr,
1777                        uint32_t         addr,
1778                        uint32_t         len)
1779{
1780    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1781    int offset = 0;
1782
1783    while (len > dmae_wr_max) {
1784        bxe_write_dmae(sc,
1785                       (phys_addr + offset), /* src DMA address */
1786                       (addr + offset),      /* dst GRC address */
1787                       dmae_wr_max);
1788        offset += (dmae_wr_max * 4);
1789        len -= dmae_wr_max;
1790    }
1791
1792    bxe_write_dmae(sc,
1793                   (phys_addr + offset), /* src DMA address */
1794                   (addr + offset),      /* dst GRC address */
1795                   len);
1796}
1797
1798void
1799bxe_set_ctx_validation(struct bxe_softc   *sc,
1800                       struct eth_context *cxt,
1801                       uint32_t           cid)
1802{
1803    /* ustorm cxt validation */
1804    cxt->ustorm_ag_context.cdu_usage =
1805        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1806            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1807    /* xcontext validation */
1808    cxt->xstorm_ag_context.cdu_reserved =
1809        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1810            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1811}
1812
1813static void
1814bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1815                            uint8_t          port,
1816                            uint8_t          fw_sb_id,
1817                            uint8_t          sb_index,
1818                            uint8_t          ticks)
1819{
1820    uint32_t addr =
1821        (BAR_CSTRORM_INTMEM +
1822         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1823
1824    REG_WR8(sc, addr, ticks);
1825
1826    BLOGD(sc, DBG_LOAD,
1827          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1828          port, fw_sb_id, sb_index, ticks);
1829}
1830
1831static void
1832bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1833                            uint8_t          port,
1834                            uint16_t         fw_sb_id,
1835                            uint8_t          sb_index,
1836                            uint8_t          disable)
1837{
1838    uint32_t enable_flag =
1839        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1840    uint32_t addr =
1841        (BAR_CSTRORM_INTMEM +
1842         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1843    uint8_t flags;
1844
1845    /* clear and set */
1846    flags = REG_RD8(sc, addr);
1847    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1848    flags |= enable_flag;
1849    REG_WR8(sc, addr, flags);
1850
1851    BLOGD(sc, DBG_LOAD,
1852          "port %d fw_sb_id %d sb_index %d disable %d\n",
1853          port, fw_sb_id, sb_index, disable);
1854}
1855
1856void
1857bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1858                             uint8_t          fw_sb_id,
1859                             uint8_t          sb_index,
1860                             uint8_t          disable,
1861                             uint16_t         usec)
1862{
1863    int port = SC_PORT(sc);
1864    uint8_t ticks = (usec / 4); /* XXX ??? */
1865
1866    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1867
1868    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1869    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1870}
1871
1872void
1873elink_cb_udelay(struct bxe_softc *sc,
1874                uint32_t         usecs)
1875{
1876    DELAY(usecs);
1877}
1878
1879uint32_t
1880elink_cb_reg_read(struct bxe_softc *sc,
1881                  uint32_t         reg_addr)
1882{
1883    return (REG_RD(sc, reg_addr));
1884}
1885
1886void
1887elink_cb_reg_write(struct bxe_softc *sc,
1888                   uint32_t         reg_addr,
1889                   uint32_t         val)
1890{
1891    REG_WR(sc, reg_addr, val);
1892}
1893
1894void
1895elink_cb_reg_wb_write(struct bxe_softc *sc,
1896                      uint32_t         offset,
1897                      uint32_t         *wb_write,
1898                      uint16_t         len)
1899{
1900    REG_WR_DMAE(sc, offset, wb_write, len);
1901}
1902
1903void
1904elink_cb_reg_wb_read(struct bxe_softc *sc,
1905                     uint32_t         offset,
1906                     uint32_t         *wb_write,
1907                     uint16_t         len)
1908{
1909    REG_RD_DMAE(sc, offset, wb_write, len);
1910}
1911
1912uint8_t
1913elink_cb_path_id(struct bxe_softc *sc)
1914{
1915    return (SC_PATH(sc));
1916}
1917
1918void
1919elink_cb_event_log(struct bxe_softc     *sc,
1920                   const elink_log_id_t elink_log_id,
1921                   ...)
1922{
1923    /* XXX */
1924#if 0
1925    //va_list ap;
1926    va_start(ap, elink_log_id);
1927    _XXX_(sc, lm_log_id, ap);
1928    va_end(ap);
1929#endif
1930    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1931}
1932
1933static int
1934bxe_set_spio(struct bxe_softc *sc,
1935             int              spio,
1936             uint32_t         mode)
1937{
1938    uint32_t spio_reg;
1939
1940    /* Only 2 SPIOs are configurable */
1941    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1942        BLOGE(sc, "Invalid SPIO 0x%x\n", spio);
1943        return (-1);
1944    }
1945
1946    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1947
1948    /* read SPIO and mask except the float bits */
1949    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1950
1951    switch (mode) {
1952    case MISC_SPIO_OUTPUT_LOW:
1953        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1954        /* clear FLOAT and set CLR */
1955        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1956        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1957        break;
1958
1959    case MISC_SPIO_OUTPUT_HIGH:
1960        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1961        /* clear FLOAT and set SET */
1962        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1963        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1964        break;
1965
1966    case MISC_SPIO_INPUT_HI_Z:
1967        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1968        /* set FLOAT */
1969        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1970        break;
1971
1972    default:
1973        break;
1974    }
1975
1976    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1977    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1978
1979    return (0);
1980}
1981
1982static int
1983bxe_gpio_read(struct bxe_softc *sc,
1984              int              gpio_num,
1985              uint8_t          port)
1986{
1987    /* The GPIO should be swapped if swap register is set and active */
1988    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1989                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1990    int gpio_shift = (gpio_num +
1991                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1992    uint32_t gpio_mask = (1 << gpio_shift);
1993    uint32_t gpio_reg;
1994
1995    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1996        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
1997        return (-1);
1998    }
1999
2000    /* read GPIO value */
2001    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2002
2003    /* get the requested pin value */
2004    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
2005}
2006
2007static int
2008bxe_gpio_write(struct bxe_softc *sc,
2009               int              gpio_num,
2010               uint32_t         mode,
2011               uint8_t          port)
2012{
2013    /* The GPIO should be swapped if swap register is set and active */
2014    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2015                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2016    int gpio_shift = (gpio_num +
2017                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2018    uint32_t gpio_mask = (1 << gpio_shift);
2019    uint32_t gpio_reg;
2020
2021    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2022        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
2023        return (-1);
2024    }
2025
2026    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2027
2028    /* read GPIO and mask except the float bits */
2029    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2030
2031    switch (mode) {
2032    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2033        BLOGD(sc, DBG_PHY,
2034              "Set GPIO %d (shift %d) -> output low\n",
2035              gpio_num, gpio_shift);
2036        /* clear FLOAT and set CLR */
2037        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2038        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2039        break;
2040
2041    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2042        BLOGD(sc, DBG_PHY,
2043              "Set GPIO %d (shift %d) -> output high\n",
2044              gpio_num, gpio_shift);
2045        /* clear FLOAT and set SET */
2046        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2048        break;
2049
2050    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2051        BLOGD(sc, DBG_PHY,
2052              "Set GPIO %d (shift %d) -> input\n",
2053              gpio_num, gpio_shift);
2054        /* set FLOAT */
2055        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2056        break;
2057
2058    default:
2059        break;
2060    }
2061
2062    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2063    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2064
2065    return (0);
2066}
2067
2068static int
2069bxe_gpio_mult_write(struct bxe_softc *sc,
2070                    uint8_t          pins,
2071                    uint32_t         mode)
2072{
2073    uint32_t gpio_reg;
2074
2075    /* any port swapping should be handled by caller */
2076
2077    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2078
2079    /* read GPIO and mask except the float bits */
2080    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2081    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2082    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2083    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2084
2085    switch (mode) {
2086    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2087        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2088        /* set CLR */
2089        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2090        break;
2091
2092    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2093        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2094        /* set SET */
2095        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2096        break;
2097
2098    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2099        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2100        /* set FLOAT */
2101        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2102        break;
2103
2104    default:
2105        BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode);
2106        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2107        return (-1);
2108    }
2109
2110    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2111    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2112
2113    return (0);
2114}
2115
2116static int
2117bxe_gpio_int_write(struct bxe_softc *sc,
2118                   int              gpio_num,
2119                   uint32_t         mode,
2120                   uint8_t          port)
2121{
2122    /* The GPIO should be swapped if swap register is set and active */
2123    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2124                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2125    int gpio_shift = (gpio_num +
2126                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2127    uint32_t gpio_mask = (1 << gpio_shift);
2128    uint32_t gpio_reg;
2129
2130    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2131        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
2132        return (-1);
2133    }
2134
2135    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2136
2137    /* read GPIO int */
2138    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2139
2140    switch (mode) {
2141    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2142        BLOGD(sc, DBG_PHY,
2143              "Clear GPIO INT %d (shift %d) -> output low\n",
2144              gpio_num, gpio_shift);
2145        /* clear SET and set CLR */
2146        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2147        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2148        break;
2149
2150    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2151        BLOGD(sc, DBG_PHY,
2152              "Set GPIO INT %d (shift %d) -> output high\n",
2153              gpio_num, gpio_shift);
2154        /* clear CLR and set SET */
2155        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2156        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2157        break;
2158
2159    default:
2160        break;
2161    }
2162
2163    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2164    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2165
2166    return (0);
2167}
2168
2169uint32_t
2170elink_cb_gpio_read(struct bxe_softc *sc,
2171                   uint16_t         gpio_num,
2172                   uint8_t          port)
2173{
2174    return (bxe_gpio_read(sc, gpio_num, port));
2175}
2176
2177uint8_t
2178elink_cb_gpio_write(struct bxe_softc *sc,
2179                    uint16_t         gpio_num,
2180                    uint8_t          mode, /* 0=low 1=high */
2181                    uint8_t          port)
2182{
2183    return (bxe_gpio_write(sc, gpio_num, mode, port));
2184}
2185
2186uint8_t
2187elink_cb_gpio_mult_write(struct bxe_softc *sc,
2188                         uint8_t          pins,
2189                         uint8_t          mode) /* 0=low 1=high */
2190{
2191    return (bxe_gpio_mult_write(sc, pins, mode));
2192}
2193
2194uint8_t
2195elink_cb_gpio_int_write(struct bxe_softc *sc,
2196                        uint16_t         gpio_num,
2197                        uint8_t          mode, /* 0=low 1=high */
2198                        uint8_t          port)
2199{
2200    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2201}
2202
2203void
2204elink_cb_notify_link_changed(struct bxe_softc *sc)
2205{
2206    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2207                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2208}
2209
2210/* send the MCP a request, block until there is a reply */
2211uint32_t
2212elink_cb_fw_command(struct bxe_softc *sc,
2213                    uint32_t         command,
2214                    uint32_t         param)
2215{
2216    int mb_idx = SC_FW_MB_IDX(sc);
2217    uint32_t seq;
2218    uint32_t rc = 0;
2219    uint32_t cnt = 1;
2220    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2221
2222    BXE_FWMB_LOCK(sc);
2223
2224    seq = ++sc->fw_seq;
2225    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2226    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2227
2228    BLOGD(sc, DBG_PHY,
2229          "wrote command 0x%08x to FW MB param 0x%08x\n",
2230          (command | seq), param);
2231
2232    /* Let the FW do it's magic. GIve it up to 5 seconds... */
2233    do {
2234        DELAY(delay * 1000);
2235        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2236    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2237
2238    BLOGD(sc, DBG_PHY,
2239          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2240          cnt*delay, rc, seq);
2241
2242    /* is this a reply to our command? */
2243    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2244        rc &= FW_MSG_CODE_MASK;
2245    } else {
2246        /* Ruh-roh! */
2247        BLOGE(sc, "FW failed to respond!\n");
2248        // XXX bxe_fw_dump(sc);
2249        rc = 0;
2250    }
2251
2252    BXE_FWMB_UNLOCK(sc);
2253    return (rc);
2254}
2255
2256static uint32_t
2257bxe_fw_command(struct bxe_softc *sc,
2258               uint32_t         command,
2259               uint32_t         param)
2260{
2261    return (elink_cb_fw_command(sc, command, param));
2262}
2263
2264static void
2265__storm_memset_dma_mapping(struct bxe_softc *sc,
2266                           uint32_t         addr,
2267                           bus_addr_t       mapping)
2268{
2269    REG_WR(sc, addr, U64_LO(mapping));
2270    REG_WR(sc, (addr + 4), U64_HI(mapping));
2271}
2272
2273static void
2274storm_memset_spq_addr(struct bxe_softc *sc,
2275                      bus_addr_t       mapping,
2276                      uint16_t         abs_fid)
2277{
2278    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2279                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2280    __storm_memset_dma_mapping(sc, addr, mapping);
2281}
2282
2283static void
2284storm_memset_vf_to_pf(struct bxe_softc *sc,
2285                      uint16_t         abs_fid,
2286                      uint16_t         pf_id)
2287{
2288    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2289    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2290    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2291    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2292}
2293
2294static void
2295storm_memset_func_en(struct bxe_softc *sc,
2296                     uint16_t         abs_fid,
2297                     uint8_t          enable)
2298{
2299    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2300    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2301    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2302    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2303}
2304
2305static void
2306storm_memset_eq_data(struct bxe_softc       *sc,
2307                     struct event_ring_data *eq_data,
2308                     uint16_t               pfid)
2309{
2310    uint32_t addr;
2311    size_t size;
2312
2313    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2314    size = sizeof(struct event_ring_data);
2315    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2316}
2317
2318static void
2319storm_memset_eq_prod(struct bxe_softc *sc,
2320                     uint16_t         eq_prod,
2321                     uint16_t         pfid)
2322{
2323    uint32_t addr = (BAR_CSTRORM_INTMEM +
2324                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2325    REG_WR16(sc, addr, eq_prod);
2326}
2327
2328/*
2329 * Post a slowpath command.
2330 *
2331 * A slowpath command is used to propogate a configuration change through
2332 * the controller in a controlled manner, allowing each STORM processor and
2333 * other H/W blocks to phase in the change.  The commands sent on the
2334 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2335 * completion of the ramrod will occur in different ways.  Here's a
2336 * breakdown of ramrods and how they complete:
2337 *
2338 * RAMROD_CMD_ID_ETH_PORT_SETUP
2339 *   Used to setup the leading connection on a port.  Completes on the
2340 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2341 *
2342 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2343 *   Used to setup an additional connection on a port.  Completes on the
2344 *   RCQ of the multi-queue/RSS connection being initialized.
2345 *
2346 * RAMROD_CMD_ID_ETH_STAT_QUERY
2347 *   Used to force the storm processors to update the statistics database
2348 *   in host memory.  This ramrod is send on the leading connection CID and
2349 *   completes as an index increment of the CSTORM on the default status
2350 *   block.
2351 *
2352 * RAMROD_CMD_ID_ETH_UPDATE
2353 *   Used to update the state of the leading connection, usually to udpate
2354 *   the RSS indirection table.  Completes on the RCQ of the leading
2355 *   connection. (Not currently used under FreeBSD until OS support becomes
2356 *   available.)
2357 *
2358 * RAMROD_CMD_ID_ETH_HALT
2359 *   Used when tearing down a connection prior to driver unload.  Completes
2360 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2361 *   use this on the leading connection.
2362 *
2363 * RAMROD_CMD_ID_ETH_SET_MAC
2364 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2365 *   the RCQ of the leading connection.
2366 *
2367 * RAMROD_CMD_ID_ETH_CFC_DEL
2368 *   Used when tearing down a conneciton prior to driver unload.  Completes
2369 *   on the RCQ of the leading connection (since the current connection
2370 *   has been completely removed from controller memory).
2371 *
2372 * RAMROD_CMD_ID_ETH_PORT_DEL
2373 *   Used to tear down the leading connection prior to driver unload,
2374 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2375 *   default status block.
2376 *
2377 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2378 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2379 *   RSS connection that is being offloaded.  (Not currently used under
2380 *   FreeBSD.)
2381 *
2382 * There can only be one command pending per function.
2383 *
2384 * Returns:
2385 *   0 = Success, !0 = Failure.
2386 */
2387
2388/* must be called under the spq lock */
2389static inline
2390struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2391{
2392    struct eth_spe *next_spe = sc->spq_prod_bd;
2393
2394    if (sc->spq_prod_bd == sc->spq_last_bd) {
2395        /* wrap back to the first eth_spq */
2396        sc->spq_prod_bd = sc->spq;
2397        sc->spq_prod_idx = 0;
2398    } else {
2399        sc->spq_prod_bd++;
2400        sc->spq_prod_idx++;
2401    }
2402
2403    return (next_spe);
2404}
2405
2406/* must be called under the spq lock */
2407static inline
2408void bxe_sp_prod_update(struct bxe_softc *sc)
2409{
2410    int func = SC_FUNC(sc);
2411
2412    /*
2413     * Make sure that BD data is updated before writing the producer.
2414     * BD data is written to the memory, the producer is read from the
2415     * memory, thus we need a full memory barrier to ensure the ordering.
2416     */
2417    mb();
2418
2419    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2420             sc->spq_prod_idx);
2421
2422    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2423                      BUS_SPACE_BARRIER_WRITE);
2424}
2425
2426/**
2427 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2428 *
2429 * @cmd:      command to check
2430 * @cmd_type: command type
2431 */
2432static inline
2433int bxe_is_contextless_ramrod(int cmd,
2434                              int cmd_type)
2435{
2436    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2437        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2438        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2439        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2440        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2441        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2442        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2443        return (TRUE);
2444    } else {
2445        return (FALSE);
2446    }
2447}
2448
2449/**
2450 * bxe_sp_post - place a single command on an SP ring
2451 *
2452 * @sc:         driver handle
2453 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2454 * @cid:        SW CID the command is related to
2455 * @data_hi:    command private data address (high 32 bits)
2456 * @data_lo:    command private data address (low 32 bits)
2457 * @cmd_type:   command type (e.g. NONE, ETH)
2458 *
2459 * SP data is handled as if it's always an address pair, thus data fields are
2460 * not swapped to little endian in upper functions. Instead this function swaps
2461 * data as if it's two uint32 fields.
2462 */
2463int
2464bxe_sp_post(struct bxe_softc *sc,
2465            int              command,
2466            int              cid,
2467            uint32_t         data_hi,
2468            uint32_t         data_lo,
2469            int              cmd_type)
2470{
2471    struct eth_spe *spe;
2472    uint16_t type;
2473    int common;
2474
2475    common = bxe_is_contextless_ramrod(command, cmd_type);
2476
2477    BXE_SP_LOCK(sc);
2478
2479    if (common) {
2480        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2481            BLOGE(sc, "EQ ring is full!\n");
2482            BXE_SP_UNLOCK(sc);
2483            return (-1);
2484        }
2485    } else {
2486        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2487            BLOGE(sc, "SPQ ring is full!\n");
2488            BXE_SP_UNLOCK(sc);
2489            return (-1);
2490        }
2491    }
2492
2493    spe = bxe_sp_get_next(sc);
2494
2495    /* CID needs port number to be encoded int it */
2496    spe->hdr.conn_and_cmd_data =
2497        htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
2498
2499    type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
2500
2501    /* TBD: Check if it works for VFs */
2502    type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
2503             SPE_HDR_FUNCTION_ID);
2504
2505    spe->hdr.type = htole16(type);
2506
2507    spe->data.update_data_addr.hi = htole32(data_hi);
2508    spe->data.update_data_addr.lo = htole32(data_lo);
2509
2510    /*
2511     * It's ok if the actual decrement is issued towards the memory
2512     * somewhere between the lock and unlock. Thus no more explict
2513     * memory barrier is needed.
2514     */
2515    if (common) {
2516        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2517    } else {
2518        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2519    }
2520
2521    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2522    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2523          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2524    BLOGD(sc, DBG_SP,
2525          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2526          sc->spq_prod_idx,
2527          (uint32_t)U64_HI(sc->spq_dma.paddr),
2528          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2529          command,
2530          common,
2531          HW_CID(sc, cid),
2532          data_hi,
2533          data_lo,
2534          type,
2535          atomic_load_acq_long(&sc->cq_spq_left),
2536          atomic_load_acq_long(&sc->eq_spq_left));
2537
2538    bxe_sp_prod_update(sc);
2539
2540    BXE_SP_UNLOCK(sc);
2541    return (0);
2542}
2543
2544/**
2545 * bxe_debug_print_ind_table - prints the indirection table configuration.
2546 *
2547 * @sc: driver hanlde
2548 * @p:  pointer to rss configuration
2549 */
2550#if 0
2551static void
2552bxe_debug_print_ind_table(struct bxe_softc               *sc,
2553                          struct ecore_config_rss_params *p)
2554{
2555    int i;
2556
2557    BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n");
2558    BLOGD(sc, DBG_LOAD, "    0x0000: ");
2559    for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
2560        BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]);
2561
2562        /* Print 4 bytes in a line */
2563        if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
2564            (((i + 1) & 0x3) == 0)) {
2565            BLOGD(sc, DBG_LOAD, "\n");
2566            BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1);
2567        }
2568    }
2569
2570    BLOGD(sc, DBG_LOAD, "\n");
2571}
2572#endif
2573
2574/*
2575 * FreeBSD Device probe function.
2576 *
2577 * Compares the device found to the driver's list of supported devices and
2578 * reports back to the bsd loader whether this is the right driver for the device.
2579 * This is the driver entry function called from the "kldload" command.
2580 *
2581 * Returns:
2582 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2583 */
2584static int
2585bxe_probe(device_t dev)
2586{
2587    struct bxe_softc *sc;
2588    struct bxe_device_type *t;
2589    char *descbuf;
2590    uint16_t did, sdid, svid, vid;
2591
2592    /* Find our device structure */
2593    sc = device_get_softc(dev);
2594    sc->dev = dev;
2595    t = bxe_devs;
2596
2597    /* Get the data for the device to be probed. */
2598    vid  = pci_get_vendor(dev);
2599    did  = pci_get_device(dev);
2600    svid = pci_get_subvendor(dev);
2601    sdid = pci_get_subdevice(dev);
2602
2603    BLOGD(sc, DBG_LOAD,
2604          "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2605          "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2606
2607    /* Look through the list of known devices for a match. */
2608    while (t->bxe_name != NULL) {
2609        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2610            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2611            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2612            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2613            if (descbuf == NULL)
2614                return (ENOMEM);
2615
2616            /* Print out the device identity. */
2617            snprintf(descbuf, BXE_DEVDESC_MAX,
2618                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2619                     (((pci_read_config(dev, PCIR_REVID, 4) &
2620                        0xf0) >> 4) + 'A'),
2621                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2622                     BXE_DRIVER_VERSION);
2623
2624            device_set_desc_copy(dev, descbuf);
2625            free(descbuf, M_TEMP);
2626            return (BUS_PROBE_DEFAULT);
2627        }
2628        t++;
2629    }
2630
2631    return (ENXIO);
2632}
2633
2634static void
2635bxe_init_mutexes(struct bxe_softc *sc)
2636{
2637#ifdef BXE_CORE_LOCK_SX
2638    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2639             "bxe%d_core_lock", sc->unit);
2640    sx_init(&sc->core_sx, sc->core_sx_name);
2641#else
2642    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2643             "bxe%d_core_lock", sc->unit);
2644    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2645#endif
2646
2647    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2648             "bxe%d_sp_lock", sc->unit);
2649    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2650
2651    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2652             "bxe%d_dmae_lock", sc->unit);
2653    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2654
2655    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2656             "bxe%d_phy_lock", sc->unit);
2657    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2658
2659    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2660             "bxe%d_fwmb_lock", sc->unit);
2661    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2662
2663    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2664             "bxe%d_print_lock", sc->unit);
2665    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2666
2667    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2668             "bxe%d_stats_lock", sc->unit);
2669    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2670
2671    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2672             "bxe%d_mcast_lock", sc->unit);
2673    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2674}
2675
2676static void
2677bxe_release_mutexes(struct bxe_softc *sc)
2678{
2679#ifdef BXE_CORE_LOCK_SX
2680    sx_destroy(&sc->core_sx);
2681#else
2682    if (mtx_initialized(&sc->core_mtx)) {
2683        mtx_destroy(&sc->core_mtx);
2684    }
2685#endif
2686
2687    if (mtx_initialized(&sc->sp_mtx)) {
2688        mtx_destroy(&sc->sp_mtx);
2689    }
2690
2691    if (mtx_initialized(&sc->dmae_mtx)) {
2692        mtx_destroy(&sc->dmae_mtx);
2693    }
2694
2695    if (mtx_initialized(&sc->port.phy_mtx)) {
2696        mtx_destroy(&sc->port.phy_mtx);
2697    }
2698
2699    if (mtx_initialized(&sc->fwmb_mtx)) {
2700        mtx_destroy(&sc->fwmb_mtx);
2701    }
2702
2703    if (mtx_initialized(&sc->print_mtx)) {
2704        mtx_destroy(&sc->print_mtx);
2705    }
2706
2707    if (mtx_initialized(&sc->stats_mtx)) {
2708        mtx_destroy(&sc->stats_mtx);
2709    }
2710
2711    if (mtx_initialized(&sc->mcast_mtx)) {
2712        mtx_destroy(&sc->mcast_mtx);
2713    }
2714}
2715
2716static void
2717bxe_tx_disable(struct bxe_softc* sc)
2718{
2719    if_t ifp = sc->ifp;
2720
2721    /* tell the stack the driver is stopped and TX queue is full */
2722    if (ifp !=  NULL) {
2723        if_setdrvflags(ifp, 0);
2724    }
2725}
2726
2727static void
2728bxe_drv_pulse(struct bxe_softc *sc)
2729{
2730    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2731             sc->fw_drv_pulse_wr_seq);
2732}
2733
2734static inline uint16_t
2735bxe_tx_avail(struct bxe_softc *sc,
2736             struct bxe_fastpath *fp)
2737{
2738    int16_t  used;
2739    uint16_t prod;
2740    uint16_t cons;
2741
2742    prod = fp->tx_bd_prod;
2743    cons = fp->tx_bd_cons;
2744
2745    used = SUB_S16(prod, cons);
2746
2747#if 0
2748    KASSERT((used < 0), ("used tx bds < 0"));
2749    KASSERT((used > sc->tx_ring_size), ("used tx bds > tx_ring_size"));
2750    KASSERT(((sc->tx_ring_size - used) > MAX_TX_AVAIL),
2751            ("invalid number of tx bds used"));
2752#endif
2753
2754    return (int16_t)(sc->tx_ring_size) - used;
2755}
2756
2757static inline int
2758bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2759{
2760    uint16_t hw_cons;
2761
2762    mb(); /* status block fields can change */
2763    hw_cons = le16toh(*fp->tx_cons_sb);
2764    return (hw_cons != fp->tx_pkt_cons);
2765}
2766
2767static inline uint8_t
2768bxe_has_tx_work(struct bxe_fastpath *fp)
2769{
2770    /* expand this for multi-cos if ever supported */
2771    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2772}
2773
2774static inline int
2775bxe_has_rx_work(struct bxe_fastpath *fp)
2776{
2777    uint16_t rx_cq_cons_sb;
2778
2779    mb(); /* status block fields can change */
2780    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2781    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2782        rx_cq_cons_sb++;
2783    return (fp->rx_cq_cons != rx_cq_cons_sb);
2784}
2785
2786static void
2787bxe_sp_event(struct bxe_softc    *sc,
2788             struct bxe_fastpath *fp,
2789             union eth_rx_cqe    *rr_cqe)
2790{
2791    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2792    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2793    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2794    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2795
2796    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2797          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2798
2799#if 0
2800    /*
2801     * If cid is within VF range, replace the slowpath object with the
2802     * one corresponding to this VF
2803     */
2804    if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) {
2805        bxe_iov_set_queue_sp_obj(sc, cid, &q_obj);
2806    }
2807#endif
2808
2809    switch (command) {
2810    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2811        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2812        drv_cmd = ECORE_Q_CMD_UPDATE;
2813        break;
2814
2815    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2816        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2817        drv_cmd = ECORE_Q_CMD_SETUP;
2818        break;
2819
2820    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2821        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2822        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2823        break;
2824
2825    case (RAMROD_CMD_ID_ETH_HALT):
2826        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2827        drv_cmd = ECORE_Q_CMD_HALT;
2828        break;
2829
2830    case (RAMROD_CMD_ID_ETH_TERMINATE):
2831        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2832        drv_cmd = ECORE_Q_CMD_TERMINATE;
2833        break;
2834
2835    case (RAMROD_CMD_ID_ETH_EMPTY):
2836        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2837        drv_cmd = ECORE_Q_CMD_EMPTY;
2838        break;
2839
2840    default:
2841        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2842              command, fp->index);
2843        return;
2844    }
2845
2846    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2847        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2848        /*
2849         * q_obj->complete_cmd() failure means that this was
2850         * an unexpected completion.
2851         *
2852         * In this case we don't want to increase the sc->spq_left
2853         * because apparently we haven't sent this command the first
2854         * place.
2855         */
2856        // bxe_panic(sc, ("Unexpected SP completion\n"));
2857        return;
2858    }
2859
2860#if 0
2861    /* SRIOV: reschedule any 'in_progress' operations */
2862    bxe_iov_sp_event(sc, cid, TRUE);
2863#endif
2864
2865    atomic_add_acq_long(&sc->cq_spq_left, 1);
2866
2867    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2868          atomic_load_acq_long(&sc->cq_spq_left));
2869
2870#if 0
2871    if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
2872        (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) {
2873        /*
2874         * If Queue update ramrod is completed for last Queue in AFEX VIF set
2875         * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to
2876         * prevent case that both bits are cleared. At the end of load/unload
2877         * driver checks that sp_state is cleared and this order prevents
2878         * races.
2879         */
2880        bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state);
2881        wmb();
2882        bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state);
2883
2884        /* schedule the sp task as MCP ack is required */
2885        bxe_schedule_sp_task(sc);
2886    }
2887#endif
2888}
2889
2890/*
2891 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2892 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2893 * the current aggregation queue as in-progress.
2894 */
2895static void
2896bxe_tpa_start(struct bxe_softc            *sc,
2897              struct bxe_fastpath         *fp,
2898              uint16_t                    queue,
2899              uint16_t                    cons,
2900              uint16_t                    prod,
2901              struct eth_fast_path_rx_cqe *cqe)
2902{
2903    struct bxe_sw_rx_bd tmp_bd;
2904    struct bxe_sw_rx_bd *rx_buf;
2905    struct eth_rx_bd *rx_bd;
2906    int max_agg_queues;
2907    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2908    uint16_t index;
2909
2910    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2911                       "cons=%d prod=%d\n",
2912          fp->index, queue, cons, prod);
2913
2914    max_agg_queues = MAX_AGG_QS(sc);
2915
2916    KASSERT((queue < max_agg_queues),
2917            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2918             fp->index, queue, max_agg_queues));
2919
2920    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2921            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2922             fp->index, queue));
2923
2924    /* copy the existing mbuf and mapping from the TPA pool */
2925    tmp_bd = tpa_info->bd;
2926
2927    if (tmp_bd.m == NULL) {
2928        BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n",
2929              fp->index, queue);
2930        /* XXX Error handling? */
2931        return;
2932    }
2933
2934    /* change the TPA queue to the start state */
2935    tpa_info->state            = BXE_TPA_STATE_START;
2936    tpa_info->placement_offset = cqe->placement_offset;
2937    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2938    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2939    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2940
2941    fp->rx_tpa_queue_used |= (1 << queue);
2942
2943    /*
2944     * If all the buffer descriptors are filled with mbufs then fill in
2945     * the current consumer index with a new BD. Else if a maximum Rx
2946     * buffer limit is imposed then fill in the next producer index.
2947     */
2948    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2949                prod : cons;
2950
2951    /* move the received mbuf and mapping to TPA pool */
2952    tpa_info->bd = fp->rx_mbuf_chain[cons];
2953
2954    /* release any existing RX BD mbuf mappings */
2955    if (cons != index) {
2956        rx_buf = &fp->rx_mbuf_chain[cons];
2957
2958        if (rx_buf->m_map != NULL) {
2959            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2960                            BUS_DMASYNC_POSTREAD);
2961            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2962        }
2963
2964        /*
2965         * We get here when the maximum number of rx buffers is less than
2966         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2967         * it out here without concern of a memory leak.
2968         */
2969        fp->rx_mbuf_chain[cons].m = NULL;
2970    }
2971
2972    /* update the Rx SW BD with the mbuf info from the TPA pool */
2973    fp->rx_mbuf_chain[index] = tmp_bd;
2974
2975    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2976    rx_bd = &fp->rx_chain[index];
2977    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2978    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2979}
2980
2981/*
2982 * When a TPA aggregation is completed, loop through the individual mbufs
2983 * of the aggregation, combining them into a single mbuf which will be sent
2984 * up the stack. Refill all freed SGEs with mbufs as we go along.
2985 */
2986static int
2987bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2988                   struct bxe_fastpath       *fp,
2989                   struct bxe_sw_tpa_info    *tpa_info,
2990                   uint16_t                  queue,
2991                   uint16_t                  pages,
2992                   struct mbuf               *m,
2993			       struct eth_end_agg_rx_cqe *cqe,
2994                   uint16_t                  cqe_idx)
2995{
2996    struct mbuf *m_frag;
2997    uint32_t frag_len, frag_size, i;
2998    uint16_t sge_idx;
2999    int rc = 0;
3000    int j;
3001
3002    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
3003
3004    BLOGD(sc, DBG_LRO,
3005          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
3006          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
3007
3008    /* make sure the aggregated frame is not too big to handle */
3009    if (pages > 8 * PAGES_PER_SGE) {
3010        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
3011                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
3012              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
3013              tpa_info->len_on_bd, frag_size);
3014        bxe_panic(sc, ("sge page count error\n"));
3015        return (EINVAL);
3016    }
3017
3018    /*
3019     * Scan through the scatter gather list pulling individual mbufs into a
3020     * single mbuf for the host stack.
3021     */
3022    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
3023        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
3024
3025        /*
3026         * Firmware gives the indices of the SGE as if the ring is an array
3027         * (meaning that the "next" element will consume 2 indices).
3028         */
3029        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
3030
3031        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
3032                           "sge_idx=%d frag_size=%d frag_len=%d\n",
3033              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
3034
3035        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3036
3037        /* allocate a new mbuf for the SGE */
3038        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3039        if (rc) {
3040            /* Leave all remaining SGEs in the ring! */
3041            return (rc);
3042        }
3043
3044        /* update the fragment length */
3045        m_frag->m_len = frag_len;
3046
3047        /* concatenate the fragment to the head mbuf */
3048        m_cat(m, m_frag);
3049        fp->eth_q_stats.mbuf_alloc_sge--;
3050
3051        /* update the TPA mbuf size and remaining fragment size */
3052        m->m_pkthdr.len += frag_len;
3053        frag_size -= frag_len;
3054    }
3055
3056    BLOGD(sc, DBG_LRO,
3057          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
3058          fp->index, queue, frag_size);
3059
3060    return (rc);
3061}
3062
3063static inline void
3064bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
3065{
3066    int i, j;
3067
3068    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
3069        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
3070
3071        for (j = 0; j < 2; j++) {
3072            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
3073            idx--;
3074        }
3075    }
3076}
3077
3078static inline void
3079bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
3080{
3081    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
3082    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
3083
3084    /*
3085     * Clear the two last indices in the page to 1. These are the indices that
3086     * correspond to the "next" element, hence will never be indicated and
3087     * should be removed from the calculations.
3088     */
3089    bxe_clear_sge_mask_next_elems(fp);
3090}
3091
3092static inline void
3093bxe_update_last_max_sge(struct bxe_fastpath *fp,
3094                        uint16_t            idx)
3095{
3096    uint16_t last_max = fp->last_max_sge;
3097
3098    if (SUB_S16(idx, last_max) > 0) {
3099        fp->last_max_sge = idx;
3100    }
3101}
3102
3103static inline void
3104bxe_update_sge_prod(struct bxe_softc          *sc,
3105                    struct bxe_fastpath       *fp,
3106                    uint16_t                  sge_len,
3107                    union eth_sgl_or_raw_data *cqe)
3108{
3109    uint16_t last_max, last_elem, first_elem;
3110    uint16_t delta = 0;
3111    uint16_t i;
3112
3113    if (!sge_len) {
3114        return;
3115    }
3116
3117    /* first mark all used pages */
3118    for (i = 0; i < sge_len; i++) {
3119        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
3120                            RX_SGE(le16toh(cqe->sgl[i])));
3121    }
3122
3123    BLOGD(sc, DBG_LRO,
3124          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3125          fp->index, sge_len - 1,
3126          le16toh(cqe->sgl[sge_len - 1]));
3127
3128    /* assume that the last SGE index is the biggest */
3129    bxe_update_last_max_sge(fp,
3130                            le16toh(cqe->sgl[sge_len - 1]));
3131
3132    last_max = RX_SGE(fp->last_max_sge);
3133    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3134    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3135
3136    /* if ring is not full */
3137    if (last_elem + 1 != first_elem) {
3138        last_elem++;
3139    }
3140
3141    /* now update the prod */
3142    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3143        if (__predict_true(fp->sge_mask[i])) {
3144            break;
3145        }
3146
3147        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3148        delta += BIT_VEC64_ELEM_SZ;
3149    }
3150
3151    if (delta > 0) {
3152        fp->rx_sge_prod += delta;
3153        /* clear page-end entries */
3154        bxe_clear_sge_mask_next_elems(fp);
3155    }
3156
3157    BLOGD(sc, DBG_LRO,
3158          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3159          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3160}
3161
3162/*
3163 * The aggregation on the current TPA queue has completed. Pull the individual
3164 * mbuf fragments together into a single mbuf, perform all necessary checksum
3165 * calculations, and send the resuting mbuf to the stack.
3166 */
3167static void
3168bxe_tpa_stop(struct bxe_softc          *sc,
3169             struct bxe_fastpath       *fp,
3170             struct bxe_sw_tpa_info    *tpa_info,
3171             uint16_t                  queue,
3172             uint16_t                  pages,
3173			 struct eth_end_agg_rx_cqe *cqe,
3174             uint16_t                  cqe_idx)
3175{
3176    if_t ifp = sc->ifp;
3177    struct mbuf *m;
3178    int rc = 0;
3179
3180    BLOGD(sc, DBG_LRO,
3181          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3182          fp->index, queue, tpa_info->placement_offset,
3183          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3184
3185    m = tpa_info->bd.m;
3186
3187    /* allocate a replacement before modifying existing mbuf */
3188    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3189    if (rc) {
3190        /* drop the frame and log an error */
3191        fp->eth_q_stats.rx_soft_errors++;
3192        goto bxe_tpa_stop_exit;
3193    }
3194
3195    /* we have a replacement, fixup the current mbuf */
3196    m_adj(m, tpa_info->placement_offset);
3197    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3198
3199    /* mark the checksums valid (taken care of by the firmware) */
3200    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3201    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3202    m->m_pkthdr.csum_data = 0xffff;
3203    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3204                               CSUM_IP_VALID   |
3205                               CSUM_DATA_VALID |
3206                               CSUM_PSEUDO_HDR);
3207
3208    /* aggregate all of the SGEs into a single mbuf */
3209    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3210    if (rc) {
3211        /* drop the packet and log an error */
3212        fp->eth_q_stats.rx_soft_errors++;
3213        m_freem(m);
3214    } else {
3215        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
3216            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3217            m->m_flags |= M_VLANTAG;
3218        }
3219
3220        /* assign packet to this interface interface */
3221        if_setrcvif(m, ifp);
3222
3223#if __FreeBSD_version >= 800000
3224        /* specify what RSS queue was used for this flow */
3225        m->m_pkthdr.flowid = fp->index;
3226        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3227#endif
3228
3229        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3230        fp->eth_q_stats.rx_tpa_pkts++;
3231
3232        /* pass the frame to the stack */
3233        if_input(ifp, m);
3234    }
3235
3236    /* we passed an mbuf up the stack or dropped the frame */
3237    fp->eth_q_stats.mbuf_alloc_tpa--;
3238
3239bxe_tpa_stop_exit:
3240
3241    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3242    fp->rx_tpa_queue_used &= ~(1 << queue);
3243}
3244
3245static uint8_t
3246bxe_service_rxsgl(
3247                 struct bxe_fastpath *fp,
3248                 uint16_t len,
3249                 uint16_t lenonbd,
3250                 struct mbuf *m,
3251                 struct eth_fast_path_rx_cqe *cqe_fp)
3252{
3253    struct mbuf *m_frag;
3254    uint16_t frags, frag_len;
3255    uint16_t sge_idx = 0;
3256    uint16_t j;
3257    uint8_t i, rc = 0;
3258    uint32_t frag_size;
3259
3260    /* adjust the mbuf */
3261    m->m_len = lenonbd;
3262
3263    frag_size =  len - lenonbd;
3264    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3265
3266    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3267        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3268
3269        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3270        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3271        m_frag->m_len = frag_len;
3272
3273       /* allocate a new mbuf for the SGE */
3274        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3275        if (rc) {
3276            /* Leave all remaining SGEs in the ring! */
3277            return (rc);
3278        }
3279        fp->eth_q_stats.mbuf_alloc_sge--;
3280
3281        /* concatenate the fragment to the head mbuf */
3282        m_cat(m, m_frag);
3283
3284        frag_size -= frag_len;
3285    }
3286
3287    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3288
3289    return rc;
3290}
3291
3292static uint8_t
3293bxe_rxeof(struct bxe_softc    *sc,
3294          struct bxe_fastpath *fp)
3295{
3296    if_t ifp = sc->ifp;
3297    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3298    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3299    int rx_pkts = 0;
3300    int rc = 0;
3301
3302    BXE_FP_RX_LOCK(fp);
3303
3304    /* CQ "next element" is of the size of the regular element */
3305    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3306    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3307        hw_cq_cons++;
3308    }
3309
3310    bd_cons = fp->rx_bd_cons;
3311    bd_prod = fp->rx_bd_prod;
3312    bd_prod_fw = bd_prod;
3313    sw_cq_cons = fp->rx_cq_cons;
3314    sw_cq_prod = fp->rx_cq_prod;
3315
3316    /*
3317     * Memory barrier necessary as speculative reads of the rx
3318     * buffer can be ahead of the index in the status block
3319     */
3320    rmb();
3321
3322    BLOGD(sc, DBG_RX,
3323          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3324          fp->index, hw_cq_cons, sw_cq_cons);
3325
3326    while (sw_cq_cons != hw_cq_cons) {
3327        struct bxe_sw_rx_bd *rx_buf = NULL;
3328        union eth_rx_cqe *cqe;
3329        struct eth_fast_path_rx_cqe *cqe_fp;
3330        uint8_t cqe_fp_flags;
3331        enum eth_rx_cqe_type cqe_fp_type;
3332        uint16_t len, lenonbd,  pad;
3333        struct mbuf *m = NULL;
3334
3335        comp_ring_cons = RCQ(sw_cq_cons);
3336        bd_prod = RX_BD(bd_prod);
3337        bd_cons = RX_BD(bd_cons);
3338
3339        cqe          = &fp->rcq_chain[comp_ring_cons];
3340        cqe_fp       = &cqe->fast_path_cqe;
3341        cqe_fp_flags = cqe_fp->type_error_flags;
3342        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3343
3344        BLOGD(sc, DBG_RX,
3345              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3346              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3347              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3348              fp->index,
3349              hw_cq_cons,
3350              sw_cq_cons,
3351              bd_prod,
3352              bd_cons,
3353              CQE_TYPE(cqe_fp_flags),
3354              cqe_fp_flags,
3355              cqe_fp->status_flags,
3356              le32toh(cqe_fp->rss_hash_result),
3357              le16toh(cqe_fp->vlan_tag),
3358              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3359              le16toh(cqe_fp->len_on_bd));
3360
3361        /* is this a slowpath msg? */
3362        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3363            bxe_sp_event(sc, fp, cqe);
3364            goto next_cqe;
3365        }
3366
3367        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3368
3369        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3370            struct bxe_sw_tpa_info *tpa_info;
3371            uint16_t frag_size, pages;
3372            uint8_t queue;
3373
3374#if 0
3375            /* sanity check */
3376            if (!fp->tpa_enable &&
3377                (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) {
3378                BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n",
3379                      CQE_TYPE(cqe_fp_type));
3380            }
3381#endif
3382
3383            if (CQE_TYPE_START(cqe_fp_type)) {
3384                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3385                              bd_cons, bd_prod, cqe_fp);
3386                m = NULL; /* packet not ready yet */
3387                goto next_rx;
3388            }
3389
3390            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3391                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3392
3393            queue = cqe->end_agg_cqe.queue_index;
3394            tpa_info = &fp->rx_tpa_info[queue];
3395
3396            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3397                  fp->index, queue);
3398
3399            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3400                         tpa_info->len_on_bd);
3401            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3402
3403            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3404                         &cqe->end_agg_cqe, comp_ring_cons);
3405
3406            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3407
3408            goto next_cqe;
3409        }
3410
3411        /* non TPA */
3412
3413        /* is this an error packet? */
3414        if (__predict_false(cqe_fp_flags &
3415                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3416            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3417            fp->eth_q_stats.rx_soft_errors++;
3418            goto next_rx;
3419        }
3420
3421        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3422        lenonbd = le16toh(cqe_fp->len_on_bd);
3423        pad = cqe_fp->placement_offset;
3424
3425        m = rx_buf->m;
3426
3427        if (__predict_false(m == NULL)) {
3428            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3429                  bd_cons, fp->index);
3430            goto next_rx;
3431        }
3432
3433        /* XXX double copy if packet length under a threshold */
3434
3435        /*
3436         * If all the buffer descriptors are filled with mbufs then fill in
3437         * the current consumer index with a new BD. Else if a maximum Rx
3438         * buffer limit is imposed then fill in the next producer index.
3439         */
3440        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3441                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3442                                      bd_prod : bd_cons);
3443        if (rc != 0) {
3444
3445            /* we simply reuse the received mbuf and don't post it to the stack */
3446            m = NULL;
3447
3448            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3449                  fp->index, rc);
3450            fp->eth_q_stats.rx_soft_errors++;
3451
3452            if (sc->max_rx_bufs != RX_BD_USABLE) {
3453                /* copy this consumer index to the producer index */
3454                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3455                       sizeof(struct bxe_sw_rx_bd));
3456                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3457            }
3458
3459            goto next_rx;
3460        }
3461
3462        /* current mbuf was detached from the bd */
3463        fp->eth_q_stats.mbuf_alloc_rx--;
3464
3465        /* we allocated a replacement mbuf, fixup the current one */
3466        m_adj(m, pad);
3467        m->m_pkthdr.len = m->m_len = len;
3468
3469        if (len != lenonbd){
3470            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3471            if (rc)
3472                break;
3473            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3474        }
3475
3476        /* assign packet to this interface interface */
3477	if_setrcvif(m, ifp);
3478
3479        /* assume no hardware checksum has complated */
3480        m->m_pkthdr.csum_flags = 0;
3481
3482        /* validate checksum if offload enabled */
3483        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3484            /* check for a valid IP frame */
3485            if (!(cqe->fast_path_cqe.status_flags &
3486                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3487                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3488                if (__predict_false(cqe_fp_flags &
3489                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3490                    fp->eth_q_stats.rx_hw_csum_errors++;
3491                } else {
3492                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3493                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3494                }
3495            }
3496
3497            /* check for a valid TCP/UDP frame */
3498            if (!(cqe->fast_path_cqe.status_flags &
3499                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3500                if (__predict_false(cqe_fp_flags &
3501                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3502                    fp->eth_q_stats.rx_hw_csum_errors++;
3503                } else {
3504                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3505                    m->m_pkthdr.csum_data = 0xFFFF;
3506                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3507                                               CSUM_PSEUDO_HDR);
3508                }
3509            }
3510        }
3511
3512        /* if there is a VLAN tag then flag that info */
3513        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
3514            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3515            m->m_flags |= M_VLANTAG;
3516        }
3517
3518#if __FreeBSD_version >= 800000
3519        /* specify what RSS queue was used for this flow */
3520        m->m_pkthdr.flowid = fp->index;
3521        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3522#endif
3523
3524next_rx:
3525
3526        bd_cons    = RX_BD_NEXT(bd_cons);
3527        bd_prod    = RX_BD_NEXT(bd_prod);
3528        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3529
3530        /* pass the frame to the stack */
3531        if (__predict_true(m != NULL)) {
3532            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3533            rx_pkts++;
3534            if_input(ifp, m);
3535        }
3536
3537next_cqe:
3538
3539        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3540        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3541
3542        /* limit spinning on the queue */
3543        if (rc != 0)
3544            break;
3545
3546        if (rx_pkts == sc->rx_budget) {
3547            fp->eth_q_stats.rx_budget_reached++;
3548            break;
3549        }
3550    } /* while work to do */
3551
3552    fp->rx_bd_cons = bd_cons;
3553    fp->rx_bd_prod = bd_prod_fw;
3554    fp->rx_cq_cons = sw_cq_cons;
3555    fp->rx_cq_prod = sw_cq_prod;
3556
3557    /* Update producers */
3558    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3559
3560    fp->eth_q_stats.rx_pkts += rx_pkts;
3561    fp->eth_q_stats.rx_calls++;
3562
3563    BXE_FP_RX_UNLOCK(fp);
3564
3565    return (sw_cq_cons != hw_cq_cons);
3566}
3567
3568static uint16_t
3569bxe_free_tx_pkt(struct bxe_softc    *sc,
3570                struct bxe_fastpath *fp,
3571                uint16_t            idx)
3572{
3573    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3574    struct eth_tx_start_bd *tx_start_bd;
3575    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3576    uint16_t new_cons;
3577    int nbd;
3578
3579    /* unmap the mbuf from non-paged memory */
3580    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3581
3582    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3583    nbd = le16toh(tx_start_bd->nbd) - 1;
3584
3585#if 0
3586    if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) {
3587        bxe_panic(sc, ("BAD nbd!\n"));
3588    }
3589#endif
3590
3591    new_cons = (tx_buf->first_bd + nbd);
3592
3593#if 0
3594    struct eth_tx_bd *tx_data_bd;
3595
3596    /*
3597     * The following code doesn't do anything but is left here
3598     * for clarity on what the new value of new_cons skipped.
3599     */
3600
3601    /* get the next bd */
3602    bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3603
3604    /* skip the parse bd */
3605    --nbd;
3606    bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3607
3608    /* skip the TSO split header bd since they have no mapping */
3609    if (tx_buf->flags & BXE_TSO_SPLIT_BD) {
3610        --nbd;
3611        bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3612    }
3613
3614    /* now free frags */
3615    while (nbd > 0) {
3616        tx_data_bd = &fp->tx_chain[bd_idx].reg_bd;
3617        if (--nbd) {
3618            bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3619        }
3620    }
3621#endif
3622
3623    /* free the mbuf */
3624    if (__predict_true(tx_buf->m != NULL)) {
3625        m_freem(tx_buf->m);
3626        fp->eth_q_stats.mbuf_alloc_tx--;
3627    } else {
3628        fp->eth_q_stats.tx_chain_lost_mbuf++;
3629    }
3630
3631    tx_buf->m = NULL;
3632    tx_buf->first_bd = 0;
3633
3634    return (new_cons);
3635}
3636
3637/* transmit timeout watchdog */
3638static int
3639bxe_watchdog(struct bxe_softc    *sc,
3640             struct bxe_fastpath *fp)
3641{
3642    BXE_FP_TX_LOCK(fp);
3643
3644    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3645        BXE_FP_TX_UNLOCK(fp);
3646        return (0);
3647    }
3648
3649    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3650
3651    BXE_FP_TX_UNLOCK(fp);
3652
3653    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3654    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3655
3656    return (-1);
3657}
3658
3659/* processes transmit completions */
3660static uint8_t
3661bxe_txeof(struct bxe_softc    *sc,
3662          struct bxe_fastpath *fp)
3663{
3664    if_t ifp = sc->ifp;
3665    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3666    uint16_t tx_bd_avail;
3667
3668    BXE_FP_TX_LOCK_ASSERT(fp);
3669
3670    bd_cons = fp->tx_bd_cons;
3671    hw_cons = le16toh(*fp->tx_cons_sb);
3672    sw_cons = fp->tx_pkt_cons;
3673
3674    while (sw_cons != hw_cons) {
3675        pkt_cons = TX_BD(sw_cons);
3676
3677        BLOGD(sc, DBG_TX,
3678              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3679              fp->index, hw_cons, sw_cons, pkt_cons);
3680
3681        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3682
3683        sw_cons++;
3684    }
3685
3686    fp->tx_pkt_cons = sw_cons;
3687    fp->tx_bd_cons  = bd_cons;
3688
3689    BLOGD(sc, DBG_TX,
3690          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3691          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3692
3693    mb();
3694
3695    tx_bd_avail = bxe_tx_avail(sc, fp);
3696
3697    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3698        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3699    } else {
3700        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3701    }
3702
3703    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3704        /* reset the watchdog timer if there are pending transmits */
3705        fp->watchdog_timer = BXE_TX_TIMEOUT;
3706        return (TRUE);
3707    } else {
3708        /* clear watchdog when there are no pending transmits */
3709        fp->watchdog_timer = 0;
3710        return (FALSE);
3711    }
3712}
3713
3714static void
3715bxe_drain_tx_queues(struct bxe_softc *sc)
3716{
3717    struct bxe_fastpath *fp;
3718    int i, count;
3719
3720    /* wait until all TX fastpath tasks have completed */
3721    for (i = 0; i < sc->num_queues; i++) {
3722        fp = &sc->fp[i];
3723
3724        count = 1000;
3725
3726        while (bxe_has_tx_work(fp)) {
3727
3728            BXE_FP_TX_LOCK(fp);
3729            bxe_txeof(sc, fp);
3730            BXE_FP_TX_UNLOCK(fp);
3731
3732            if (count == 0) {
3733                BLOGE(sc, "Timeout waiting for fp[%d] "
3734                          "transmits to complete!\n", i);
3735                bxe_panic(sc, ("tx drain failure\n"));
3736                return;
3737            }
3738
3739            count--;
3740            DELAY(1000);
3741            rmb();
3742        }
3743    }
3744
3745    return;
3746}
3747
3748static int
3749bxe_del_all_macs(struct bxe_softc          *sc,
3750                 struct ecore_vlan_mac_obj *mac_obj,
3751                 int                       mac_type,
3752                 uint8_t                   wait_for_comp)
3753{
3754    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3755    int rc;
3756
3757    /* wait for completion of requested */
3758    if (wait_for_comp) {
3759        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3760    }
3761
3762    /* Set the mac type of addresses we want to clear */
3763    bxe_set_bit(mac_type, &vlan_mac_flags);
3764
3765    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3766    if (rc < 0) {
3767        BLOGE(sc, "Failed to delete MACs (%d)\n", rc);
3768    }
3769
3770    return (rc);
3771}
3772
3773static int
3774bxe_fill_accept_flags(struct bxe_softc *sc,
3775                      uint32_t         rx_mode,
3776                      unsigned long    *rx_accept_flags,
3777                      unsigned long    *tx_accept_flags)
3778{
3779    /* Clear the flags first */
3780    *rx_accept_flags = 0;
3781    *tx_accept_flags = 0;
3782
3783    switch (rx_mode) {
3784    case BXE_RX_MODE_NONE:
3785        /*
3786         * 'drop all' supersedes any accept flags that may have been
3787         * passed to the function.
3788         */
3789        break;
3790
3791    case BXE_RX_MODE_NORMAL:
3792        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3793        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3794        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3795
3796        /* internal switching mode */
3797        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3798        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3799        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3800
3801        break;
3802
3803    case BXE_RX_MODE_ALLMULTI:
3804        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3805        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3806        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3807
3808        /* internal switching mode */
3809        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3810        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3811        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3812
3813        break;
3814
3815    case BXE_RX_MODE_PROMISC:
3816        /*
3817         * According to deffinition of SI mode, iface in promisc mode
3818         * should receive matched and unmatched (in resolution of port)
3819         * unicast packets.
3820         */
3821        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3822        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3823        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3824        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3825
3826        /* internal switching mode */
3827        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3828        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3829
3830        if (IS_MF_SI(sc)) {
3831            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3832        } else {
3833            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3834        }
3835
3836        break;
3837
3838    default:
3839        BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode);
3840        return (-1);
3841    }
3842
3843    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3844    if (rx_mode != BXE_RX_MODE_NONE) {
3845        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3846        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3847    }
3848
3849    return (0);
3850}
3851
3852static int
3853bxe_set_q_rx_mode(struct bxe_softc *sc,
3854                  uint8_t          cl_id,
3855                  unsigned long    rx_mode_flags,
3856                  unsigned long    rx_accept_flags,
3857                  unsigned long    tx_accept_flags,
3858                  unsigned long    ramrod_flags)
3859{
3860    struct ecore_rx_mode_ramrod_params ramrod_param;
3861    int rc;
3862
3863    memset(&ramrod_param, 0, sizeof(ramrod_param));
3864
3865    /* Prepare ramrod parameters */
3866    ramrod_param.cid = 0;
3867    ramrod_param.cl_id = cl_id;
3868    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3869    ramrod_param.func_id = SC_FUNC(sc);
3870
3871    ramrod_param.pstate = &sc->sp_state;
3872    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3873
3874    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3875    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3876
3877    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3878
3879    ramrod_param.ramrod_flags = ramrod_flags;
3880    ramrod_param.rx_mode_flags = rx_mode_flags;
3881
3882    ramrod_param.rx_accept_flags = rx_accept_flags;
3883    ramrod_param.tx_accept_flags = tx_accept_flags;
3884
3885    rc = ecore_config_rx_mode(sc, &ramrod_param);
3886    if (rc < 0) {
3887        BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode);
3888        return (rc);
3889    }
3890
3891    return (0);
3892}
3893
3894static int
3895bxe_set_storm_rx_mode(struct bxe_softc *sc)
3896{
3897    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3898    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3899    int rc;
3900
3901    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3902                               &tx_accept_flags);
3903    if (rc) {
3904        return (rc);
3905    }
3906
3907    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3908    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3909
3910    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3911    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3912                              rx_accept_flags, tx_accept_flags,
3913                              ramrod_flags));
3914}
3915
3916/* returns the "mcp load_code" according to global load_count array */
3917static int
3918bxe_nic_load_no_mcp(struct bxe_softc *sc)
3919{
3920    int path = SC_PATH(sc);
3921    int port = SC_PORT(sc);
3922
3923    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3924          path, load_count[path][0], load_count[path][1],
3925          load_count[path][2]);
3926    load_count[path][0]++;
3927    load_count[path][1 + port]++;
3928    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3929          path, load_count[path][0], load_count[path][1],
3930          load_count[path][2]);
3931    if (load_count[path][0] == 1) {
3932        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3933    } else if (load_count[path][1 + port] == 1) {
3934        return (FW_MSG_CODE_DRV_LOAD_PORT);
3935    } else {
3936        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3937    }
3938}
3939
3940/* returns the "mcp load_code" according to global load_count array */
3941static int
3942bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3943{
3944    int port = SC_PORT(sc);
3945    int path = SC_PATH(sc);
3946
3947    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3948          path, load_count[path][0], load_count[path][1],
3949          load_count[path][2]);
3950    load_count[path][0]--;
3951    load_count[path][1 + port]--;
3952    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3953          path, load_count[path][0], load_count[path][1],
3954          load_count[path][2]);
3955    if (load_count[path][0] == 0) {
3956        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3957    } else if (load_count[path][1 + port] == 0) {
3958        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3959    } else {
3960        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3961    }
3962}
3963
3964/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3965static uint32_t
3966bxe_send_unload_req(struct bxe_softc *sc,
3967                    int              unload_mode)
3968{
3969    uint32_t reset_code = 0;
3970#if 0
3971    int port = SC_PORT(sc);
3972    int path = SC_PATH(sc);
3973#endif
3974
3975    /* Select the UNLOAD request mode */
3976    if (unload_mode == UNLOAD_NORMAL) {
3977        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3978    }
3979#if 0
3980    else if (sc->flags & BXE_NO_WOL_FLAG) {
3981        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
3982    } else if (sc->wol) {
3983        uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3984        uint8_t *mac_addr = sc->dev->dev_addr;
3985        uint32_t val;
3986        uint16_t pmc;
3987
3988        /*
3989         * The mac address is written to entries 1-4 to
3990         * preserve entry 0 which is used by the PMF
3991         */
3992        uint8_t entry = (SC_VN(sc) + 1)*8;
3993
3994        val = (mac_addr[0] << 8) | mac_addr[1];
3995        EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
3996
3997        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3998              (mac_addr[4] << 8) | mac_addr[5];
3999        EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
4000
4001        /* Enable the PME and clear the status */
4002        pmc = pci_read_config(sc->dev,
4003                              (sc->devinfo.pcie_pm_cap_reg +
4004                               PCIR_POWER_STATUS),
4005                              2);
4006        pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME;
4007        pci_write_config(sc->dev,
4008                         (sc->devinfo.pcie_pm_cap_reg +
4009                          PCIR_POWER_STATUS),
4010                         pmc, 4);
4011
4012        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
4013    }
4014#endif
4015    else {
4016        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
4017    }
4018
4019    /* Send the request to the MCP */
4020    if (!BXE_NOMCP(sc)) {
4021        reset_code = bxe_fw_command(sc, reset_code, 0);
4022    } else {
4023        reset_code = bxe_nic_unload_no_mcp(sc);
4024    }
4025
4026    return (reset_code);
4027}
4028
4029/* send UNLOAD_DONE command to the MCP */
4030static void
4031bxe_send_unload_done(struct bxe_softc *sc,
4032                     uint8_t          keep_link)
4033{
4034    uint32_t reset_param =
4035        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
4036
4037    /* Report UNLOAD_DONE to MCP */
4038    if (!BXE_NOMCP(sc)) {
4039        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
4040    }
4041}
4042
4043static int
4044bxe_func_wait_started(struct bxe_softc *sc)
4045{
4046    int tout = 50;
4047
4048    if (!sc->port.pmf) {
4049        return (0);
4050    }
4051
4052    /*
4053     * (assumption: No Attention from MCP at this stage)
4054     * PMF probably in the middle of TX disable/enable transaction
4055     * 1. Sync IRS for default SB
4056     * 2. Sync SP queue - this guarantees us that attention handling started
4057     * 3. Wait, that TX disable/enable transaction completes
4058     *
4059     * 1+2 guarantee that if DCBX attention was scheduled it already changed
4060     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
4061     * received completion for the transaction the state is TX_STOPPED.
4062     * State will return to STARTED after completion of TX_STOPPED-->STARTED
4063     * transaction.
4064     */
4065
4066    /* XXX make sure default SB ISR is done */
4067    /* need a way to synchronize an irq (intr_mtx?) */
4068
4069    /* XXX flush any work queues */
4070
4071    while (ecore_func_get_state(sc, &sc->func_obj) !=
4072           ECORE_F_STATE_STARTED && tout--) {
4073        DELAY(20000);
4074    }
4075
4076    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
4077        /*
4078         * Failed to complete the transaction in a "good way"
4079         * Force both transactions with CLR bit.
4080         */
4081        struct ecore_func_state_params func_params = { NULL };
4082
4083        BLOGE(sc, "Unexpected function state! "
4084                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
4085
4086        func_params.f_obj = &sc->func_obj;
4087        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4088
4089        /* STARTED-->TX_STOPPED */
4090        func_params.cmd = ECORE_F_CMD_TX_STOP;
4091        ecore_func_state_change(sc, &func_params);
4092
4093        /* TX_STOPPED-->STARTED */
4094        func_params.cmd = ECORE_F_CMD_TX_START;
4095        return (ecore_func_state_change(sc, &func_params));
4096    }
4097
4098    return (0);
4099}
4100
4101static int
4102bxe_stop_queue(struct bxe_softc *sc,
4103               int              index)
4104{
4105    struct bxe_fastpath *fp = &sc->fp[index];
4106    struct ecore_queue_state_params q_params = { NULL };
4107    int rc;
4108
4109    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
4110
4111    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
4112    /* We want to wait for completion in this context */
4113    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
4114
4115    /* Stop the primary connection: */
4116
4117    /* ...halt the connection */
4118    q_params.cmd = ECORE_Q_CMD_HALT;
4119    rc = ecore_queue_state_change(sc, &q_params);
4120    if (rc) {
4121        return (rc);
4122    }
4123
4124    /* ...terminate the connection */
4125    q_params.cmd = ECORE_Q_CMD_TERMINATE;
4126    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
4127    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
4128    rc = ecore_queue_state_change(sc, &q_params);
4129    if (rc) {
4130        return (rc);
4131    }
4132
4133    /* ...delete cfc entry */
4134    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
4135    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
4136    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
4137    return (ecore_queue_state_change(sc, &q_params));
4138}
4139
4140/* wait for the outstanding SP commands */
4141static inline uint8_t
4142bxe_wait_sp_comp(struct bxe_softc *sc,
4143                 unsigned long    mask)
4144{
4145    unsigned long tmp;
4146    int tout = 5000; /* wait for 5 secs tops */
4147
4148    while (tout--) {
4149        mb();
4150        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
4151            return (TRUE);
4152        }
4153
4154        DELAY(1000);
4155    }
4156
4157    mb();
4158
4159    tmp = atomic_load_acq_long(&sc->sp_state);
4160    if (tmp & mask) {
4161        BLOGE(sc, "Filtering completion timed out: "
4162                  "sp_state 0x%lx, mask 0x%lx\n",
4163              tmp, mask);
4164        return (FALSE);
4165    }
4166
4167    return (FALSE);
4168}
4169
4170static int
4171bxe_func_stop(struct bxe_softc *sc)
4172{
4173    struct ecore_func_state_params func_params = { NULL };
4174    int rc;
4175
4176    /* prepare parameters for function state transitions */
4177    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4178    func_params.f_obj = &sc->func_obj;
4179    func_params.cmd = ECORE_F_CMD_STOP;
4180
4181    /*
4182     * Try to stop the function the 'good way'. If it fails (in case
4183     * of a parity error during bxe_chip_cleanup()) and we are
4184     * not in a debug mode, perform a state transaction in order to
4185     * enable further HW_RESET transaction.
4186     */
4187    rc = ecore_func_state_change(sc, &func_params);
4188    if (rc) {
4189        BLOGE(sc, "FUNC_STOP ramrod failed. "
4190                  "Running a dry transaction\n");
4191        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4192        return (ecore_func_state_change(sc, &func_params));
4193    }
4194
4195    return (0);
4196}
4197
4198static int
4199bxe_reset_hw(struct bxe_softc *sc,
4200             uint32_t         load_code)
4201{
4202    struct ecore_func_state_params func_params = { NULL };
4203
4204    /* Prepare parameters for function state transitions */
4205    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4206
4207    func_params.f_obj = &sc->func_obj;
4208    func_params.cmd = ECORE_F_CMD_HW_RESET;
4209
4210    func_params.params.hw_init.load_phase = load_code;
4211
4212    return (ecore_func_state_change(sc, &func_params));
4213}
4214
4215static void
4216bxe_int_disable_sync(struct bxe_softc *sc,
4217                     int              disable_hw)
4218{
4219    if (disable_hw) {
4220        /* prevent the HW from sending interrupts */
4221        bxe_int_disable(sc);
4222    }
4223
4224    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4225    /* make sure all ISRs are done */
4226
4227    /* XXX make sure sp_task is not running */
4228    /* cancel and flush work queues */
4229}
4230
4231static void
4232bxe_chip_cleanup(struct bxe_softc *sc,
4233                 uint32_t         unload_mode,
4234                 uint8_t          keep_link)
4235{
4236    int port = SC_PORT(sc);
4237    struct ecore_mcast_ramrod_params rparam = { NULL };
4238    uint32_t reset_code;
4239    int i, rc = 0;
4240
4241    bxe_drain_tx_queues(sc);
4242
4243    /* give HW time to discard old tx messages */
4244    DELAY(1000);
4245
4246    /* Clean all ETH MACs */
4247    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4248    if (rc < 0) {
4249        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4250    }
4251
4252    /* Clean up UC list  */
4253    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4254    if (rc < 0) {
4255        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4256    }
4257
4258    /* Disable LLH */
4259    if (!CHIP_IS_E1(sc)) {
4260        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4261    }
4262
4263    /* Set "drop all" to stop Rx */
4264
4265    /*
4266     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4267     * a race between the completion code and this code.
4268     */
4269    BXE_MCAST_LOCK(sc);
4270
4271    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4272        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4273    } else {
4274        bxe_set_storm_rx_mode(sc);
4275    }
4276
4277    /* Clean up multicast configuration */
4278    rparam.mcast_obj = &sc->mcast_obj;
4279    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4280    if (rc < 0) {
4281        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4282    }
4283
4284    BXE_MCAST_UNLOCK(sc);
4285
4286    // XXX bxe_iov_chip_cleanup(sc);
4287
4288    /*
4289     * Send the UNLOAD_REQUEST to the MCP. This will return if
4290     * this function should perform FUNCTION, PORT, or COMMON HW
4291     * reset.
4292     */
4293    reset_code = bxe_send_unload_req(sc, unload_mode);
4294
4295    /*
4296     * (assumption: No Attention from MCP at this stage)
4297     * PMF probably in the middle of TX disable/enable transaction
4298     */
4299    rc = bxe_func_wait_started(sc);
4300    if (rc) {
4301        BLOGE(sc, "bxe_func_wait_started failed\n");
4302    }
4303
4304    /*
4305     * Close multi and leading connections
4306     * Completions for ramrods are collected in a synchronous way
4307     */
4308    for (i = 0; i < sc->num_queues; i++) {
4309        if (bxe_stop_queue(sc, i)) {
4310            goto unload_error;
4311        }
4312    }
4313
4314    /*
4315     * If SP settings didn't get completed so far - something
4316     * very wrong has happen.
4317     */
4318    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4319        BLOGE(sc, "Common slow path ramrods got stuck!\n");
4320    }
4321
4322unload_error:
4323
4324    rc = bxe_func_stop(sc);
4325    if (rc) {
4326        BLOGE(sc, "Function stop failed!\n");
4327    }
4328
4329    /* disable HW interrupts */
4330    bxe_int_disable_sync(sc, TRUE);
4331
4332    /* detach interrupts */
4333    bxe_interrupt_detach(sc);
4334
4335    /* Reset the chip */
4336    rc = bxe_reset_hw(sc, reset_code);
4337    if (rc) {
4338        BLOGE(sc, "Hardware reset failed\n");
4339    }
4340
4341    /* Report UNLOAD_DONE to MCP */
4342    bxe_send_unload_done(sc, keep_link);
4343}
4344
4345static void
4346bxe_disable_close_the_gate(struct bxe_softc *sc)
4347{
4348    uint32_t val;
4349    int port = SC_PORT(sc);
4350
4351    BLOGD(sc, DBG_LOAD,
4352          "Disabling 'close the gates'\n");
4353
4354    if (CHIP_IS_E1(sc)) {
4355        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4356                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4357        val = REG_RD(sc, addr);
4358        val &= ~(0x300);
4359        REG_WR(sc, addr, val);
4360    } else {
4361        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4362        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4363                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4364        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4365    }
4366}
4367
4368/*
4369 * Cleans the object that have internal lists without sending
4370 * ramrods. Should be run when interrutps are disabled.
4371 */
4372static void
4373bxe_squeeze_objects(struct bxe_softc *sc)
4374{
4375    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4376    struct ecore_mcast_ramrod_params rparam = { NULL };
4377    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4378    int rc;
4379
4380    /* Cleanup MACs' object first... */
4381
4382    /* Wait for completion of requested */
4383    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4384    /* Perform a dry cleanup */
4385    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4386
4387    /* Clean ETH primary MAC */
4388    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4389    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4390                             &ramrod_flags);
4391    if (rc != 0) {
4392        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4393    }
4394
4395    /* Cleanup UC list */
4396    vlan_mac_flags = 0;
4397    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4398    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4399                             &ramrod_flags);
4400    if (rc != 0) {
4401        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4402    }
4403
4404    /* Now clean mcast object... */
4405
4406    rparam.mcast_obj = &sc->mcast_obj;
4407    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4408
4409    /* Add a DEL command... */
4410    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4411    if (rc < 0) {
4412        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4413    }
4414
4415    /* now wait until all pending commands are cleared */
4416
4417    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4418    while (rc != 0) {
4419        if (rc < 0) {
4420            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4421            return;
4422        }
4423
4424        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4425    }
4426}
4427
4428/* stop the controller */
4429static __noinline int
4430bxe_nic_unload(struct bxe_softc *sc,
4431               uint32_t         unload_mode,
4432               uint8_t          keep_link)
4433{
4434    uint8_t global = FALSE;
4435    uint32_t val;
4436
4437    BXE_CORE_LOCK_ASSERT(sc);
4438
4439    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4440
4441    /* mark driver as unloaded in shmem2 */
4442    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4443        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4444        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4445                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4446    }
4447
4448    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4449        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4450        /*
4451         * We can get here if the driver has been unloaded
4452         * during parity error recovery and is either waiting for a
4453         * leader to complete or for other functions to unload and
4454         * then ifconfig down has been issued. In this case we want to
4455         * unload and let other functions to complete a recovery
4456         * process.
4457         */
4458        sc->recovery_state = BXE_RECOVERY_DONE;
4459        sc->is_leader = 0;
4460        bxe_release_leader_lock(sc);
4461        mb();
4462
4463        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4464        BLOGE(sc, "Can't unload in closed or error state\n");
4465        return (-1);
4466    }
4467
4468    /*
4469     * Nothing to do during unload if previous bxe_nic_load()
4470     * did not completed succesfully - all resourses are released.
4471     */
4472    if ((sc->state == BXE_STATE_CLOSED) ||
4473        (sc->state == BXE_STATE_ERROR)) {
4474        return (0);
4475    }
4476
4477    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4478    mb();
4479
4480    /* stop tx */
4481    bxe_tx_disable(sc);
4482
4483    sc->rx_mode = BXE_RX_MODE_NONE;
4484    /* XXX set rx mode ??? */
4485
4486    if (IS_PF(sc)) {
4487        /* set ALWAYS_ALIVE bit in shmem */
4488        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4489
4490        bxe_drv_pulse(sc);
4491
4492        bxe_stats_handle(sc, STATS_EVENT_STOP);
4493        bxe_save_statistics(sc);
4494    }
4495
4496    /* wait till consumers catch up with producers in all queues */
4497    bxe_drain_tx_queues(sc);
4498
4499    /* if VF indicate to PF this function is going down (PF will delete sp
4500     * elements and clear initializations
4501     */
4502    if (IS_VF(sc)) {
4503        ; /* bxe_vfpf_close_vf(sc); */
4504    } else if (unload_mode != UNLOAD_RECOVERY) {
4505        /* if this is a normal/close unload need to clean up chip */
4506        bxe_chip_cleanup(sc, unload_mode, keep_link);
4507    } else {
4508        /* Send the UNLOAD_REQUEST to the MCP */
4509        bxe_send_unload_req(sc, unload_mode);
4510
4511        /*
4512         * Prevent transactions to host from the functions on the
4513         * engine that doesn't reset global blocks in case of global
4514         * attention once gloabl blocks are reset and gates are opened
4515         * (the engine which leader will perform the recovery
4516         * last).
4517         */
4518        if (!CHIP_IS_E1x(sc)) {
4519            bxe_pf_disable(sc);
4520        }
4521
4522        /* disable HW interrupts */
4523        bxe_int_disable_sync(sc, TRUE);
4524
4525        /* detach interrupts */
4526        bxe_interrupt_detach(sc);
4527
4528        /* Report UNLOAD_DONE to MCP */
4529        bxe_send_unload_done(sc, FALSE);
4530    }
4531
4532    /*
4533     * At this stage no more interrupts will arrive so we may safely clean
4534     * the queue'able objects here in case they failed to get cleaned so far.
4535     */
4536    if (IS_PF(sc)) {
4537        bxe_squeeze_objects(sc);
4538    }
4539
4540    /* There should be no more pending SP commands at this stage */
4541    sc->sp_state = 0;
4542
4543    sc->port.pmf = 0;
4544
4545    bxe_free_fp_buffers(sc);
4546
4547    if (IS_PF(sc)) {
4548        bxe_free_mem(sc);
4549    }
4550
4551    bxe_free_fw_stats_mem(sc);
4552
4553    sc->state = BXE_STATE_CLOSED;
4554
4555    /*
4556     * Check if there are pending parity attentions. If there are - set
4557     * RECOVERY_IN_PROGRESS.
4558     */
4559    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4560        bxe_set_reset_in_progress(sc);
4561
4562        /* Set RESET_IS_GLOBAL if needed */
4563        if (global) {
4564            bxe_set_reset_global(sc);
4565        }
4566    }
4567
4568    /*
4569     * The last driver must disable a "close the gate" if there is no
4570     * parity attention or "process kill" pending.
4571     */
4572    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4573        bxe_reset_is_done(sc, SC_PATH(sc))) {
4574        bxe_disable_close_the_gate(sc);
4575    }
4576
4577    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4578
4579    return (0);
4580}
4581
4582/*
4583 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4584 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4585 */
4586static int
4587bxe_ifmedia_update(struct ifnet  *ifp)
4588{
4589    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4590    struct ifmedia *ifm;
4591
4592    ifm = &sc->ifmedia;
4593
4594    /* We only support Ethernet media type. */
4595    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4596        return (EINVAL);
4597    }
4598
4599    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4600    case IFM_AUTO:
4601         break;
4602    case IFM_10G_CX4:
4603    case IFM_10G_SR:
4604    case IFM_10G_T:
4605    case IFM_10G_TWINAX:
4606    default:
4607        /* We don't support changing the media type. */
4608        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4609              IFM_SUBTYPE(ifm->ifm_media));
4610        return (EINVAL);
4611    }
4612
4613    return (0);
4614}
4615
4616/*
4617 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4618 */
4619static void
4620bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4621{
4622    struct bxe_softc *sc = if_getsoftc(ifp);
4623
4624    /* Report link down if the driver isn't running. */
4625    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4626        ifmr->ifm_active |= IFM_NONE;
4627        return;
4628    }
4629
4630    /* Setup the default interface info. */
4631    ifmr->ifm_status = IFM_AVALID;
4632    ifmr->ifm_active = IFM_ETHER;
4633
4634    if (sc->link_vars.link_up) {
4635        ifmr->ifm_status |= IFM_ACTIVE;
4636    } else {
4637        ifmr->ifm_active |= IFM_NONE;
4638        return;
4639    }
4640
4641    ifmr->ifm_active |= sc->media;
4642
4643    if (sc->link_vars.duplex == DUPLEX_FULL) {
4644        ifmr->ifm_active |= IFM_FDX;
4645    } else {
4646        ifmr->ifm_active |= IFM_HDX;
4647    }
4648}
4649
4650static int
4651bxe_ioctl_nvram(struct bxe_softc *sc,
4652                uint32_t         priv_op,
4653                struct ifreq     *ifr)
4654{
4655    struct bxe_nvram_data nvdata_base;
4656    struct bxe_nvram_data *nvdata;
4657    int len;
4658    int error = 0;
4659
4660    copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base));
4661
4662    len = (sizeof(struct bxe_nvram_data) +
4663           nvdata_base.len -
4664           sizeof(uint32_t));
4665
4666    if (len > sizeof(struct bxe_nvram_data)) {
4667        if ((nvdata = (struct bxe_nvram_data *)
4668                 malloc(len, M_DEVBUF,
4669                        (M_NOWAIT | M_ZERO))) == NULL) {
4670            BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n");
4671            return (1);
4672        }
4673        memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
4674    } else {
4675        nvdata = &nvdata_base;
4676    }
4677
4678    if (priv_op == BXE_IOC_RD_NVRAM) {
4679        BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n",
4680              nvdata->offset, nvdata->len);
4681        error = bxe_nvram_read(sc,
4682                               nvdata->offset,
4683                               (uint8_t *)nvdata->value,
4684                               nvdata->len);
4685        copyout(nvdata, ifr->ifr_data, len);
4686    } else { /* BXE_IOC_WR_NVRAM */
4687        BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n",
4688              nvdata->offset, nvdata->len);
4689        copyin(ifr->ifr_data, nvdata, len);
4690        error = bxe_nvram_write(sc,
4691                                nvdata->offset,
4692                                (uint8_t *)nvdata->value,
4693                                nvdata->len);
4694    }
4695
4696    if (len > sizeof(struct bxe_nvram_data)) {
4697        free(nvdata, M_DEVBUF);
4698    }
4699
4700    return (error);
4701}
4702
4703static int
4704bxe_ioctl_stats_show(struct bxe_softc *sc,
4705                     uint32_t         priv_op,
4706                     struct ifreq     *ifr)
4707{
4708    const size_t str_size   = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
4709    const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
4710    caddr_t p_tmp;
4711    uint32_t *offset;
4712    int i;
4713
4714    switch (priv_op)
4715    {
4716    case BXE_IOC_STATS_SHOW_NUM:
4717        memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
4718        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
4719            BXE_NUM_ETH_STATS;
4720        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
4721            STAT_NAME_LEN;
4722        return (0);
4723
4724    case BXE_IOC_STATS_SHOW_STR:
4725        memset(ifr->ifr_data, 0, str_size);
4726        p_tmp = ifr->ifr_data;
4727        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4728            strcpy(p_tmp, bxe_eth_stats_arr[i].string);
4729            p_tmp += STAT_NAME_LEN;
4730        }
4731        return (0);
4732
4733    case BXE_IOC_STATS_SHOW_CNT:
4734        memset(ifr->ifr_data, 0, stats_size);
4735        p_tmp = ifr->ifr_data;
4736        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4737            offset = ((uint32_t *)&sc->eth_stats +
4738                      bxe_eth_stats_arr[i].offset);
4739            switch (bxe_eth_stats_arr[i].size) {
4740            case 4:
4741                *((uint64_t *)p_tmp) = (uint64_t)*offset;
4742                break;
4743            case 8:
4744                *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
4745                break;
4746            default:
4747                *((uint64_t *)p_tmp) = 0;
4748            }
4749            p_tmp += sizeof(uint64_t);
4750        }
4751        return (0);
4752
4753    default:
4754        return (-1);
4755    }
4756}
4757
4758static void
4759bxe_handle_chip_tq(void *context,
4760                   int  pending)
4761{
4762    struct bxe_softc *sc = (struct bxe_softc *)context;
4763    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4764
4765    switch (work)
4766    {
4767    case CHIP_TQ_START:
4768        if ((if_getflags(sc->ifp) & IFF_UP) &&
4769            !(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4770            /* start the interface */
4771            BLOGD(sc, DBG_LOAD, "Starting the interface...\n");
4772            BXE_CORE_LOCK(sc);
4773            bxe_init_locked(sc);
4774            BXE_CORE_UNLOCK(sc);
4775        }
4776        break;
4777
4778    case CHIP_TQ_STOP:
4779        if (!(if_getflags(sc->ifp) & IFF_UP) &&
4780            (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4781            /* bring down the interface */
4782            BLOGD(sc, DBG_LOAD, "Stopping the interface...\n");
4783            bxe_periodic_stop(sc);
4784            BXE_CORE_LOCK(sc);
4785            bxe_stop_locked(sc);
4786            BXE_CORE_UNLOCK(sc);
4787        }
4788        break;
4789
4790    case CHIP_TQ_REINIT:
4791        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4792            /* restart the interface */
4793            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4794            bxe_periodic_stop(sc);
4795            BXE_CORE_LOCK(sc);
4796            bxe_stop_locked(sc);
4797            bxe_init_locked(sc);
4798            BXE_CORE_UNLOCK(sc);
4799        }
4800        break;
4801
4802    default:
4803        break;
4804    }
4805}
4806
4807/*
4808 * Handles any IOCTL calls from the operating system.
4809 *
4810 * Returns:
4811 *   0 = Success, >0 Failure
4812 */
4813static int
4814bxe_ioctl(if_t ifp,
4815          u_long       command,
4816          caddr_t      data)
4817{
4818    struct bxe_softc *sc = if_getsoftc(ifp);
4819    struct ifreq *ifr = (struct ifreq *)data;
4820    struct bxe_nvram_data *nvdata;
4821    uint32_t priv_op;
4822    int mask = 0;
4823    int reinit = 0;
4824    int error = 0;
4825
4826    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4827    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4828
4829    switch (command)
4830    {
4831    case SIOCSIFMTU:
4832        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4833              ifr->ifr_mtu);
4834
4835        if (sc->mtu == ifr->ifr_mtu) {
4836            /* nothing to change */
4837            break;
4838        }
4839
4840        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4841            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4842                  ifr->ifr_mtu, mtu_min, mtu_max);
4843            error = EINVAL;
4844            break;
4845        }
4846
4847        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4848                             (unsigned long)ifr->ifr_mtu);
4849	/*
4850        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4851                              (unsigned long)ifr->ifr_mtu);
4852	XXX - Not sure why it needs to be atomic
4853	*/
4854	if_setmtu(ifp, ifr->ifr_mtu);
4855        reinit = 1;
4856        break;
4857
4858    case SIOCSIFFLAGS:
4859        /* toggle the interface state up or down */
4860        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4861
4862        /* check if the interface is up */
4863        if (if_getflags(ifp) & IFF_UP) {
4864            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4865                /* set the receive mode flags */
4866                bxe_set_rx_mode(sc);
4867            } else {
4868                atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_START);
4869                taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4870            }
4871        } else {
4872            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4873                atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_STOP);
4874                taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4875            }
4876        }
4877
4878        break;
4879
4880    case SIOCADDMULTI:
4881    case SIOCDELMULTI:
4882        /* add/delete multicast addresses */
4883        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4884
4885        /* check if the interface is up */
4886        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4887            /* set the receive mode flags */
4888            bxe_set_rx_mode(sc);
4889        }
4890
4891        break;
4892
4893    case SIOCSIFCAP:
4894        /* find out which capabilities have changed */
4895        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4896
4897        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4898              mask);
4899
4900        /* toggle the LRO capabilites enable flag */
4901        if (mask & IFCAP_LRO) {
4902	    if_togglecapenable(ifp, IFCAP_LRO);
4903            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4904                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4905            reinit = 1;
4906        }
4907
4908        /* toggle the TXCSUM checksum capabilites enable flag */
4909        if (mask & IFCAP_TXCSUM) {
4910	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4911            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4912                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4913            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4914                if_sethwassistbits(ifp, (CSUM_IP      |
4915                                    CSUM_TCP      |
4916                                    CSUM_UDP      |
4917                                    CSUM_TSO      |
4918                                    CSUM_TCP_IPV6 |
4919                                    CSUM_UDP_IPV6), 0);
4920            } else {
4921		if_clearhwassist(ifp); /* XXX */
4922            }
4923        }
4924
4925        /* toggle the RXCSUM checksum capabilities enable flag */
4926        if (mask & IFCAP_RXCSUM) {
4927	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4928            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4929                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4930            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4931                if_sethwassistbits(ifp, (CSUM_IP      |
4932                                    CSUM_TCP      |
4933                                    CSUM_UDP      |
4934                                    CSUM_TSO      |
4935                                    CSUM_TCP_IPV6 |
4936                                    CSUM_UDP_IPV6), 0);
4937            } else {
4938		if_clearhwassist(ifp); /* XXX */
4939            }
4940        }
4941
4942        /* toggle TSO4 capabilities enabled flag */
4943        if (mask & IFCAP_TSO4) {
4944            if_togglecapenable(ifp, IFCAP_TSO4);
4945            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4946                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4947        }
4948
4949        /* toggle TSO6 capabilities enabled flag */
4950        if (mask & IFCAP_TSO6) {
4951	    if_togglecapenable(ifp, IFCAP_TSO6);
4952            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4953                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4954        }
4955
4956        /* toggle VLAN_HWTSO capabilities enabled flag */
4957        if (mask & IFCAP_VLAN_HWTSO) {
4958
4959	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4960            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4961                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4962        }
4963
4964        /* toggle VLAN_HWCSUM capabilities enabled flag */
4965        if (mask & IFCAP_VLAN_HWCSUM) {
4966            /* XXX investigate this... */
4967            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4968            error = EINVAL;
4969        }
4970
4971        /* toggle VLAN_MTU capabilities enable flag */
4972        if (mask & IFCAP_VLAN_MTU) {
4973            /* XXX investigate this... */
4974            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4975            error = EINVAL;
4976        }
4977
4978        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4979        if (mask & IFCAP_VLAN_HWTAGGING) {
4980            /* XXX investigate this... */
4981            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4982            error = EINVAL;
4983        }
4984
4985        /* toggle VLAN_HWFILTER capabilities enabled flag */
4986        if (mask & IFCAP_VLAN_HWFILTER) {
4987            /* XXX investigate this... */
4988            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4989            error = EINVAL;
4990        }
4991
4992        /* XXX not yet...
4993         * IFCAP_WOL_MAGIC
4994         */
4995
4996        break;
4997
4998    case SIOCSIFMEDIA:
4999    case SIOCGIFMEDIA:
5000        /* set/get interface media */
5001        BLOGD(sc, DBG_IOCTL,
5002              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
5003              (command & 0xff));
5004        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
5005        break;
5006
5007    case SIOCGPRIVATE_0:
5008        copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
5009
5010        switch (priv_op)
5011        {
5012        case BXE_IOC_RD_NVRAM:
5013        case BXE_IOC_WR_NVRAM:
5014            nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
5015            BLOGD(sc, DBG_IOCTL,
5016                  "Received Private NVRAM ioctl addr=0x%x size=%u\n",
5017                  nvdata->offset, nvdata->len);
5018            error = bxe_ioctl_nvram(sc, priv_op, ifr);
5019            break;
5020
5021        case BXE_IOC_STATS_SHOW_NUM:
5022        case BXE_IOC_STATS_SHOW_STR:
5023        case BXE_IOC_STATS_SHOW_CNT:
5024            BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
5025                  priv_op);
5026            error = bxe_ioctl_stats_show(sc, priv_op, ifr);
5027            break;
5028
5029        default:
5030            BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
5031            error = EINVAL;
5032            break;
5033        }
5034
5035        break;
5036
5037    default:
5038        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
5039              (command & 0xff));
5040        error = ether_ioctl(ifp, command, data);
5041        break;
5042    }
5043
5044    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
5045        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
5046              "Re-initializing hardware from IOCTL change\n");
5047        atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
5048        taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
5049    }
5050
5051    return (error);
5052}
5053
5054static __noinline void
5055bxe_dump_mbuf(struct bxe_softc *sc,
5056              struct mbuf      *m,
5057              uint8_t          contents)
5058{
5059    char * type;
5060    int i = 0;
5061
5062    if (!(sc->debug & DBG_MBUF)) {
5063        return;
5064    }
5065
5066    if (m == NULL) {
5067        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
5068        return;
5069    }
5070
5071    while (m) {
5072        BLOGD(sc, DBG_MBUF,
5073              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
5074              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
5075
5076        if (m->m_flags & M_PKTHDR) {
5077             BLOGD(sc, DBG_MBUF,
5078                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
5079                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
5080                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
5081        }
5082
5083        if (m->m_flags & M_EXT) {
5084            switch (m->m_ext.ext_type) {
5085            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
5086            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
5087            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
5088            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
5089            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
5090            case EXT_PACKET:     type = "EXT_PACKET";     break;
5091            case EXT_MBUF:       type = "EXT_MBUF";       break;
5092            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
5093            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
5094            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
5095            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
5096            default:             type = "UNKNOWN";        break;
5097            }
5098
5099            BLOGD(sc, DBG_MBUF,
5100                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
5101                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
5102        }
5103
5104        if (contents) {
5105            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
5106        }
5107
5108        m = m->m_next;
5109        i++;
5110    }
5111}
5112
5113/*
5114 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
5115 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
5116 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
5117 * The headers comes in a seperate bd in FreeBSD so 13-3=10.
5118 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
5119 */
5120static int
5121bxe_chktso_window(struct bxe_softc  *sc,
5122                  int               nsegs,
5123                  bus_dma_segment_t *segs,
5124                  struct mbuf       *m)
5125{
5126    uint32_t num_wnds, wnd_size, wnd_sum;
5127    int32_t frag_idx, wnd_idx;
5128    unsigned short lso_mss;
5129    int defrag;
5130
5131    defrag = 0;
5132    wnd_sum = 0;
5133    wnd_size = 10;
5134    num_wnds = nsegs - wnd_size;
5135    lso_mss = htole16(m->m_pkthdr.tso_segsz);
5136
5137    /*
5138     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
5139     * first window sum of data while skipping the first assuming it is the
5140     * header in FreeBSD.
5141     */
5142    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
5143        wnd_sum += htole16(segs[frag_idx].ds_len);
5144    }
5145
5146    /* check the first 10 bd window size */
5147    if (wnd_sum < lso_mss) {
5148        return (1);
5149    }
5150
5151    /* run through the windows */
5152    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
5153        /* subtract the first mbuf->m_len of the last wndw(-header) */
5154        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
5155        /* add the next mbuf len to the len of our new window */
5156        wnd_sum += htole16(segs[frag_idx].ds_len);
5157        if (wnd_sum < lso_mss) {
5158            return (1);
5159        }
5160    }
5161
5162    return (0);
5163}
5164
5165static uint8_t
5166bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
5167                    struct mbuf         *m,
5168                    uint32_t            *parsing_data)
5169{
5170    struct ether_vlan_header *eh = NULL;
5171    struct ip *ip4 = NULL;
5172    struct ip6_hdr *ip6 = NULL;
5173    caddr_t ip = NULL;
5174    struct tcphdr *th = NULL;
5175    int e_hlen, ip_hlen, l4_off;
5176    uint16_t proto;
5177
5178    if (m->m_pkthdr.csum_flags == CSUM_IP) {
5179        /* no L4 checksum offload needed */
5180        return (0);
5181    }
5182
5183    /* get the Ethernet header */
5184    eh = mtod(m, struct ether_vlan_header *);
5185
5186    /* handle VLAN encapsulation if present */
5187    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5188        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5189        proto  = ntohs(eh->evl_proto);
5190    } else {
5191        e_hlen = ETHER_HDR_LEN;
5192        proto  = ntohs(eh->evl_encap_proto);
5193    }
5194
5195    switch (proto) {
5196    case ETHERTYPE_IP:
5197        /* get the IP header, if mbuf len < 20 then header in next mbuf */
5198        ip4 = (m->m_len < sizeof(struct ip)) ?
5199                  (struct ip *)m->m_next->m_data :
5200                  (struct ip *)(m->m_data + e_hlen);
5201        /* ip_hl is number of 32-bit words */
5202        ip_hlen = (ip4->ip_hl << 2);
5203        ip = (caddr_t)ip4;
5204        break;
5205    case ETHERTYPE_IPV6:
5206        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5207        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5208                  (struct ip6_hdr *)m->m_next->m_data :
5209                  (struct ip6_hdr *)(m->m_data + e_hlen);
5210        /* XXX cannot support offload with IPv6 extensions */
5211        ip_hlen = sizeof(struct ip6_hdr);
5212        ip = (caddr_t)ip6;
5213        break;
5214    default:
5215        /* We can't offload in this case... */
5216        /* XXX error stat ??? */
5217        return (0);
5218    }
5219
5220    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5221    l4_off = (e_hlen + ip_hlen);
5222
5223    *parsing_data |=
5224        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
5225         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
5226
5227    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5228                                  CSUM_TSO |
5229                                  CSUM_TCP_IPV6)) {
5230        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5231        th = (struct tcphdr *)(ip + ip_hlen);
5232        /* th_off is number of 32-bit words */
5233        *parsing_data |= ((th->th_off <<
5234                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
5235                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
5236        return (l4_off + (th->th_off << 2)); /* entire header length */
5237    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5238                                         CSUM_UDP_IPV6)) {
5239        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5240        return (l4_off + sizeof(struct udphdr)); /* entire header length */
5241    } else {
5242        /* XXX error stat ??? */
5243        return (0);
5244    }
5245}
5246
5247static uint8_t
5248bxe_set_pbd_csum(struct bxe_fastpath        *fp,
5249                 struct mbuf                *m,
5250                 struct eth_tx_parse_bd_e1x *pbd)
5251{
5252    struct ether_vlan_header *eh = NULL;
5253    struct ip *ip4 = NULL;
5254    struct ip6_hdr *ip6 = NULL;
5255    caddr_t ip = NULL;
5256    struct tcphdr *th = NULL;
5257    struct udphdr *uh = NULL;
5258    int e_hlen, ip_hlen;
5259    uint16_t proto;
5260    uint8_t hlen;
5261    uint16_t tmp_csum;
5262    uint32_t *tmp_uh;
5263
5264    /* get the Ethernet header */
5265    eh = mtod(m, struct ether_vlan_header *);
5266
5267    /* handle VLAN encapsulation if present */
5268    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5269        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5270        proto  = ntohs(eh->evl_proto);
5271    } else {
5272        e_hlen = ETHER_HDR_LEN;
5273        proto  = ntohs(eh->evl_encap_proto);
5274    }
5275
5276    switch (proto) {
5277    case ETHERTYPE_IP:
5278        /* get the IP header, if mbuf len < 20 then header in next mbuf */
5279        ip4 = (m->m_len < sizeof(struct ip)) ?
5280                  (struct ip *)m->m_next->m_data :
5281                  (struct ip *)(m->m_data + e_hlen);
5282        /* ip_hl is number of 32-bit words */
5283        ip_hlen = (ip4->ip_hl << 1);
5284        ip = (caddr_t)ip4;
5285        break;
5286    case ETHERTYPE_IPV6:
5287        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5288        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5289                  (struct ip6_hdr *)m->m_next->m_data :
5290                  (struct ip6_hdr *)(m->m_data + e_hlen);
5291        /* XXX cannot support offload with IPv6 extensions */
5292        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
5293        ip = (caddr_t)ip6;
5294        break;
5295    default:
5296        /* We can't offload in this case... */
5297        /* XXX error stat ??? */
5298        return (0);
5299    }
5300
5301    hlen = (e_hlen >> 1);
5302
5303    /* note that rest of global_data is indirectly zeroed here */
5304    if (m->m_flags & M_VLANTAG) {
5305        pbd->global_data =
5306            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5307    } else {
5308        pbd->global_data = htole16(hlen);
5309    }
5310
5311    pbd->ip_hlen_w = ip_hlen;
5312
5313    hlen += pbd->ip_hlen_w;
5314
5315    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5316
5317    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5318                                  CSUM_TSO |
5319                                  CSUM_TCP_IPV6)) {
5320        th = (struct tcphdr *)(ip + (ip_hlen << 1));
5321        /* th_off is number of 32-bit words */
5322        hlen += (uint16_t)(th->th_off << 1);
5323    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5324                                         CSUM_UDP_IPV6)) {
5325        uh = (struct udphdr *)(ip + (ip_hlen << 1));
5326        hlen += (sizeof(struct udphdr) / 2);
5327    } else {
5328        /* valid case as only CSUM_IP was set */
5329        return (0);
5330    }
5331
5332    pbd->total_hlen_w = htole16(hlen);
5333
5334    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5335                                  CSUM_TSO |
5336                                  CSUM_TCP_IPV6)) {
5337        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5338        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5339    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5340                                         CSUM_UDP_IPV6)) {
5341        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5342
5343        /*
5344         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5345         * checksums and does not know anything about the UDP header and where
5346         * the checksum field is located. It only knows about TCP. Therefore
5347         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5348         * offload. Since the checksum field offset for TCP is 16 bytes and
5349         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5350         * bytes less than the start of the UDP header. This allows the
5351         * hardware to write the checksum in the correct spot. But the
5352         * hardware will compute a checksum which includes the last 10 bytes
5353         * of the IP header. To correct this we tweak the stack computed
5354         * pseudo checksum by folding in the calculation of the inverse
5355         * checksum for those final 10 bytes of the IP header. This allows
5356         * the correct checksum to be computed by the hardware.
5357         */
5358
5359        /* set pointer 10 bytes before UDP header */
5360        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5361
5362        /* calculate a pseudo header checksum over the first 10 bytes */
5363        tmp_csum = in_pseudo(*tmp_uh,
5364                             *(tmp_uh + 1),
5365                             *(uint16_t *)(tmp_uh + 2));
5366
5367        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5368    }
5369
5370    return (hlen * 2); /* entire header length, number of bytes */
5371}
5372
5373static void
5374bxe_set_pbd_lso_e2(struct mbuf *m,
5375                   uint32_t    *parsing_data)
5376{
5377    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5378                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5379                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5380
5381    /* XXX test for IPv6 with extension header... */
5382#if 0
5383    struct ip6_hdr *ip6;
5384    if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header')
5385        *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
5386#endif
5387}
5388
5389static void
5390bxe_set_pbd_lso(struct mbuf                *m,
5391                struct eth_tx_parse_bd_e1x *pbd)
5392{
5393    struct ether_vlan_header *eh = NULL;
5394    struct ip *ip = NULL;
5395    struct tcphdr *th = NULL;
5396    int e_hlen;
5397
5398    /* get the Ethernet header */
5399    eh = mtod(m, struct ether_vlan_header *);
5400
5401    /* handle VLAN encapsulation if present */
5402    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5403                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5404
5405    /* get the IP and TCP header, with LSO entire header in first mbuf */
5406    /* XXX assuming IPv4 */
5407    ip = (struct ip *)(m->m_data + e_hlen);
5408    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5409
5410    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5411    pbd->tcp_send_seq = ntohl(th->th_seq);
5412    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5413
5414#if 1
5415        /* XXX IPv4 */
5416        pbd->ip_id = ntohs(ip->ip_id);
5417        pbd->tcp_pseudo_csum =
5418            ntohs(in_pseudo(ip->ip_src.s_addr,
5419                            ip->ip_dst.s_addr,
5420                            htons(IPPROTO_TCP)));
5421#else
5422        /* XXX IPv6 */
5423        pbd->tcp_pseudo_csum =
5424            ntohs(in_pseudo(&ip6->ip6_src,
5425                            &ip6->ip6_dst,
5426                            htons(IPPROTO_TCP)));
5427#endif
5428
5429    pbd->global_data |=
5430        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5431}
5432
5433/*
5434 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
5435 * visible to the controller.
5436 *
5437 * If an mbuf is submitted to this routine and cannot be given to the
5438 * controller (e.g. it has too many fragments) then the function may free
5439 * the mbuf and return to the caller.
5440 *
5441 * Returns:
5442 *   0 = Success, !0 = Failure
5443 *   Note the side effect that an mbuf may be freed if it causes a problem.
5444 */
5445static int
5446bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5447{
5448    bus_dma_segment_t segs[32];
5449    struct mbuf *m0;
5450    struct bxe_sw_tx_bd *tx_buf;
5451    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5452    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5453    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5454    struct eth_tx_bd *tx_data_bd;
5455    struct eth_tx_bd *tx_total_pkt_size_bd;
5456    struct eth_tx_start_bd *tx_start_bd;
5457    uint16_t bd_prod, pkt_prod, total_pkt_size;
5458    uint8_t mac_type;
5459    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5460    struct bxe_softc *sc;
5461    uint16_t tx_bd_avail;
5462    struct ether_vlan_header *eh;
5463    uint32_t pbd_e2_parsing_data = 0;
5464    uint8_t hlen = 0;
5465    int tmp_bd;
5466    int i;
5467
5468    sc = fp->sc;
5469
5470    M_ASSERTPKTHDR(*m_head);
5471
5472    m0 = *m_head;
5473    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5474    tx_start_bd = NULL;
5475    tx_data_bd = NULL;
5476    tx_total_pkt_size_bd = NULL;
5477
5478    /* get the H/W pointer for packets and BDs */
5479    pkt_prod = fp->tx_pkt_prod;
5480    bd_prod = fp->tx_bd_prod;
5481
5482    mac_type = UNICAST_ADDRESS;
5483
5484    /* map the mbuf into the next open DMAable memory */
5485    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5486    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5487                                    tx_buf->m_map, m0,
5488                                    segs, &nsegs, BUS_DMA_NOWAIT);
5489
5490    /* mapping errors */
5491    if(__predict_false(error != 0)) {
5492        fp->eth_q_stats.tx_dma_mapping_failure++;
5493        if (error == ENOMEM) {
5494            /* resource issue, try again later */
5495            rc = ENOMEM;
5496        } else if (error == EFBIG) {
5497            /* possibly recoverable with defragmentation */
5498            fp->eth_q_stats.mbuf_defrag_attempts++;
5499            m0 = m_defrag(*m_head, M_NOWAIT);
5500            if (m0 == NULL) {
5501                fp->eth_q_stats.mbuf_defrag_failures++;
5502                rc = ENOBUFS;
5503            } else {
5504                /* defrag successful, try mapping again */
5505                *m_head = m0;
5506                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5507                                                tx_buf->m_map, m0,
5508                                                segs, &nsegs, BUS_DMA_NOWAIT);
5509                if (error) {
5510                    fp->eth_q_stats.tx_dma_mapping_failure++;
5511                    rc = error;
5512                }
5513            }
5514        } else {
5515            /* unknown, unrecoverable mapping error */
5516            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5517            bxe_dump_mbuf(sc, m0, FALSE);
5518            rc = error;
5519        }
5520
5521        goto bxe_tx_encap_continue;
5522    }
5523
5524    tx_bd_avail = bxe_tx_avail(sc, fp);
5525
5526    /* make sure there is enough room in the send queue */
5527    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5528        /* Recoverable, try again later. */
5529        fp->eth_q_stats.tx_hw_queue_full++;
5530        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5531        rc = ENOMEM;
5532        goto bxe_tx_encap_continue;
5533    }
5534
5535    /* capture the current H/W TX chain high watermark */
5536    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5537                        (TX_BD_USABLE - tx_bd_avail))) {
5538        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5539    }
5540
5541    /* make sure it fits in the packet window */
5542    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5543        /*
5544         * The mbuf may be to big for the controller to handle. If the frame
5545         * is a TSO frame we'll need to do an additional check.
5546         */
5547        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5548            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5549                goto bxe_tx_encap_continue; /* OK to send */
5550            } else {
5551                fp->eth_q_stats.tx_window_violation_tso++;
5552            }
5553        } else {
5554            fp->eth_q_stats.tx_window_violation_std++;
5555        }
5556
5557        /* lets try to defragment this mbuf and remap it */
5558        fp->eth_q_stats.mbuf_defrag_attempts++;
5559        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5560
5561        m0 = m_defrag(*m_head, M_NOWAIT);
5562        if (m0 == NULL) {
5563            fp->eth_q_stats.mbuf_defrag_failures++;
5564            /* Ugh, just drop the frame... :( */
5565            rc = ENOBUFS;
5566        } else {
5567            /* defrag successful, try mapping again */
5568            *m_head = m0;
5569            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5570                                            tx_buf->m_map, m0,
5571                                            segs, &nsegs, BUS_DMA_NOWAIT);
5572            if (error) {
5573                fp->eth_q_stats.tx_dma_mapping_failure++;
5574                /* No sense in trying to defrag/copy chain, drop it. :( */
5575                rc = error;
5576            }
5577            else {
5578                /* if the chain is still too long then drop it */
5579                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5580                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5581                    rc = ENODEV;
5582                }
5583            }
5584        }
5585    }
5586
5587bxe_tx_encap_continue:
5588
5589    /* Check for errors */
5590    if (rc) {
5591        if (rc == ENOMEM) {
5592            /* recoverable try again later  */
5593        } else {
5594            fp->eth_q_stats.tx_soft_errors++;
5595            fp->eth_q_stats.mbuf_alloc_tx--;
5596            m_freem(*m_head);
5597            *m_head = NULL;
5598        }
5599
5600        return (rc);
5601    }
5602
5603    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5604    if (m0->m_flags & M_BCAST) {
5605        mac_type = BROADCAST_ADDRESS;
5606    } else if (m0->m_flags & M_MCAST) {
5607        mac_type = MULTICAST_ADDRESS;
5608    }
5609
5610    /* store the mbuf into the mbuf ring */
5611    tx_buf->m        = m0;
5612    tx_buf->first_bd = fp->tx_bd_prod;
5613    tx_buf->flags    = 0;
5614
5615    /* prepare the first transmit (start) BD for the mbuf */
5616    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5617
5618    BLOGD(sc, DBG_TX,
5619          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5620          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5621
5622    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5623    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5624    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5625    total_pkt_size += tx_start_bd->nbytes;
5626    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5627
5628    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5629
5630    /* all frames have at least Start BD + Parsing BD */
5631    nbds = nsegs + 1;
5632    tx_start_bd->nbd = htole16(nbds);
5633
5634    if (m0->m_flags & M_VLANTAG) {
5635        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5636        tx_start_bd->bd_flags.as_bitfield |=
5637            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5638    } else {
5639        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5640        if (IS_VF(sc)) {
5641            /* map ethernet header to find type and header length */
5642            eh = mtod(m0, struct ether_vlan_header *);
5643            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5644        } else {
5645            /* used by FW for packet accounting */
5646            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5647#if 0
5648            /*
5649             * If NPAR-SD is active then FW should do the tagging regardless
5650             * of value of priority. Otherwise, if priority indicates this is
5651             * a control packet we need to indicate to FW to avoid tagging.
5652             */
5653            if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) {
5654                SET_FLAG(tx_start_bd->general_data,
5655                         ETH_TX_START_BD_FORCE_VLAN_MODE, 1);
5656            }
5657#endif
5658        }
5659    }
5660
5661    /*
5662     * add a parsing BD from the chain. The parsing BD is always added
5663     * though it is only used for TSO and chksum
5664     */
5665    bd_prod = TX_BD_NEXT(bd_prod);
5666
5667    if (m0->m_pkthdr.csum_flags) {
5668        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5669            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5670            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5671        }
5672
5673        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5674            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5675                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5676        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5677            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5678                                                  ETH_TX_BD_FLAGS_IS_UDP |
5679                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5680        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5681                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5682            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5683        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5684            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5685                                                  ETH_TX_BD_FLAGS_IS_UDP);
5686        }
5687    }
5688
5689    if (!CHIP_IS_E1x(sc)) {
5690        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5691        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5692
5693        if (m0->m_pkthdr.csum_flags) {
5694            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5695        }
5696
5697#if 0
5698        /*
5699         * Add the MACs to the parsing BD if the module param was
5700         * explicitly set, if this is a vf, or in switch independent
5701         * mode.
5702         */
5703        if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) {
5704            eh = mtod(m0, struct ether_vlan_header *);
5705            bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
5706                                &pbd_e2->data.mac_addr.src_mid,
5707                                &pbd_e2->data.mac_addr.src_lo,
5708                                eh->evl_shost);
5709            bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
5710                                &pbd_e2->data.mac_addr.dst_mid,
5711                                &pbd_e2->data.mac_addr.dst_lo,
5712                                eh->evl_dhost);
5713        }
5714#endif
5715
5716        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5717                 mac_type);
5718    } else {
5719        uint16_t global_data = 0;
5720
5721        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5722        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5723
5724        if (m0->m_pkthdr.csum_flags) {
5725            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5726        }
5727
5728        SET_FLAG(global_data,
5729                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5730        pbd_e1x->global_data |= htole16(global_data);
5731    }
5732
5733    /* setup the parsing BD with TSO specific info */
5734    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5735        fp->eth_q_stats.tx_ofld_frames_lso++;
5736        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5737
5738        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5739            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5740
5741            /* split the first BD into header/data making the fw job easy */
5742            nbds++;
5743            tx_start_bd->nbd = htole16(nbds);
5744            tx_start_bd->nbytes = htole16(hlen);
5745
5746            bd_prod = TX_BD_NEXT(bd_prod);
5747
5748            /* new transmit BD after the tx_parse_bd */
5749            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5750            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5751            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5752            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5753            if (tx_total_pkt_size_bd == NULL) {
5754                tx_total_pkt_size_bd = tx_data_bd;
5755            }
5756
5757            BLOGD(sc, DBG_TX,
5758                  "TSO split header size is %d (%x:%x) nbds %d\n",
5759                  le16toh(tx_start_bd->nbytes),
5760                  le32toh(tx_start_bd->addr_hi),
5761                  le32toh(tx_start_bd->addr_lo),
5762                  nbds);
5763        }
5764
5765        if (!CHIP_IS_E1x(sc)) {
5766            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5767        } else {
5768            bxe_set_pbd_lso(m0, pbd_e1x);
5769        }
5770    }
5771
5772    if (pbd_e2_parsing_data) {
5773        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5774    }
5775
5776    /* prepare remaining BDs, start tx bd contains first seg/frag */
5777    for (i = 1; i < nsegs ; i++) {
5778        bd_prod = TX_BD_NEXT(bd_prod);
5779        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5780        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5781        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5782        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5783        if (tx_total_pkt_size_bd == NULL) {
5784            tx_total_pkt_size_bd = tx_data_bd;
5785        }
5786        total_pkt_size += tx_data_bd->nbytes;
5787    }
5788
5789    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5790
5791    if (tx_total_pkt_size_bd != NULL) {
5792        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5793    }
5794
5795    if (__predict_false(sc->debug & DBG_TX)) {
5796        tmp_bd = tx_buf->first_bd;
5797        for (i = 0; i < nbds; i++)
5798        {
5799            if (i == 0) {
5800                BLOGD(sc, DBG_TX,
5801                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5802                      "bd_flags=0x%x hdr_nbds=%d\n",
5803                      tx_start_bd,
5804                      tmp_bd,
5805                      le16toh(tx_start_bd->nbd),
5806                      le16toh(tx_start_bd->vlan_or_ethertype),
5807                      tx_start_bd->bd_flags.as_bitfield,
5808                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5809            } else if (i == 1) {
5810                if (pbd_e1x) {
5811                    BLOGD(sc, DBG_TX,
5812                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5813                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5814                          "tcp_seq=%u total_hlen_w=%u\n",
5815                          pbd_e1x,
5816                          tmp_bd,
5817                          pbd_e1x->global_data,
5818                          pbd_e1x->ip_hlen_w,
5819                          pbd_e1x->ip_id,
5820                          pbd_e1x->lso_mss,
5821                          pbd_e1x->tcp_flags,
5822                          pbd_e1x->tcp_pseudo_csum,
5823                          pbd_e1x->tcp_send_seq,
5824                          le16toh(pbd_e1x->total_hlen_w));
5825                } else { /* if (pbd_e2) */
5826                    BLOGD(sc, DBG_TX,
5827                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5828                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5829                          pbd_e2,
5830                          tmp_bd,
5831                          pbd_e2->data.mac_addr.dst_hi,
5832                          pbd_e2->data.mac_addr.dst_mid,
5833                          pbd_e2->data.mac_addr.dst_lo,
5834                          pbd_e2->data.mac_addr.src_hi,
5835                          pbd_e2->data.mac_addr.src_mid,
5836                          pbd_e2->data.mac_addr.src_lo,
5837                          pbd_e2->parsing_data);
5838                }
5839            }
5840
5841            if (i != 1) { /* skip parse db as it doesn't hold data */
5842                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5843                BLOGD(sc, DBG_TX,
5844                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5845                      tx_data_bd,
5846                      tmp_bd,
5847                      le16toh(tx_data_bd->nbytes),
5848                      le32toh(tx_data_bd->addr_hi),
5849                      le32toh(tx_data_bd->addr_lo));
5850            }
5851
5852            tmp_bd = TX_BD_NEXT(tmp_bd);
5853        }
5854    }
5855
5856    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5857
5858    /* update TX BD producer index value for next TX */
5859    bd_prod = TX_BD_NEXT(bd_prod);
5860
5861    /*
5862     * If the chain of tx_bd's describing this frame is adjacent to or spans
5863     * an eth_tx_next_bd element then we need to increment the nbds value.
5864     */
5865    if (TX_BD_IDX(bd_prod) < nbds) {
5866        nbds++;
5867    }
5868
5869    /* don't allow reordering of writes for nbd and packets */
5870    mb();
5871
5872    fp->tx_db.data.prod += nbds;
5873
5874    /* producer points to the next free tx_bd at this point */
5875    fp->tx_pkt_prod++;
5876    fp->tx_bd_prod = bd_prod;
5877
5878    DOORBELL(sc, fp->index, fp->tx_db.raw);
5879
5880    fp->eth_q_stats.tx_pkts++;
5881
5882    /* Prevent speculative reads from getting ahead of the status block. */
5883    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5884                      0, 0, BUS_SPACE_BARRIER_READ);
5885
5886    /* Prevent speculative reads from getting ahead of the doorbell. */
5887    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5888                      0, 0, BUS_SPACE_BARRIER_READ);
5889
5890    return (0);
5891}
5892
5893static void
5894bxe_tx_start_locked(struct bxe_softc *sc,
5895                    if_t ifp,
5896                    struct bxe_fastpath *fp)
5897{
5898    struct mbuf *m = NULL;
5899    int tx_count = 0;
5900    uint16_t tx_bd_avail;
5901
5902    BXE_FP_TX_LOCK_ASSERT(fp);
5903
5904    /* keep adding entries while there are frames to send */
5905    while (!if_sendq_empty(ifp)) {
5906
5907        /*
5908         * check for any frames to send
5909         * dequeue can still be NULL even if queue is not empty
5910         */
5911        m = if_dequeue(ifp);
5912        if (__predict_false(m == NULL)) {
5913            break;
5914        }
5915
5916        /* the mbuf now belongs to us */
5917        fp->eth_q_stats.mbuf_alloc_tx++;
5918
5919        /*
5920         * Put the frame into the transmit ring. If we don't have room,
5921         * place the mbuf back at the head of the TX queue, set the
5922         * OACTIVE flag, and wait for the NIC to drain the chain.
5923         */
5924        if (__predict_false(bxe_tx_encap(fp, &m))) {
5925            fp->eth_q_stats.tx_encap_failures++;
5926            if (m != NULL) {
5927                /* mark the TX queue as full and return the frame */
5928                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5929		if_sendq_prepend(ifp, m);
5930                fp->eth_q_stats.mbuf_alloc_tx--;
5931                fp->eth_q_stats.tx_queue_xoff++;
5932            }
5933
5934            /* stop looking for more work */
5935            break;
5936        }
5937
5938        /* the frame was enqueued successfully */
5939        tx_count++;
5940
5941        /* send a copy of the frame to any BPF listeners. */
5942        if_etherbpfmtap(ifp, m);
5943
5944        tx_bd_avail = bxe_tx_avail(sc, fp);
5945
5946        /* handle any completions if we're running low */
5947        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5948            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5949            bxe_txeof(sc, fp);
5950            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5951                break;
5952            }
5953        }
5954    }
5955
5956    /* all TX packets were dequeued and/or the tx ring is full */
5957    if (tx_count > 0) {
5958        /* reset the TX watchdog timeout timer */
5959        fp->watchdog_timer = BXE_TX_TIMEOUT;
5960    }
5961}
5962
5963/* Legacy (non-RSS) dispatch routine */
5964static void
5965bxe_tx_start(if_t ifp)
5966{
5967    struct bxe_softc *sc;
5968    struct bxe_fastpath *fp;
5969
5970    sc = if_getsoftc(ifp);
5971
5972    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5973        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5974        return;
5975    }
5976
5977    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5978        BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
5979        return;
5980    }
5981
5982    if (!sc->link_vars.link_up) {
5983        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5984        return;
5985    }
5986
5987    fp = &sc->fp[0];
5988
5989    BXE_FP_TX_LOCK(fp);
5990    bxe_tx_start_locked(sc, ifp, fp);
5991    BXE_FP_TX_UNLOCK(fp);
5992}
5993
5994#if __FreeBSD_version >= 800000
5995
5996static int
5997bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5998                       if_t                ifp,
5999                       struct bxe_fastpath *fp,
6000                       struct mbuf         *m)
6001{
6002    struct buf_ring *tx_br = fp->tx_br;
6003    struct mbuf *next;
6004    int depth, rc, tx_count;
6005    uint16_t tx_bd_avail;
6006
6007    rc = tx_count = 0;
6008
6009    if (!tx_br) {
6010        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
6011        return (EINVAL);
6012    }
6013
6014    /* fetch the depth of the driver queue */
6015    depth = drbr_inuse_drv(ifp, tx_br);
6016    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
6017        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
6018    }
6019
6020    BXE_FP_TX_LOCK_ASSERT(fp);
6021
6022    if (m == NULL) {
6023        /* no new work, check for pending frames */
6024        next = drbr_dequeue_drv(ifp, tx_br);
6025    } else if (drbr_needs_enqueue_drv(ifp, tx_br)) {
6026        /* have both new and pending work, maintain packet order */
6027        rc = drbr_enqueue_drv(ifp, tx_br, m);
6028        if (rc != 0) {
6029            fp->eth_q_stats.tx_soft_errors++;
6030            goto bxe_tx_mq_start_locked_exit;
6031        }
6032        next = drbr_dequeue_drv(ifp, tx_br);
6033    } else {
6034        /* new work only and nothing pending */
6035        next = m;
6036    }
6037
6038    /* keep adding entries while there are frames to send */
6039    while (next != NULL) {
6040
6041        /* the mbuf now belongs to us */
6042        fp->eth_q_stats.mbuf_alloc_tx++;
6043
6044        /*
6045         * Put the frame into the transmit ring. If we don't have room,
6046         * place the mbuf back at the head of the TX queue, set the
6047         * OACTIVE flag, and wait for the NIC to drain the chain.
6048         */
6049        rc = bxe_tx_encap(fp, &next);
6050        if (__predict_false(rc != 0)) {
6051            fp->eth_q_stats.tx_encap_failures++;
6052            if (next != NULL) {
6053                /* mark the TX queue as full and save the frame */
6054                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
6055                /* XXX this may reorder the frame */
6056                rc = drbr_enqueue_drv(ifp, tx_br, next);
6057                fp->eth_q_stats.mbuf_alloc_tx--;
6058                fp->eth_q_stats.tx_frames_deferred++;
6059            }
6060
6061            /* stop looking for more work */
6062            break;
6063        }
6064
6065        /* the transmit frame was enqueued successfully */
6066        tx_count++;
6067
6068        /* send a copy of the frame to any BPF listeners */
6069	if_etherbpfmtap(ifp, next);
6070
6071        tx_bd_avail = bxe_tx_avail(sc, fp);
6072
6073        /* handle any completions if we're running low */
6074        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
6075            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
6076            bxe_txeof(sc, fp);
6077            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
6078                break;
6079            }
6080        }
6081
6082        next = drbr_dequeue_drv(ifp, tx_br);
6083    }
6084
6085    /* all TX packets were dequeued and/or the tx ring is full */
6086    if (tx_count > 0) {
6087        /* reset the TX watchdog timeout timer */
6088        fp->watchdog_timer = BXE_TX_TIMEOUT;
6089    }
6090
6091bxe_tx_mq_start_locked_exit:
6092
6093    return (rc);
6094}
6095
6096/* Multiqueue (TSS) dispatch routine. */
6097static int
6098bxe_tx_mq_start(struct ifnet *ifp,
6099                struct mbuf  *m)
6100{
6101    struct bxe_softc *sc = if_getsoftc(ifp);
6102    struct bxe_fastpath *fp;
6103    int fp_index, rc;
6104
6105    fp_index = 0; /* default is the first queue */
6106
6107    /* check if flowid is set */
6108    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
6109        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
6110
6111    fp = &sc->fp[fp_index];
6112
6113    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
6114        BLOGW(sc, "Interface not running, ignoring transmit request\n");
6115        return (ENETDOWN);
6116    }
6117
6118    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
6119        BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
6120        return (EBUSY);
6121    }
6122
6123    if (!sc->link_vars.link_up) {
6124        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
6125        return (ENETDOWN);
6126    }
6127
6128    /* XXX change to TRYLOCK here and if failed then schedule taskqueue */
6129
6130    BXE_FP_TX_LOCK(fp);
6131    rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
6132    BXE_FP_TX_UNLOCK(fp);
6133
6134    return (rc);
6135}
6136
6137static void
6138bxe_mq_flush(struct ifnet *ifp)
6139{
6140    struct bxe_softc *sc = if_getsoftc(ifp);
6141    struct bxe_fastpath *fp;
6142    struct mbuf *m;
6143    int i;
6144
6145    for (i = 0; i < sc->num_queues; i++) {
6146        fp = &sc->fp[i];
6147
6148        if (fp->state != BXE_FP_STATE_OPEN) {
6149            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
6150                  fp->index, fp->state);
6151            continue;
6152        }
6153
6154        if (fp->tx_br != NULL) {
6155            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
6156            BXE_FP_TX_LOCK(fp);
6157            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
6158                m_freem(m);
6159            }
6160            BXE_FP_TX_UNLOCK(fp);
6161        }
6162    }
6163
6164    if_qflush(ifp);
6165}
6166
6167#endif /* FreeBSD_version >= 800000 */
6168
6169static uint16_t
6170bxe_cid_ilt_lines(struct bxe_softc *sc)
6171{
6172    if (IS_SRIOV(sc)) {
6173        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
6174    }
6175    return (L2_ILT_LINES(sc));
6176}
6177
6178static void
6179bxe_ilt_set_info(struct bxe_softc *sc)
6180{
6181    struct ilt_client_info *ilt_client;
6182    struct ecore_ilt *ilt = sc->ilt;
6183    uint16_t line = 0;
6184
6185    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
6186    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
6187
6188    /* CDU */
6189    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6190    ilt_client->client_num = ILT_CLIENT_CDU;
6191    ilt_client->page_size = CDU_ILT_PAGE_SZ;
6192    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6193    ilt_client->start = line;
6194    line += bxe_cid_ilt_lines(sc);
6195
6196    if (CNIC_SUPPORT(sc)) {
6197        line += CNIC_ILT_LINES;
6198    }
6199
6200    ilt_client->end = (line - 1);
6201
6202    BLOGD(sc, DBG_LOAD,
6203          "ilt client[CDU]: start %d, end %d, "
6204          "psz 0x%x, flags 0x%x, hw psz %d\n",
6205          ilt_client->start, ilt_client->end,
6206          ilt_client->page_size,
6207          ilt_client->flags,
6208          ilog2(ilt_client->page_size >> 12));
6209
6210    /* QM */
6211    if (QM_INIT(sc->qm_cid_count)) {
6212        ilt_client = &ilt->clients[ILT_CLIENT_QM];
6213        ilt_client->client_num = ILT_CLIENT_QM;
6214        ilt_client->page_size = QM_ILT_PAGE_SZ;
6215        ilt_client->flags = 0;
6216        ilt_client->start = line;
6217
6218        /* 4 bytes for each cid */
6219        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6220                             QM_ILT_PAGE_SZ);
6221
6222        ilt_client->end = (line - 1);
6223
6224        BLOGD(sc, DBG_LOAD,
6225              "ilt client[QM]: start %d, end %d, "
6226              "psz 0x%x, flags 0x%x, hw psz %d\n",
6227              ilt_client->start, ilt_client->end,
6228              ilt_client->page_size, ilt_client->flags,
6229              ilog2(ilt_client->page_size >> 12));
6230    }
6231
6232    if (CNIC_SUPPORT(sc)) {
6233        /* SRC */
6234        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6235        ilt_client->client_num = ILT_CLIENT_SRC;
6236        ilt_client->page_size = SRC_ILT_PAGE_SZ;
6237        ilt_client->flags = 0;
6238        ilt_client->start = line;
6239        line += SRC_ILT_LINES;
6240        ilt_client->end = (line - 1);
6241
6242        BLOGD(sc, DBG_LOAD,
6243              "ilt client[SRC]: start %d, end %d, "
6244              "psz 0x%x, flags 0x%x, hw psz %d\n",
6245              ilt_client->start, ilt_client->end,
6246              ilt_client->page_size, ilt_client->flags,
6247              ilog2(ilt_client->page_size >> 12));
6248
6249        /* TM */
6250        ilt_client = &ilt->clients[ILT_CLIENT_TM];
6251        ilt_client->client_num = ILT_CLIENT_TM;
6252        ilt_client->page_size = TM_ILT_PAGE_SZ;
6253        ilt_client->flags = 0;
6254        ilt_client->start = line;
6255        line += TM_ILT_LINES;
6256        ilt_client->end = (line - 1);
6257
6258        BLOGD(sc, DBG_LOAD,
6259              "ilt client[TM]: start %d, end %d, "
6260              "psz 0x%x, flags 0x%x, hw psz %d\n",
6261              ilt_client->start, ilt_client->end,
6262              ilt_client->page_size, ilt_client->flags,
6263              ilog2(ilt_client->page_size >> 12));
6264    }
6265
6266    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
6267}
6268
6269static void
6270bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
6271{
6272    int i;
6273    uint32_t rx_buf_size;
6274
6275    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
6276
6277    for (i = 0; i < sc->num_queues; i++) {
6278        if(rx_buf_size <= MCLBYTES){
6279            sc->fp[i].rx_buf_size = rx_buf_size;
6280            sc->fp[i].mbuf_alloc_size = MCLBYTES;
6281        }else if (rx_buf_size <= MJUMPAGESIZE){
6282            sc->fp[i].rx_buf_size = rx_buf_size;
6283            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6284        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
6285            sc->fp[i].rx_buf_size = MCLBYTES;
6286            sc->fp[i].mbuf_alloc_size = MCLBYTES;
6287        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
6288            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
6289            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6290        }else {
6291            sc->fp[i].rx_buf_size = MCLBYTES;
6292            sc->fp[i].mbuf_alloc_size = MCLBYTES;
6293        }
6294    }
6295}
6296
6297static int
6298bxe_alloc_ilt_mem(struct bxe_softc *sc)
6299{
6300    int rc = 0;
6301
6302    if ((sc->ilt =
6303         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6304                                    M_BXE_ILT,
6305                                    (M_NOWAIT | M_ZERO))) == NULL) {
6306        rc = 1;
6307    }
6308
6309    return (rc);
6310}
6311
6312static int
6313bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6314{
6315    int rc = 0;
6316
6317    if ((sc->ilt->lines =
6318         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6319                                    M_BXE_ILT,
6320                                    (M_NOWAIT | M_ZERO))) == NULL) {
6321        rc = 1;
6322    }
6323
6324    return (rc);
6325}
6326
6327static void
6328bxe_free_ilt_mem(struct bxe_softc *sc)
6329{
6330    if (sc->ilt != NULL) {
6331        free(sc->ilt, M_BXE_ILT);
6332        sc->ilt = NULL;
6333    }
6334}
6335
6336static void
6337bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6338{
6339    if (sc->ilt->lines != NULL) {
6340        free(sc->ilt->lines, M_BXE_ILT);
6341        sc->ilt->lines = NULL;
6342    }
6343}
6344
6345static void
6346bxe_free_mem(struct bxe_softc *sc)
6347{
6348    int i;
6349
6350#if 0
6351    if (!CONFIGURE_NIC_MODE(sc)) {
6352        /* free searcher T2 table */
6353        bxe_dma_free(sc, &sc->t2);
6354    }
6355#endif
6356
6357    for (i = 0; i < L2_ILT_LINES(sc); i++) {
6358        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6359        sc->context[i].vcxt = NULL;
6360        sc->context[i].size = 0;
6361    }
6362
6363    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6364
6365    bxe_free_ilt_lines_mem(sc);
6366
6367#if 0
6368    bxe_iov_free_mem(sc);
6369#endif
6370}
6371
6372static int
6373bxe_alloc_mem(struct bxe_softc *sc)
6374{
6375    int context_size;
6376    int allocated;
6377    int i;
6378
6379#if 0
6380    if (!CONFIGURE_NIC_MODE(sc)) {
6381        /* allocate searcher T2 table */
6382        if (bxe_dma_alloc(sc, SRC_T2_SZ,
6383                          &sc->t2, "searcher t2 table") != 0) {
6384            return (-1);
6385        }
6386    }
6387#endif
6388
6389    /*
6390     * Allocate memory for CDU context:
6391     * This memory is allocated separately and not in the generic ILT
6392     * functions because CDU differs in few aspects:
6393     * 1. There can be multiple entities allocating memory for context -
6394     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6395     * its own ILT lines.
6396     * 2. Since CDU page-size is not a single 4KB page (which is the case
6397     * for the other ILT clients), to be efficient we want to support
6398     * allocation of sub-page-size in the last entry.
6399     * 3. Context pointers are used by the driver to pass to FW / update
6400     * the context (for the other ILT clients the pointers are used just to
6401     * free the memory during unload).
6402     */
6403    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6404    for (i = 0, allocated = 0; allocated < context_size; i++) {
6405        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6406                                  (context_size - allocated));
6407
6408        if (bxe_dma_alloc(sc, sc->context[i].size,
6409                          &sc->context[i].vcxt_dma,
6410                          "cdu context") != 0) {
6411            bxe_free_mem(sc);
6412            return (-1);
6413        }
6414
6415        sc->context[i].vcxt =
6416            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6417
6418        allocated += sc->context[i].size;
6419    }
6420
6421    bxe_alloc_ilt_lines_mem(sc);
6422
6423    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6424          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6425    {
6426        for (i = 0; i < 4; i++) {
6427            BLOGD(sc, DBG_LOAD,
6428                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6429                  i,
6430                  sc->ilt->clients[i].page_size,
6431                  sc->ilt->clients[i].start,
6432                  sc->ilt->clients[i].end,
6433                  sc->ilt->clients[i].client_num,
6434                  sc->ilt->clients[i].flags);
6435        }
6436    }
6437    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6438        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6439        bxe_free_mem(sc);
6440        return (-1);
6441    }
6442
6443#if 0
6444    if (bxe_iov_alloc_mem(sc)) {
6445        BLOGE(sc, "Failed to allocate memory for SRIOV\n");
6446        bxe_free_mem(sc);
6447        return (-1);
6448    }
6449#endif
6450
6451    return (0);
6452}
6453
6454static void
6455bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6456{
6457    struct bxe_softc *sc;
6458    int i;
6459
6460    sc = fp->sc;
6461
6462    if (fp->rx_mbuf_tag == NULL) {
6463        return;
6464    }
6465
6466    /* free all mbufs and unload all maps */
6467    for (i = 0; i < RX_BD_TOTAL; i++) {
6468        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6469            bus_dmamap_sync(fp->rx_mbuf_tag,
6470                            fp->rx_mbuf_chain[i].m_map,
6471                            BUS_DMASYNC_POSTREAD);
6472            bus_dmamap_unload(fp->rx_mbuf_tag,
6473                              fp->rx_mbuf_chain[i].m_map);
6474        }
6475
6476        if (fp->rx_mbuf_chain[i].m != NULL) {
6477            m_freem(fp->rx_mbuf_chain[i].m);
6478            fp->rx_mbuf_chain[i].m = NULL;
6479            fp->eth_q_stats.mbuf_alloc_rx--;
6480        }
6481    }
6482}
6483
6484static void
6485bxe_free_tpa_pool(struct bxe_fastpath *fp)
6486{
6487    struct bxe_softc *sc;
6488    int i, max_agg_queues;
6489
6490    sc = fp->sc;
6491
6492    if (fp->rx_mbuf_tag == NULL) {
6493        return;
6494    }
6495
6496    max_agg_queues = MAX_AGG_QS(sc);
6497
6498    /* release all mbufs and unload all DMA maps in the TPA pool */
6499    for (i = 0; i < max_agg_queues; i++) {
6500        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6501            bus_dmamap_sync(fp->rx_mbuf_tag,
6502                            fp->rx_tpa_info[i].bd.m_map,
6503                            BUS_DMASYNC_POSTREAD);
6504            bus_dmamap_unload(fp->rx_mbuf_tag,
6505                              fp->rx_tpa_info[i].bd.m_map);
6506        }
6507
6508        if (fp->rx_tpa_info[i].bd.m != NULL) {
6509            m_freem(fp->rx_tpa_info[i].bd.m);
6510            fp->rx_tpa_info[i].bd.m = NULL;
6511            fp->eth_q_stats.mbuf_alloc_tpa--;
6512        }
6513    }
6514}
6515
6516static void
6517bxe_free_sge_chain(struct bxe_fastpath *fp)
6518{
6519    struct bxe_softc *sc;
6520    int i;
6521
6522    sc = fp->sc;
6523
6524    if (fp->rx_sge_mbuf_tag == NULL) {
6525        return;
6526    }
6527
6528    /* rree all mbufs and unload all maps */
6529    for (i = 0; i < RX_SGE_TOTAL; i++) {
6530        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6531            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6532                            fp->rx_sge_mbuf_chain[i].m_map,
6533                            BUS_DMASYNC_POSTREAD);
6534            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6535                              fp->rx_sge_mbuf_chain[i].m_map);
6536        }
6537
6538        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6539            m_freem(fp->rx_sge_mbuf_chain[i].m);
6540            fp->rx_sge_mbuf_chain[i].m = NULL;
6541            fp->eth_q_stats.mbuf_alloc_sge--;
6542        }
6543    }
6544}
6545
6546static void
6547bxe_free_fp_buffers(struct bxe_softc *sc)
6548{
6549    struct bxe_fastpath *fp;
6550    int i;
6551
6552    for (i = 0; i < sc->num_queues; i++) {
6553        fp = &sc->fp[i];
6554
6555#if __FreeBSD_version >= 800000
6556        if (fp->tx_br != NULL) {
6557            struct mbuf *m;
6558            /* just in case bxe_mq_flush() wasn't called */
6559            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
6560                m_freem(m);
6561            }
6562            buf_ring_free(fp->tx_br, M_DEVBUF);
6563            fp->tx_br = NULL;
6564        }
6565#endif
6566
6567        /* free all RX buffers */
6568        bxe_free_rx_bd_chain(fp);
6569        bxe_free_tpa_pool(fp);
6570        bxe_free_sge_chain(fp);
6571
6572        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6573            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6574                  fp->eth_q_stats.mbuf_alloc_rx);
6575        }
6576
6577        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6578            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6579                  fp->eth_q_stats.mbuf_alloc_sge);
6580        }
6581
6582        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6583            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6584                  fp->eth_q_stats.mbuf_alloc_tpa);
6585        }
6586
6587        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6588            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6589                  fp->eth_q_stats.mbuf_alloc_tx);
6590        }
6591
6592        /* XXX verify all mbufs were reclaimed */
6593
6594        if (mtx_initialized(&fp->tx_mtx)) {
6595            mtx_destroy(&fp->tx_mtx);
6596        }
6597
6598        if (mtx_initialized(&fp->rx_mtx)) {
6599            mtx_destroy(&fp->rx_mtx);
6600        }
6601    }
6602}
6603
6604static int
6605bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6606                     uint16_t            prev_index,
6607                     uint16_t            index)
6608{
6609    struct bxe_sw_rx_bd *rx_buf;
6610    struct eth_rx_bd *rx_bd;
6611    bus_dma_segment_t segs[1];
6612    bus_dmamap_t map;
6613    struct mbuf *m;
6614    int nsegs, rc;
6615
6616    rc = 0;
6617
6618    /* allocate the new RX BD mbuf */
6619    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6620    if (__predict_false(m == NULL)) {
6621        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6622        return (ENOBUFS);
6623    }
6624
6625    fp->eth_q_stats.mbuf_alloc_rx++;
6626
6627    /* initialize the mbuf buffer length */
6628    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6629
6630    /* map the mbuf into non-paged pool */
6631    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6632                                 fp->rx_mbuf_spare_map,
6633                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6634    if (__predict_false(rc != 0)) {
6635        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6636        m_freem(m);
6637        fp->eth_q_stats.mbuf_alloc_rx--;
6638        return (rc);
6639    }
6640
6641    /* all mbufs must map to a single segment */
6642    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6643
6644    /* release any existing RX BD mbuf mappings */
6645
6646    if (prev_index != index) {
6647        rx_buf = &fp->rx_mbuf_chain[prev_index];
6648
6649        if (rx_buf->m_map != NULL) {
6650            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6651                            BUS_DMASYNC_POSTREAD);
6652            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6653        }
6654
6655        /*
6656         * We only get here from bxe_rxeof() when the maximum number
6657         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6658         * holds the mbuf in the prev_index so it's OK to NULL it out
6659         * here without concern of a memory leak.
6660         */
6661        fp->rx_mbuf_chain[prev_index].m = NULL;
6662    }
6663
6664    rx_buf = &fp->rx_mbuf_chain[index];
6665
6666    if (rx_buf->m_map != NULL) {
6667        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6668                        BUS_DMASYNC_POSTREAD);
6669        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6670    }
6671
6672    /* save the mbuf and mapping info for a future packet */
6673    map = (prev_index != index) ?
6674              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6675    rx_buf->m_map = fp->rx_mbuf_spare_map;
6676    fp->rx_mbuf_spare_map = map;
6677    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6678                    BUS_DMASYNC_PREREAD);
6679    rx_buf->m = m;
6680
6681    rx_bd = &fp->rx_chain[index];
6682    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6683    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6684
6685    return (rc);
6686}
6687
6688static int
6689bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6690                      int                 queue)
6691{
6692    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6693    bus_dma_segment_t segs[1];
6694    bus_dmamap_t map;
6695    struct mbuf *m;
6696    int nsegs;
6697    int rc = 0;
6698
6699    /* allocate the new TPA mbuf */
6700    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6701    if (__predict_false(m == NULL)) {
6702        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6703        return (ENOBUFS);
6704    }
6705
6706    fp->eth_q_stats.mbuf_alloc_tpa++;
6707
6708    /* initialize the mbuf buffer length */
6709    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6710
6711    /* map the mbuf into non-paged pool */
6712    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6713                                 fp->rx_tpa_info_mbuf_spare_map,
6714                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6715    if (__predict_false(rc != 0)) {
6716        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6717        m_free(m);
6718        fp->eth_q_stats.mbuf_alloc_tpa--;
6719        return (rc);
6720    }
6721
6722    /* all mbufs must map to a single segment */
6723    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6724
6725    /* release any existing TPA mbuf mapping */
6726    if (tpa_info->bd.m_map != NULL) {
6727        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6728                        BUS_DMASYNC_POSTREAD);
6729        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6730    }
6731
6732    /* save the mbuf and mapping info for the TPA mbuf */
6733    map = tpa_info->bd.m_map;
6734    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6735    fp->rx_tpa_info_mbuf_spare_map = map;
6736    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6737                    BUS_DMASYNC_PREREAD);
6738    tpa_info->bd.m = m;
6739    tpa_info->seg = segs[0];
6740
6741    return (rc);
6742}
6743
6744/*
6745 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6746 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6747 * chain.
6748 */
6749static int
6750bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6751                      uint16_t            index)
6752{
6753    struct bxe_sw_rx_bd *sge_buf;
6754    struct eth_rx_sge *sge;
6755    bus_dma_segment_t segs[1];
6756    bus_dmamap_t map;
6757    struct mbuf *m;
6758    int nsegs;
6759    int rc = 0;
6760
6761    /* allocate a new SGE mbuf */
6762    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6763    if (__predict_false(m == NULL)) {
6764        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6765        return (ENOMEM);
6766    }
6767
6768    fp->eth_q_stats.mbuf_alloc_sge++;
6769
6770    /* initialize the mbuf buffer length */
6771    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6772
6773    /* map the SGE mbuf into non-paged pool */
6774    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6775                                 fp->rx_sge_mbuf_spare_map,
6776                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6777    if (__predict_false(rc != 0)) {
6778        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6779        m_freem(m);
6780        fp->eth_q_stats.mbuf_alloc_sge--;
6781        return (rc);
6782    }
6783
6784    /* all mbufs must map to a single segment */
6785    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6786
6787    sge_buf = &fp->rx_sge_mbuf_chain[index];
6788
6789    /* release any existing SGE mbuf mapping */
6790    if (sge_buf->m_map != NULL) {
6791        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6792                        BUS_DMASYNC_POSTREAD);
6793        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6794    }
6795
6796    /* save the mbuf and mapping info for a future packet */
6797    map = sge_buf->m_map;
6798    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6799    fp->rx_sge_mbuf_spare_map = map;
6800    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6801                    BUS_DMASYNC_PREREAD);
6802    sge_buf->m = m;
6803
6804    sge = &fp->rx_sge_chain[index];
6805    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6806    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6807
6808    return (rc);
6809}
6810
6811static __noinline int
6812bxe_alloc_fp_buffers(struct bxe_softc *sc)
6813{
6814    struct bxe_fastpath *fp;
6815    int i, j, rc = 0;
6816    int ring_prod, cqe_ring_prod;
6817    int max_agg_queues;
6818
6819    for (i = 0; i < sc->num_queues; i++) {
6820        fp = &sc->fp[i];
6821
6822#if __FreeBSD_version >= 800000
6823        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
6824                                   M_NOWAIT, &fp->tx_mtx);
6825        if (fp->tx_br == NULL) {
6826            BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
6827            goto bxe_alloc_fp_buffers_error;
6828        }
6829#endif
6830
6831        ring_prod = cqe_ring_prod = 0;
6832        fp->rx_bd_cons = 0;
6833        fp->rx_cq_cons = 0;
6834
6835        /* allocate buffers for the RX BDs in RX BD chain */
6836        for (j = 0; j < sc->max_rx_bufs; j++) {
6837            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6838            if (rc != 0) {
6839                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6840                      i, rc);
6841                goto bxe_alloc_fp_buffers_error;
6842            }
6843
6844            ring_prod     = RX_BD_NEXT(ring_prod);
6845            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6846        }
6847
6848        fp->rx_bd_prod = ring_prod;
6849        fp->rx_cq_prod = cqe_ring_prod;
6850        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6851
6852        max_agg_queues = MAX_AGG_QS(sc);
6853
6854        fp->tpa_enable = TRUE;
6855
6856        /* fill the TPA pool */
6857        for (j = 0; j < max_agg_queues; j++) {
6858            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6859            if (rc != 0) {
6860                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6861                          i, j);
6862                fp->tpa_enable = FALSE;
6863                goto bxe_alloc_fp_buffers_error;
6864            }
6865
6866            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6867        }
6868
6869        if (fp->tpa_enable) {
6870            /* fill the RX SGE chain */
6871            ring_prod = 0;
6872            for (j = 0; j < RX_SGE_USABLE; j++) {
6873                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6874                if (rc != 0) {
6875                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6876                              i, ring_prod);
6877                    fp->tpa_enable = FALSE;
6878                    ring_prod = 0;
6879                    goto bxe_alloc_fp_buffers_error;
6880                }
6881
6882                ring_prod = RX_SGE_NEXT(ring_prod);
6883            }
6884
6885            fp->rx_sge_prod = ring_prod;
6886        }
6887    }
6888
6889    return (0);
6890
6891bxe_alloc_fp_buffers_error:
6892
6893    /* unwind what was already allocated */
6894    bxe_free_rx_bd_chain(fp);
6895    bxe_free_tpa_pool(fp);
6896    bxe_free_sge_chain(fp);
6897
6898    return (ENOBUFS);
6899}
6900
6901static void
6902bxe_free_fw_stats_mem(struct bxe_softc *sc)
6903{
6904    bxe_dma_free(sc, &sc->fw_stats_dma);
6905
6906    sc->fw_stats_num = 0;
6907
6908    sc->fw_stats_req_size = 0;
6909    sc->fw_stats_req = NULL;
6910    sc->fw_stats_req_mapping = 0;
6911
6912    sc->fw_stats_data_size = 0;
6913    sc->fw_stats_data = NULL;
6914    sc->fw_stats_data_mapping = 0;
6915}
6916
6917static int
6918bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6919{
6920    uint8_t num_queue_stats;
6921    int num_groups;
6922
6923    /* number of queues for statistics is number of eth queues */
6924    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6925
6926    /*
6927     * Total number of FW statistics requests =
6928     *   1 for port stats + 1 for PF stats + num of queues
6929     */
6930    sc->fw_stats_num = (2 + num_queue_stats);
6931
6932    /*
6933     * Request is built from stats_query_header and an array of
6934     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6935     * rules. The real number or requests is configured in the
6936     * stats_query_header.
6937     */
6938    num_groups =
6939        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6940         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6941
6942    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6943          sc->fw_stats_num, num_groups);
6944
6945    sc->fw_stats_req_size =
6946        (sizeof(struct stats_query_header) +
6947         (num_groups * sizeof(struct stats_query_cmd_group)));
6948
6949    /*
6950     * Data for statistics requests + stats_counter.
6951     * stats_counter holds per-STORM counters that are incremented when
6952     * STORM has finished with the current request. Memory for FCoE
6953     * offloaded statistics are counted anyway, even if they will not be sent.
6954     * VF stats are not accounted for here as the data of VF stats is stored
6955     * in memory allocated by the VF, not here.
6956     */
6957    sc->fw_stats_data_size =
6958        (sizeof(struct stats_counter) +
6959         sizeof(struct per_port_stats) +
6960         sizeof(struct per_pf_stats) +
6961         /* sizeof(struct fcoe_statistics_params) + */
6962         (sizeof(struct per_queue_stats) * num_queue_stats));
6963
6964    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6965                      &sc->fw_stats_dma, "fw stats") != 0) {
6966        bxe_free_fw_stats_mem(sc);
6967        return (-1);
6968    }
6969
6970    /* set up the shortcuts */
6971
6972    sc->fw_stats_req =
6973        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6974    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6975
6976    sc->fw_stats_data =
6977        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6978                                     sc->fw_stats_req_size);
6979    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6980                                 sc->fw_stats_req_size);
6981
6982    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6983          (uintmax_t)sc->fw_stats_req_mapping);
6984
6985    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6986          (uintmax_t)sc->fw_stats_data_mapping);
6987
6988    return (0);
6989}
6990
6991/*
6992 * Bits map:
6993 * 0-7  - Engine0 load counter.
6994 * 8-15 - Engine1 load counter.
6995 * 16   - Engine0 RESET_IN_PROGRESS bit.
6996 * 17   - Engine1 RESET_IN_PROGRESS bit.
6997 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6998 *        function on the engine
6999 * 19   - Engine1 ONE_IS_LOADED.
7000 * 20   - Chip reset flow bit. When set none-leader must wait for both engines
7001 *        leader to complete (check for both RESET_IN_PROGRESS bits and not
7002 *        for just the one belonging to its engine).
7003 */
7004#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
7005#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
7006#define BXE_PATH0_LOAD_CNT_SHIFT  0
7007#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
7008#define BXE_PATH1_LOAD_CNT_SHIFT  8
7009#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
7010#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
7011#define BXE_GLOBAL_RESET_BIT      0x00040000
7012
7013/* set the GLOBAL_RESET bit, should be run under rtnl lock */
7014static void
7015bxe_set_reset_global(struct bxe_softc *sc)
7016{
7017    uint32_t val;
7018    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7019    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7020    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
7021    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7022}
7023
7024/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
7025static void
7026bxe_clear_reset_global(struct bxe_softc *sc)
7027{
7028    uint32_t val;
7029    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7030    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7031    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
7032    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7033}
7034
7035/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
7036static uint8_t
7037bxe_reset_is_global(struct bxe_softc *sc)
7038{
7039    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7040    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
7041    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
7042}
7043
7044/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
7045static void
7046bxe_set_reset_done(struct bxe_softc *sc)
7047{
7048    uint32_t val;
7049    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
7050                                 BXE_PATH0_RST_IN_PROG_BIT;
7051
7052    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7053
7054    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7055    /* Clear the bit */
7056    val &= ~bit;
7057    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7058
7059    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7060}
7061
7062/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
7063static void
7064bxe_set_reset_in_progress(struct bxe_softc *sc)
7065{
7066    uint32_t val;
7067    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
7068                                 BXE_PATH0_RST_IN_PROG_BIT;
7069
7070    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7071
7072    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7073    /* Set the bit */
7074    val |= bit;
7075    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7076
7077    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7078}
7079
7080/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
7081static uint8_t
7082bxe_reset_is_done(struct bxe_softc *sc,
7083                  int              engine)
7084{
7085    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7086    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
7087                            BXE_PATH0_RST_IN_PROG_BIT;
7088
7089    /* return false if bit is set */
7090    return (val & bit) ? FALSE : TRUE;
7091}
7092
7093/* get the load status for an engine, should be run under rtnl lock */
7094static uint8_t
7095bxe_get_load_status(struct bxe_softc *sc,
7096                    int              engine)
7097{
7098    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
7099                             BXE_PATH0_LOAD_CNT_MASK;
7100    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
7101                              BXE_PATH0_LOAD_CNT_SHIFT;
7102    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7103
7104    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
7105
7106    val = ((val & mask) >> shift);
7107
7108    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
7109
7110    return (val != 0);
7111}
7112
7113/* set pf load mark */
7114/* XXX needs to be under rtnl lock */
7115static void
7116bxe_set_pf_load(struct bxe_softc *sc)
7117{
7118    uint32_t val;
7119    uint32_t val1;
7120    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
7121                                  BXE_PATH0_LOAD_CNT_MASK;
7122    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
7123                                   BXE_PATH0_LOAD_CNT_SHIFT;
7124
7125    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7126
7127    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7128    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
7129
7130    /* get the current counter value */
7131    val1 = ((val & mask) >> shift);
7132
7133    /* set bit of this PF */
7134    val1 |= (1 << SC_ABS_FUNC(sc));
7135
7136    /* clear the old value */
7137    val &= ~mask;
7138
7139    /* set the new one */
7140    val |= ((val1 << shift) & mask);
7141
7142    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7143
7144    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7145}
7146
7147/* clear pf load mark */
7148/* XXX needs to be under rtnl lock */
7149static uint8_t
7150bxe_clear_pf_load(struct bxe_softc *sc)
7151{
7152    uint32_t val1, val;
7153    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
7154                                  BXE_PATH0_LOAD_CNT_MASK;
7155    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
7156                                   BXE_PATH0_LOAD_CNT_SHIFT;
7157
7158    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7159    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7160    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
7161
7162    /* get the current counter value */
7163    val1 = (val & mask) >> shift;
7164
7165    /* clear bit of that PF */
7166    val1 &= ~(1 << SC_ABS_FUNC(sc));
7167
7168    /* clear the old value */
7169    val &= ~mask;
7170
7171    /* set the new one */
7172    val |= ((val1 << shift) & mask);
7173
7174    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7175    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7176    return (val1 != 0);
7177}
7178
7179/* send load requrest to mcp and analyze response */
7180static int
7181bxe_nic_load_request(struct bxe_softc *sc,
7182                     uint32_t         *load_code)
7183{
7184    /* init fw_seq */
7185    sc->fw_seq =
7186        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
7187         DRV_MSG_SEQ_NUMBER_MASK);
7188
7189    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
7190
7191    /* get the current FW pulse sequence */
7192    sc->fw_drv_pulse_wr_seq =
7193        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
7194         DRV_PULSE_SEQ_MASK);
7195
7196    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
7197          sc->fw_drv_pulse_wr_seq);
7198
7199    /* load request */
7200    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
7201                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
7202
7203    /* if the MCP fails to respond we must abort */
7204    if (!(*load_code)) {
7205        BLOGE(sc, "MCP response failure!\n");
7206        return (-1);
7207    }
7208
7209    /* if MCP refused then must abort */
7210    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7211        BLOGE(sc, "MCP refused load request\n");
7212        return (-1);
7213    }
7214
7215    return (0);
7216}
7217
7218/*
7219 * Check whether another PF has already loaded FW to chip. In virtualized
7220 * environments a pf from anoth VM may have already initialized the device
7221 * including loading FW.
7222 */
7223static int
7224bxe_nic_load_analyze_req(struct bxe_softc *sc,
7225                         uint32_t         load_code)
7226{
7227    uint32_t my_fw, loaded_fw;
7228
7229    /* is another pf loaded on this engine? */
7230    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
7231        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
7232        /* build my FW version dword */
7233        my_fw = (BCM_5710_FW_MAJOR_VERSION +
7234                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
7235                 (BCM_5710_FW_REVISION_VERSION << 16) +
7236                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
7237
7238        /* read loaded FW from chip */
7239        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
7240        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
7241              loaded_fw, my_fw);
7242
7243        /* abort nic load if version mismatch */
7244        if (my_fw != loaded_fw) {
7245            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
7246                  loaded_fw, my_fw);
7247            return (-1);
7248        }
7249    }
7250
7251    return (0);
7252}
7253
7254/* mark PMF if applicable */
7255static void
7256bxe_nic_load_pmf(struct bxe_softc *sc,
7257                 uint32_t         load_code)
7258{
7259    uint32_t ncsi_oem_data_addr;
7260
7261    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7262        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
7263        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
7264        /*
7265         * Barrier here for ordering between the writing to sc->port.pmf here
7266         * and reading it from the periodic task.
7267         */
7268        sc->port.pmf = 1;
7269        mb();
7270    } else {
7271        sc->port.pmf = 0;
7272    }
7273
7274    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
7275
7276    /* XXX needed? */
7277    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
7278        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
7279            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
7280            if (ncsi_oem_data_addr) {
7281                REG_WR(sc,
7282                       (ncsi_oem_data_addr +
7283                        offsetof(struct glob_ncsi_oem_data, driver_version)),
7284                       0);
7285            }
7286        }
7287    }
7288}
7289
7290static void
7291bxe_read_mf_cfg(struct bxe_softc *sc)
7292{
7293    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
7294    int abs_func;
7295    int vn;
7296
7297    if (BXE_NOMCP(sc)) {
7298        return; /* what should be the default bvalue in this case */
7299    }
7300
7301    /*
7302     * The formula for computing the absolute function number is...
7303     * For 2 port configuration (4 functions per port):
7304     *   abs_func = 2 * vn + SC_PORT + SC_PATH
7305     * For 4 port configuration (2 functions per port):
7306     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
7307     */
7308    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
7309        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
7310        if (abs_func >= E1H_FUNC_MAX) {
7311            break;
7312        }
7313        sc->devinfo.mf_info.mf_config[vn] =
7314            MFCFG_RD(sc, func_mf_config[abs_func].config);
7315    }
7316
7317    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
7318        FUNC_MF_CFG_FUNC_DISABLED) {
7319        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7320        sc->flags |= BXE_MF_FUNC_DIS;
7321    } else {
7322        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7323        sc->flags &= ~BXE_MF_FUNC_DIS;
7324    }
7325}
7326
7327/* acquire split MCP access lock register */
7328static int bxe_acquire_alr(struct bxe_softc *sc)
7329{
7330    uint32_t j, val;
7331
7332    for (j = 0; j < 1000; j++) {
7333        val = (1UL << 31);
7334        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7335        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7336        if (val & (1L << 31))
7337            break;
7338
7339        DELAY(5000);
7340    }
7341
7342    if (!(val & (1L << 31))) {
7343        BLOGE(sc, "Cannot acquire MCP access lock register\n");
7344        return (-1);
7345    }
7346
7347    return (0);
7348}
7349
7350/* release split MCP access lock register */
7351static void bxe_release_alr(struct bxe_softc *sc)
7352{
7353    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
7354}
7355
7356static void
7357bxe_fan_failure(struct bxe_softc *sc)
7358{
7359    int port = SC_PORT(sc);
7360    uint32_t ext_phy_config;
7361
7362    /* mark the failure */
7363    ext_phy_config =
7364        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7365
7366    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7367    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7368    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7369             ext_phy_config);
7370
7371    /* log the failure */
7372    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7373              "the card to prevent permanent damage. "
7374              "Please contact OEM Support for assistance\n");
7375
7376    /* XXX */
7377#if 1
7378    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7379#else
7380    /*
7381     * Schedule device reset (unload)
7382     * This is due to some boards consuming sufficient power when driver is
7383     * up to overheat if fan fails.
7384     */
7385    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7386    schedule_delayed_work(&sc->sp_rtnl_task, 0);
7387#endif
7388}
7389
7390/* this function is called upon a link interrupt */
7391static void
7392bxe_link_attn(struct bxe_softc *sc)
7393{
7394    uint32_t pause_enabled = 0;
7395    struct host_port_stats *pstats;
7396    int cmng_fns;
7397
7398    /* Make sure that we are synced with the current statistics */
7399    bxe_stats_handle(sc, STATS_EVENT_STOP);
7400
7401    elink_link_update(&sc->link_params, &sc->link_vars);
7402
7403    if (sc->link_vars.link_up) {
7404
7405        /* dropless flow control */
7406        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7407            pause_enabled = 0;
7408
7409            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7410                pause_enabled = 1;
7411            }
7412
7413            REG_WR(sc,
7414                   (BAR_USTRORM_INTMEM +
7415                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7416                   pause_enabled);
7417        }
7418
7419        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7420            pstats = BXE_SP(sc, port_stats);
7421            /* reset old mac stats */
7422            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7423        }
7424
7425        if (sc->state == BXE_STATE_OPEN) {
7426            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7427        }
7428    }
7429
7430    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7431        cmng_fns = bxe_get_cmng_fns_mode(sc);
7432
7433        if (cmng_fns != CMNG_FNS_NONE) {
7434            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7435            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7436        } else {
7437            /* rate shaping and fairness are disabled */
7438            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7439        }
7440    }
7441
7442    bxe_link_report_locked(sc);
7443
7444    if (IS_MF(sc)) {
7445        ; // XXX bxe_link_sync_notify(sc);
7446    }
7447}
7448
7449static void
7450bxe_attn_int_asserted(struct bxe_softc *sc,
7451                      uint32_t         asserted)
7452{
7453    int port = SC_PORT(sc);
7454    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7455                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7456    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7457                                        NIG_REG_MASK_INTERRUPT_PORT0;
7458    uint32_t aeu_mask;
7459    uint32_t nig_mask = 0;
7460    uint32_t reg_addr;
7461    uint32_t igu_acked;
7462    uint32_t cnt;
7463
7464    if (sc->attn_state & asserted) {
7465        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7466    }
7467
7468    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7469
7470    aeu_mask = REG_RD(sc, aeu_addr);
7471
7472    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7473          aeu_mask, asserted);
7474
7475    aeu_mask &= ~(asserted & 0x3ff);
7476
7477    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7478
7479    REG_WR(sc, aeu_addr, aeu_mask);
7480
7481    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7482
7483    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7484    sc->attn_state |= asserted;
7485    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7486
7487    if (asserted & ATTN_HARD_WIRED_MASK) {
7488        if (asserted & ATTN_NIG_FOR_FUNC) {
7489
7490            BXE_PHY_LOCK(sc);
7491
7492            /* save nig interrupt mask */
7493            nig_mask = REG_RD(sc, nig_int_mask_addr);
7494
7495            /* If nig_mask is not set, no need to call the update function */
7496            if (nig_mask) {
7497                REG_WR(sc, nig_int_mask_addr, 0);
7498
7499                bxe_link_attn(sc);
7500            }
7501
7502            /* handle unicore attn? */
7503        }
7504
7505        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7506            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7507        }
7508
7509        if (asserted & GPIO_2_FUNC) {
7510            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7511        }
7512
7513        if (asserted & GPIO_3_FUNC) {
7514            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7515        }
7516
7517        if (asserted & GPIO_4_FUNC) {
7518            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7519        }
7520
7521        if (port == 0) {
7522            if (asserted & ATTN_GENERAL_ATTN_1) {
7523                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7524                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7525            }
7526            if (asserted & ATTN_GENERAL_ATTN_2) {
7527                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7528                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7529            }
7530            if (asserted & ATTN_GENERAL_ATTN_3) {
7531                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7532                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7533            }
7534        } else {
7535            if (asserted & ATTN_GENERAL_ATTN_4) {
7536                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7537                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7538            }
7539            if (asserted & ATTN_GENERAL_ATTN_5) {
7540                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7541                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7542            }
7543            if (asserted & ATTN_GENERAL_ATTN_6) {
7544                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7545                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7546            }
7547        }
7548    } /* hardwired */
7549
7550    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7551        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7552    } else {
7553        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7554    }
7555
7556    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7557          asserted,
7558          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7559    REG_WR(sc, reg_addr, asserted);
7560
7561    /* now set back the mask */
7562    if (asserted & ATTN_NIG_FOR_FUNC) {
7563        /*
7564         * Verify that IGU ack through BAR was written before restoring
7565         * NIG mask. This loop should exit after 2-3 iterations max.
7566         */
7567        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7568            cnt = 0;
7569
7570            do {
7571                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7572            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7573                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7574
7575            if (!igu_acked) {
7576                BLOGE(sc, "Failed to verify IGU ack on time\n");
7577            }
7578
7579            mb();
7580        }
7581
7582        REG_WR(sc, nig_int_mask_addr, nig_mask);
7583
7584        BXE_PHY_UNLOCK(sc);
7585    }
7586}
7587
7588static void
7589bxe_print_next_block(struct bxe_softc *sc,
7590                     int              idx,
7591                     const char       *blk)
7592{
7593    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7594}
7595
7596static int
7597bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7598                              uint32_t         sig,
7599                              int              par_num,
7600                              uint8_t          print)
7601{
7602    uint32_t cur_bit = 0;
7603    int i = 0;
7604
7605    for (i = 0; sig; i++) {
7606        cur_bit = ((uint32_t)0x1 << i);
7607        if (sig & cur_bit) {
7608            switch (cur_bit) {
7609            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7610                if (print)
7611                    bxe_print_next_block(sc, par_num++, "BRB");
7612                break;
7613            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7614                if (print)
7615                    bxe_print_next_block(sc, par_num++, "PARSER");
7616                break;
7617            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7618                if (print)
7619                    bxe_print_next_block(sc, par_num++, "TSDM");
7620                break;
7621            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7622                if (print)
7623                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7624                break;
7625            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7626                if (print)
7627                    bxe_print_next_block(sc, par_num++, "TCM");
7628                break;
7629            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7630                if (print)
7631                    bxe_print_next_block(sc, par_num++, "TSEMI");
7632                break;
7633            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7634                if (print)
7635                    bxe_print_next_block(sc, par_num++, "XPB");
7636                break;
7637            }
7638
7639            /* Clear the bit */
7640            sig &= ~cur_bit;
7641        }
7642    }
7643
7644    return (par_num);
7645}
7646
7647static int
7648bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7649                              uint32_t         sig,
7650                              int              par_num,
7651                              uint8_t          *global,
7652                              uint8_t          print)
7653{
7654    int i = 0;
7655    uint32_t cur_bit = 0;
7656    for (i = 0; sig; i++) {
7657        cur_bit = ((uint32_t)0x1 << i);
7658        if (sig & cur_bit) {
7659            switch (cur_bit) {
7660            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7661                if (print)
7662                    bxe_print_next_block(sc, par_num++, "PBF");
7663                break;
7664            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7665                if (print)
7666                    bxe_print_next_block(sc, par_num++, "QM");
7667                break;
7668            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7669                if (print)
7670                    bxe_print_next_block(sc, par_num++, "TM");
7671                break;
7672            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7673                if (print)
7674                    bxe_print_next_block(sc, par_num++, "XSDM");
7675                break;
7676            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7677                if (print)
7678                    bxe_print_next_block(sc, par_num++, "XCM");
7679                break;
7680            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7681                if (print)
7682                    bxe_print_next_block(sc, par_num++, "XSEMI");
7683                break;
7684            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7685                if (print)
7686                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7687                break;
7688            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7689                if (print)
7690                    bxe_print_next_block(sc, par_num++, "NIG");
7691                break;
7692            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7693                if (print)
7694                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7695                *global = TRUE;
7696                break;
7697            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7698                if (print)
7699                    bxe_print_next_block(sc, par_num++, "DEBUG");
7700                break;
7701            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7702                if (print)
7703                    bxe_print_next_block(sc, par_num++, "USDM");
7704                break;
7705            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7706                if (print)
7707                    bxe_print_next_block(sc, par_num++, "UCM");
7708                break;
7709            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7710                if (print)
7711                    bxe_print_next_block(sc, par_num++, "USEMI");
7712                break;
7713            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7714                if (print)
7715                    bxe_print_next_block(sc, par_num++, "UPB");
7716                break;
7717            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7718                if (print)
7719                    bxe_print_next_block(sc, par_num++, "CSDM");
7720                break;
7721            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7722                if (print)
7723                    bxe_print_next_block(sc, par_num++, "CCM");
7724                break;
7725            }
7726
7727            /* Clear the bit */
7728            sig &= ~cur_bit;
7729        }
7730    }
7731
7732    return (par_num);
7733}
7734
7735static int
7736bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7737                              uint32_t         sig,
7738                              int              par_num,
7739                              uint8_t          print)
7740{
7741    uint32_t cur_bit = 0;
7742    int i = 0;
7743
7744    for (i = 0; sig; i++) {
7745        cur_bit = ((uint32_t)0x1 << i);
7746        if (sig & cur_bit) {
7747            switch (cur_bit) {
7748            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7749                if (print)
7750                    bxe_print_next_block(sc, par_num++, "CSEMI");
7751                break;
7752            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7753                if (print)
7754                    bxe_print_next_block(sc, par_num++, "PXP");
7755                break;
7756            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7757                if (print)
7758                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7759                break;
7760            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7761                if (print)
7762                    bxe_print_next_block(sc, par_num++, "CFC");
7763                break;
7764            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7765                if (print)
7766                    bxe_print_next_block(sc, par_num++, "CDU");
7767                break;
7768            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7769                if (print)
7770                    bxe_print_next_block(sc, par_num++, "DMAE");
7771                break;
7772            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7773                if (print)
7774                    bxe_print_next_block(sc, par_num++, "IGU");
7775                break;
7776            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7777                if (print)
7778                    bxe_print_next_block(sc, par_num++, "MISC");
7779                break;
7780            }
7781
7782            /* Clear the bit */
7783            sig &= ~cur_bit;
7784        }
7785    }
7786
7787    return (par_num);
7788}
7789
7790static int
7791bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7792                              uint32_t         sig,
7793                              int              par_num,
7794                              uint8_t          *global,
7795                              uint8_t          print)
7796{
7797    uint32_t cur_bit = 0;
7798    int i = 0;
7799
7800    for (i = 0; sig; i++) {
7801        cur_bit = ((uint32_t)0x1 << i);
7802        if (sig & cur_bit) {
7803            switch (cur_bit) {
7804            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7805                if (print)
7806                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7807                *global = TRUE;
7808                break;
7809            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7810                if (print)
7811                    bxe_print_next_block(sc, par_num++,
7812                              "MCP UMP RX");
7813                *global = TRUE;
7814                break;
7815            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7816                if (print)
7817                    bxe_print_next_block(sc, par_num++,
7818                              "MCP UMP TX");
7819                *global = TRUE;
7820                break;
7821            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7822                if (print)
7823                    bxe_print_next_block(sc, par_num++,
7824                              "MCP SCPAD");
7825                *global = TRUE;
7826                break;
7827            }
7828
7829            /* Clear the bit */
7830            sig &= ~cur_bit;
7831        }
7832    }
7833
7834    return (par_num);
7835}
7836
7837static int
7838bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7839                              uint32_t         sig,
7840                              int              par_num,
7841                              uint8_t          print)
7842{
7843    uint32_t cur_bit = 0;
7844    int i = 0;
7845
7846    for (i = 0; sig; i++) {
7847        cur_bit = ((uint32_t)0x1 << i);
7848        if (sig & cur_bit) {
7849            switch (cur_bit) {
7850            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7851                if (print)
7852                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7853                break;
7854            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7855                if (print)
7856                    bxe_print_next_block(sc, par_num++, "ATC");
7857                break;
7858            }
7859
7860            /* Clear the bit */
7861            sig &= ~cur_bit;
7862        }
7863    }
7864
7865    return (par_num);
7866}
7867
7868static uint8_t
7869bxe_parity_attn(struct bxe_softc *sc,
7870                uint8_t          *global,
7871                uint8_t          print,
7872                uint32_t         *sig)
7873{
7874    int par_num = 0;
7875
7876    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7877        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7878        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7879        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7880        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7881        BLOGE(sc, "Parity error: HW block parity attention:\n"
7882                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7883              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7884              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7885              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7886              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7887              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7888
7889        if (print)
7890            BLOGI(sc, "Parity errors detected in blocks: ");
7891
7892        par_num =
7893            bxe_check_blocks_with_parity0(sc, sig[0] &
7894                                          HW_PRTY_ASSERT_SET_0,
7895                                          par_num, print);
7896        par_num =
7897            bxe_check_blocks_with_parity1(sc, sig[1] &
7898                                          HW_PRTY_ASSERT_SET_1,
7899                                          par_num, global, print);
7900        par_num =
7901            bxe_check_blocks_with_parity2(sc, sig[2] &
7902                                          HW_PRTY_ASSERT_SET_2,
7903                                          par_num, print);
7904        par_num =
7905            bxe_check_blocks_with_parity3(sc, sig[3] &
7906                                          HW_PRTY_ASSERT_SET_3,
7907                                          par_num, global, print);
7908        par_num =
7909            bxe_check_blocks_with_parity4(sc, sig[4] &
7910                                          HW_PRTY_ASSERT_SET_4,
7911                                          par_num, print);
7912
7913        if (print)
7914            BLOGI(sc, "\n");
7915
7916        return (TRUE);
7917    }
7918
7919    return (FALSE);
7920}
7921
7922static uint8_t
7923bxe_chk_parity_attn(struct bxe_softc *sc,
7924                    uint8_t          *global,
7925                    uint8_t          print)
7926{
7927    struct attn_route attn = { {0} };
7928    int port = SC_PORT(sc);
7929
7930    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7931    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7932    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7933    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7934
7935    if (!CHIP_IS_E1x(sc))
7936        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7937
7938    return (bxe_parity_attn(sc, global, print, attn.sig));
7939}
7940
7941static void
7942bxe_attn_int_deasserted4(struct bxe_softc *sc,
7943                         uint32_t         attn)
7944{
7945    uint32_t val;
7946
7947    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7948        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7949        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7950        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7951            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7952        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7953            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7954        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7955            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7956        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7957            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7958        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7959            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7960        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7961            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7962        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7963            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7964        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7965            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7966        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7967            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7968    }
7969
7970    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7971        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7972        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7973        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7974            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7975        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7976            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7977        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7978            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7979        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7980            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7981        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7982            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7983        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7984            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7985    }
7986
7987    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7988                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7989        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7990              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7991                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7992    }
7993}
7994
7995static void
7996bxe_e1h_disable(struct bxe_softc *sc)
7997{
7998    int port = SC_PORT(sc);
7999
8000    bxe_tx_disable(sc);
8001
8002    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8003}
8004
8005static void
8006bxe_e1h_enable(struct bxe_softc *sc)
8007{
8008    int port = SC_PORT(sc);
8009
8010    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
8011
8012    // XXX bxe_tx_enable(sc);
8013}
8014
8015/*
8016 * called due to MCP event (on pmf):
8017 *   reread new bandwidth configuration
8018 *   configure FW
8019 *   notify others function about the change
8020 */
8021static void
8022bxe_config_mf_bw(struct bxe_softc *sc)
8023{
8024    if (sc->link_vars.link_up) {
8025        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
8026        // XXX bxe_link_sync_notify(sc);
8027    }
8028
8029    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
8030}
8031
8032static void
8033bxe_set_mf_bw(struct bxe_softc *sc)
8034{
8035    bxe_config_mf_bw(sc);
8036    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
8037}
8038
8039static void
8040bxe_handle_eee_event(struct bxe_softc *sc)
8041{
8042    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
8043    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
8044}
8045
8046#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
8047
8048static void
8049bxe_drv_info_ether_stat(struct bxe_softc *sc)
8050{
8051    struct eth_stats_info *ether_stat =
8052        &sc->sp->drv_info_to_mcp.ether_stat;
8053
8054    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
8055            ETH_STAT_INFO_VERSION_LEN);
8056
8057    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
8058    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
8059                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
8060                                          ether_stat->mac_local + MAC_PAD,
8061                                          MAC_PAD, ETH_ALEN);
8062
8063    ether_stat->mtu_size = sc->mtu;
8064
8065    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
8066    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
8067        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
8068    }
8069
8070    // XXX ether_stat->feature_flags |= ???;
8071
8072    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
8073
8074    ether_stat->txq_size = sc->tx_ring_size;
8075    ether_stat->rxq_size = sc->rx_ring_size;
8076}
8077
8078static void
8079bxe_handle_drv_info_req(struct bxe_softc *sc)
8080{
8081    enum drv_info_opcode op_code;
8082    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
8083
8084    /* if drv_info version supported by MFW doesn't match - send NACK */
8085    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
8086        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
8087        return;
8088    }
8089
8090    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
8091               DRV_INFO_CONTROL_OP_CODE_SHIFT);
8092
8093    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
8094
8095    switch (op_code) {
8096    case ETH_STATS_OPCODE:
8097        bxe_drv_info_ether_stat(sc);
8098        break;
8099    case FCOE_STATS_OPCODE:
8100    case ISCSI_STATS_OPCODE:
8101    default:
8102        /* if op code isn't supported - send NACK */
8103        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
8104        return;
8105    }
8106
8107    /*
8108     * If we got drv_info attn from MFW then these fields are defined in
8109     * shmem2 for sure
8110     */
8111    SHMEM2_WR(sc, drv_info_host_addr_lo,
8112              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
8113    SHMEM2_WR(sc, drv_info_host_addr_hi,
8114              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
8115
8116    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
8117}
8118
8119static void
8120bxe_dcc_event(struct bxe_softc *sc,
8121              uint32_t         dcc_event)
8122{
8123    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
8124
8125    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
8126        /*
8127         * This is the only place besides the function initialization
8128         * where the sc->flags can change so it is done without any
8129         * locks
8130         */
8131        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
8132            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
8133            sc->flags |= BXE_MF_FUNC_DIS;
8134            bxe_e1h_disable(sc);
8135        } else {
8136            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
8137            sc->flags &= ~BXE_MF_FUNC_DIS;
8138            bxe_e1h_enable(sc);
8139        }
8140        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
8141    }
8142
8143    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
8144        bxe_config_mf_bw(sc);
8145        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
8146    }
8147
8148    /* Report results to MCP */
8149    if (dcc_event)
8150        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
8151    else
8152        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
8153}
8154
8155static void
8156bxe_pmf_update(struct bxe_softc *sc)
8157{
8158    int port = SC_PORT(sc);
8159    uint32_t val;
8160
8161    sc->port.pmf = 1;
8162    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
8163
8164    /*
8165     * We need the mb() to ensure the ordering between the writing to
8166     * sc->port.pmf here and reading it from the bxe_periodic_task().
8167     */
8168    mb();
8169
8170    /* queue a periodic task */
8171    // XXX schedule task...
8172
8173    // XXX bxe_dcbx_pmf_update(sc);
8174
8175    /* enable nig attention */
8176    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
8177    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8178        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
8179        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
8180    } else if (!CHIP_IS_E1x(sc)) {
8181        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
8182        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
8183    }
8184
8185    bxe_stats_handle(sc, STATS_EVENT_PMF);
8186}
8187
8188static int
8189bxe_mc_assert(struct bxe_softc *sc)
8190{
8191    char last_idx;
8192    int i, rc = 0;
8193    uint32_t row0, row1, row2, row3;
8194
8195    /* XSTORM */
8196    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
8197    if (last_idx)
8198        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8199
8200    /* print the asserts */
8201    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8202
8203        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
8204        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
8205        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
8206        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
8207
8208        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8209            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8210                  i, row3, row2, row1, row0);
8211            rc++;
8212        } else {
8213            break;
8214        }
8215    }
8216
8217    /* TSTORM */
8218    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
8219    if (last_idx) {
8220        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8221    }
8222
8223    /* print the asserts */
8224    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8225
8226        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
8227        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
8228        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
8229        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
8230
8231        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8232            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8233                  i, row3, row2, row1, row0);
8234            rc++;
8235        } else {
8236            break;
8237        }
8238    }
8239
8240    /* CSTORM */
8241    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
8242    if (last_idx) {
8243        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8244    }
8245
8246    /* print the asserts */
8247    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8248
8249        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
8250        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
8251        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
8252        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
8253
8254        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8255            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8256                  i, row3, row2, row1, row0);
8257            rc++;
8258        } else {
8259            break;
8260        }
8261    }
8262
8263    /* USTORM */
8264    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
8265    if (last_idx) {
8266        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8267    }
8268
8269    /* print the asserts */
8270    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8271
8272        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
8273        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
8274        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
8275        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
8276
8277        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8278            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8279                  i, row3, row2, row1, row0);
8280            rc++;
8281        } else {
8282            break;
8283        }
8284    }
8285
8286    return (rc);
8287}
8288
8289static void
8290bxe_attn_int_deasserted3(struct bxe_softc *sc,
8291                         uint32_t         attn)
8292{
8293    int func = SC_FUNC(sc);
8294    uint32_t val;
8295
8296    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
8297
8298        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
8299
8300            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8301            bxe_read_mf_cfg(sc);
8302            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
8303                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
8304            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
8305
8306            if (val & DRV_STATUS_DCC_EVENT_MASK)
8307                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
8308
8309            if (val & DRV_STATUS_SET_MF_BW)
8310                bxe_set_mf_bw(sc);
8311
8312            if (val & DRV_STATUS_DRV_INFO_REQ)
8313                bxe_handle_drv_info_req(sc);
8314
8315#if 0
8316            if (val & DRV_STATUS_VF_DISABLED)
8317                bxe_vf_handle_flr_event(sc);
8318#endif
8319
8320            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8321                bxe_pmf_update(sc);
8322
8323#if 0
8324            if (sc->port.pmf &&
8325                (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
8326                (sc->dcbx_enabled > 0))
8327                /* start dcbx state machine */
8328                bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED);
8329#endif
8330
8331#if 0
8332            if (val & DRV_STATUS_AFEX_EVENT_MASK)
8333                bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK);
8334#endif
8335
8336            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8337                bxe_handle_eee_event(sc);
8338
8339            if (sc->link_vars.periodic_flags &
8340                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8341                /* sync with link */
8342                BXE_PHY_LOCK(sc);
8343                sc->link_vars.periodic_flags &=
8344                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8345                BXE_PHY_UNLOCK(sc);
8346                if (IS_MF(sc))
8347                    ; // XXX bxe_link_sync_notify(sc);
8348                bxe_link_report(sc);
8349            }
8350
8351            /*
8352             * Always call it here: bxe_link_report() will
8353             * prevent the link indication duplication.
8354             */
8355            bxe_link_status_update(sc);
8356
8357        } else if (attn & BXE_MC_ASSERT_BITS) {
8358
8359            BLOGE(sc, "MC assert!\n");
8360            bxe_mc_assert(sc);
8361            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8362            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8363            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8364            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8365            bxe_panic(sc, ("MC assert!\n"));
8366
8367        } else if (attn & BXE_MCP_ASSERT) {
8368
8369            BLOGE(sc, "MCP assert!\n");
8370            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8371            // XXX bxe_fw_dump(sc);
8372
8373        } else {
8374            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8375        }
8376    }
8377
8378    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8379        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8380        if (attn & BXE_GRC_TIMEOUT) {
8381            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8382            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8383        }
8384        if (attn & BXE_GRC_RSV) {
8385            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8386            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8387        }
8388        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8389    }
8390}
8391
8392static void
8393bxe_attn_int_deasserted2(struct bxe_softc *sc,
8394                         uint32_t         attn)
8395{
8396    int port = SC_PORT(sc);
8397    int reg_offset;
8398    uint32_t val0, mask0, val1, mask1;
8399    uint32_t val;
8400
8401    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8402        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8403        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8404        /* CFC error attention */
8405        if (val & 0x2) {
8406            BLOGE(sc, "FATAL error from CFC\n");
8407        }
8408    }
8409
8410    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8411        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8412        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8413        /* RQ_USDMDP_FIFO_OVERFLOW */
8414        if (val & 0x18000) {
8415            BLOGE(sc, "FATAL error from PXP\n");
8416        }
8417
8418        if (!CHIP_IS_E1x(sc)) {
8419            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8420            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8421        }
8422    }
8423
8424#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8425#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8426
8427    if (attn & AEU_PXP2_HW_INT_BIT) {
8428        /*  CQ47854 workaround do not panic on
8429         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8430         */
8431        if (!CHIP_IS_E1x(sc)) {
8432            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8433            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8434            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8435            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8436            /*
8437             * If the olny PXP2_EOP_ERROR_BIT is set in
8438             * STS0 and STS1 - clear it
8439             *
8440             * probably we lose additional attentions between
8441             * STS0 and STS_CLR0, in this case user will not
8442             * be notified about them
8443             */
8444            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8445                !(val1 & mask1))
8446                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8447
8448            /* print the register, since no one can restore it */
8449            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8450
8451            /*
8452             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8453             * then notify
8454             */
8455            if (val0 & PXP2_EOP_ERROR_BIT) {
8456                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8457
8458                /*
8459                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8460                 * set then clear attention from PXP2 block without panic
8461                 */
8462                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8463                    ((val1 & mask1) == 0))
8464                    attn &= ~AEU_PXP2_HW_INT_BIT;
8465            }
8466        }
8467    }
8468
8469    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8470        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8471                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8472
8473        val = REG_RD(sc, reg_offset);
8474        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8475        REG_WR(sc, reg_offset, val);
8476
8477        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8478              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8479        bxe_panic(sc, ("HW block attention set2\n"));
8480    }
8481}
8482
8483static void
8484bxe_attn_int_deasserted1(struct bxe_softc *sc,
8485                         uint32_t         attn)
8486{
8487    int port = SC_PORT(sc);
8488    int reg_offset;
8489    uint32_t val;
8490
8491    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8492        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8493        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8494        /* DORQ discard attention */
8495        if (val & 0x2) {
8496            BLOGE(sc, "FATAL error from DORQ\n");
8497        }
8498    }
8499
8500    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8501        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8502                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8503
8504        val = REG_RD(sc, reg_offset);
8505        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8506        REG_WR(sc, reg_offset, val);
8507
8508        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8509              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8510        bxe_panic(sc, ("HW block attention set1\n"));
8511    }
8512}
8513
8514static void
8515bxe_attn_int_deasserted0(struct bxe_softc *sc,
8516                         uint32_t         attn)
8517{
8518    int port = SC_PORT(sc);
8519    int reg_offset;
8520    uint32_t val;
8521
8522    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8523                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8524
8525    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8526        val = REG_RD(sc, reg_offset);
8527        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8528        REG_WR(sc, reg_offset, val);
8529
8530        BLOGW(sc, "SPIO5 hw attention\n");
8531
8532        /* Fan failure attention */
8533        elink_hw_reset_phy(&sc->link_params);
8534        bxe_fan_failure(sc);
8535    }
8536
8537    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8538        BXE_PHY_LOCK(sc);
8539        elink_handle_module_detect_int(&sc->link_params);
8540        BXE_PHY_UNLOCK(sc);
8541    }
8542
8543    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8544        val = REG_RD(sc, reg_offset);
8545        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8546        REG_WR(sc, reg_offset, val);
8547
8548        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8549                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8550    }
8551}
8552
8553static void
8554bxe_attn_int_deasserted(struct bxe_softc *sc,
8555                        uint32_t         deasserted)
8556{
8557    struct attn_route attn;
8558    struct attn_route *group_mask;
8559    int port = SC_PORT(sc);
8560    int index;
8561    uint32_t reg_addr;
8562    uint32_t val;
8563    uint32_t aeu_mask;
8564    uint8_t global = FALSE;
8565
8566    /*
8567     * Need to take HW lock because MCP or other port might also
8568     * try to handle this event.
8569     */
8570    bxe_acquire_alr(sc);
8571
8572    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8573        /* XXX
8574         * In case of parity errors don't handle attentions so that
8575         * other function would "see" parity errors.
8576         */
8577        sc->recovery_state = BXE_RECOVERY_INIT;
8578        // XXX schedule a recovery task...
8579        /* disable HW interrupts */
8580        bxe_int_disable(sc);
8581        bxe_release_alr(sc);
8582        return;
8583    }
8584
8585    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8586    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8587    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8588    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8589    if (!CHIP_IS_E1x(sc)) {
8590        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8591    } else {
8592        attn.sig[4] = 0;
8593    }
8594
8595    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8596          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8597
8598    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8599        if (deasserted & (1 << index)) {
8600            group_mask = &sc->attn_group[index];
8601
8602            BLOGD(sc, DBG_INTR,
8603                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8604                  group_mask->sig[0], group_mask->sig[1],
8605                  group_mask->sig[2], group_mask->sig[3],
8606                  group_mask->sig[4]);
8607
8608            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8609            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8610            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8611            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8612            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8613        }
8614    }
8615
8616    bxe_release_alr(sc);
8617
8618    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8619        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8620                    COMMAND_REG_ATTN_BITS_CLR);
8621    } else {
8622        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8623    }
8624
8625    val = ~deasserted;
8626    BLOGD(sc, DBG_INTR,
8627          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8628          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8629    REG_WR(sc, reg_addr, val);
8630
8631    if (~sc->attn_state & deasserted) {
8632        BLOGE(sc, "IGU error\n");
8633    }
8634
8635    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8636                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8637
8638    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8639
8640    aeu_mask = REG_RD(sc, reg_addr);
8641
8642    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8643          aeu_mask, deasserted);
8644    aeu_mask |= (deasserted & 0x3ff);
8645    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8646
8647    REG_WR(sc, reg_addr, aeu_mask);
8648    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8649
8650    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8651    sc->attn_state &= ~deasserted;
8652    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8653}
8654
8655static void
8656bxe_attn_int(struct bxe_softc *sc)
8657{
8658    /* read local copy of bits */
8659    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8660    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8661    uint32_t attn_state = sc->attn_state;
8662
8663    /* look for changed bits */
8664    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8665    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8666
8667    BLOGD(sc, DBG_INTR,
8668          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8669          attn_bits, attn_ack, asserted, deasserted);
8670
8671    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8672        BLOGE(sc, "BAD attention state\n");
8673    }
8674
8675    /* handle bits that were raised */
8676    if (asserted) {
8677        bxe_attn_int_asserted(sc, asserted);
8678    }
8679
8680    if (deasserted) {
8681        bxe_attn_int_deasserted(sc, deasserted);
8682    }
8683}
8684
8685static uint16_t
8686bxe_update_dsb_idx(struct bxe_softc *sc)
8687{
8688    struct host_sp_status_block *def_sb = sc->def_sb;
8689    uint16_t rc = 0;
8690
8691    mb(); /* status block is written to by the chip */
8692
8693    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8694        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8695        rc |= BXE_DEF_SB_ATT_IDX;
8696    }
8697
8698    if (sc->def_idx != def_sb->sp_sb.running_index) {
8699        sc->def_idx = def_sb->sp_sb.running_index;
8700        rc |= BXE_DEF_SB_IDX;
8701    }
8702
8703    mb();
8704
8705    return (rc);
8706}
8707
8708static inline struct ecore_queue_sp_obj *
8709bxe_cid_to_q_obj(struct bxe_softc *sc,
8710                 uint32_t         cid)
8711{
8712    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8713    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8714}
8715
8716static void
8717bxe_handle_mcast_eqe(struct bxe_softc *sc)
8718{
8719    struct ecore_mcast_ramrod_params rparam;
8720    int rc;
8721
8722    memset(&rparam, 0, sizeof(rparam));
8723
8724    rparam.mcast_obj = &sc->mcast_obj;
8725
8726    BXE_MCAST_LOCK(sc);
8727
8728    /* clear pending state for the last command */
8729    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8730
8731    /* if there are pending mcast commands - send them */
8732    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8733        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8734        if (rc < 0) {
8735            BLOGD(sc, DBG_SP,
8736                  "ERROR: Failed to send pending mcast commands (%d)\n",
8737                  rc);
8738        }
8739    }
8740
8741    BXE_MCAST_UNLOCK(sc);
8742}
8743
8744static void
8745bxe_handle_classification_eqe(struct bxe_softc      *sc,
8746                              union event_ring_elem *elem)
8747{
8748    unsigned long ramrod_flags = 0;
8749    int rc = 0;
8750    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8751    struct ecore_vlan_mac_obj *vlan_mac_obj;
8752
8753    /* always push next commands out, don't wait here */
8754    bit_set(&ramrod_flags, RAMROD_CONT);
8755
8756    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8757    case ECORE_FILTER_MAC_PENDING:
8758        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8759        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8760        break;
8761
8762    case ECORE_FILTER_MCAST_PENDING:
8763        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8764        /*
8765         * This is only relevant for 57710 where multicast MACs are
8766         * configured as unicast MACs using the same ramrod.
8767         */
8768        bxe_handle_mcast_eqe(sc);
8769        return;
8770
8771    default:
8772        BLOGE(sc, "Unsupported classification command: %d\n",
8773              elem->message.data.eth_event.echo);
8774        return;
8775    }
8776
8777    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8778
8779    if (rc < 0) {
8780        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8781    } else if (rc > 0) {
8782        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8783    }
8784}
8785
8786static void
8787bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8788                       union event_ring_elem *elem)
8789{
8790    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8791
8792    /* send rx_mode command again if was requested */
8793    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8794                               &sc->sp_state)) {
8795        bxe_set_storm_rx_mode(sc);
8796    }
8797#if 0
8798    else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED,
8799                                    &sc->sp_state)) {
8800        bxe_set_iscsi_eth_rx_mode(sc, TRUE);
8801    }
8802    else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
8803                                    &sc->sp_state)) {
8804        bxe_set_iscsi_eth_rx_mode(sc, FALSE);
8805    }
8806#endif
8807}
8808
8809static void
8810bxe_update_eq_prod(struct bxe_softc *sc,
8811                   uint16_t         prod)
8812{
8813    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8814    wmb(); /* keep prod updates ordered */
8815}
8816
8817static void
8818bxe_eq_int(struct bxe_softc *sc)
8819{
8820    uint16_t hw_cons, sw_cons, sw_prod;
8821    union event_ring_elem *elem;
8822    uint8_t echo;
8823    uint32_t cid;
8824    uint8_t opcode;
8825    int spqe_cnt = 0;
8826    struct ecore_queue_sp_obj *q_obj;
8827    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8828    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8829
8830    hw_cons = le16toh(*sc->eq_cons_sb);
8831
8832    /*
8833     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8834     * when we get to the next-page we need to adjust so the loop
8835     * condition below will be met. The next element is the size of a
8836     * regular element and hence incrementing by 1
8837     */
8838    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8839        hw_cons++;
8840    }
8841
8842    /*
8843     * This function may never run in parallel with itself for a
8844     * specific sc and no need for a read memory barrier here.
8845     */
8846    sw_cons = sc->eq_cons;
8847    sw_prod = sc->eq_prod;
8848
8849    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8850          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8851
8852    for (;
8853         sw_cons != hw_cons;
8854         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8855
8856        elem = &sc->eq[EQ_DESC(sw_cons)];
8857
8858#if 0
8859        int rc;
8860        rc = bxe_iov_eq_sp_event(sc, elem);
8861        if (!rc) {
8862            BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc);
8863            goto next_spqe;
8864        }
8865#endif
8866
8867        /* elem CID originates from FW, actually LE */
8868        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8869        opcode = elem->message.opcode;
8870
8871        /* handle eq element */
8872        switch (opcode) {
8873#if 0
8874        case EVENT_RING_OPCODE_VF_PF_CHANNEL:
8875            BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n");
8876            bxe_vf_mbx(sc, &elem->message.data.vf_pf_event);
8877            continue;
8878#endif
8879
8880        case EVENT_RING_OPCODE_STAT_QUERY:
8881            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8882                  sc->stats_comp++);
8883            /* nothing to do with stats comp */
8884            goto next_spqe;
8885
8886        case EVENT_RING_OPCODE_CFC_DEL:
8887            /* handle according to cid range */
8888            /* we may want to verify here that the sc state is HALTING */
8889            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8890            q_obj = bxe_cid_to_q_obj(sc, cid);
8891            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8892                break;
8893            }
8894            goto next_spqe;
8895
8896        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8897            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8898            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8899                break;
8900            }
8901            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8902            goto next_spqe;
8903
8904        case EVENT_RING_OPCODE_START_TRAFFIC:
8905            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8906            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8907                break;
8908            }
8909            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8910            goto next_spqe;
8911
8912        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8913            echo = elem->message.data.function_update_event.echo;
8914            if (echo == SWITCH_UPDATE) {
8915                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8916                if (f_obj->complete_cmd(sc, f_obj,
8917                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8918                    break;
8919                }
8920            }
8921            else {
8922                BLOGD(sc, DBG_SP,
8923                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8924#if 0
8925                f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE);
8926                /*
8927                 * We will perform the queues update from the sp_core_task as
8928                 * all queue SP operations should run with CORE_LOCK.
8929                 */
8930                bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state);
8931                taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8932#endif
8933            }
8934            goto next_spqe;
8935
8936#if 0
8937        case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
8938            f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS);
8939            bxe_after_afex_vif_lists(sc, elem);
8940            goto next_spqe;
8941#endif
8942
8943        case EVENT_RING_OPCODE_FORWARD_SETUP:
8944            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8945            if (q_obj->complete_cmd(sc, q_obj,
8946                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8947                break;
8948            }
8949            goto next_spqe;
8950
8951        case EVENT_RING_OPCODE_FUNCTION_START:
8952            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8953            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8954                break;
8955            }
8956            goto next_spqe;
8957
8958        case EVENT_RING_OPCODE_FUNCTION_STOP:
8959            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8960            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8961                break;
8962            }
8963            goto next_spqe;
8964        }
8965
8966        switch (opcode | sc->state) {
8967        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8968        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8969            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8970            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8971            rss_raw->clear_pending(rss_raw);
8972            break;
8973
8974        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8975        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8976        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8977        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8978        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8979        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8980            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8981            bxe_handle_classification_eqe(sc, elem);
8982            break;
8983
8984        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8985        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8986        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8987            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8988            bxe_handle_mcast_eqe(sc);
8989            break;
8990
8991        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8992        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8993        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8994            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8995            bxe_handle_rx_mode_eqe(sc, elem);
8996            break;
8997
8998        default:
8999            /* unknown event log error and continue */
9000            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
9001                  elem->message.opcode, sc->state);
9002        }
9003
9004next_spqe:
9005        spqe_cnt++;
9006    } /* for */
9007
9008    mb();
9009    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
9010
9011    sc->eq_cons = sw_cons;
9012    sc->eq_prod = sw_prod;
9013
9014    /* make sure that above mem writes were issued towards the memory */
9015    wmb();
9016
9017    /* update producer */
9018    bxe_update_eq_prod(sc, sc->eq_prod);
9019}
9020
9021static void
9022bxe_handle_sp_tq(void *context,
9023                 int  pending)
9024{
9025    struct bxe_softc *sc = (struct bxe_softc *)context;
9026    uint16_t status;
9027
9028    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
9029
9030    /* what work needs to be performed? */
9031    status = bxe_update_dsb_idx(sc);
9032
9033    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
9034
9035    /* HW attentions */
9036    if (status & BXE_DEF_SB_ATT_IDX) {
9037        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
9038        bxe_attn_int(sc);
9039        status &= ~BXE_DEF_SB_ATT_IDX;
9040    }
9041
9042    /* SP events: STAT_QUERY and others */
9043    if (status & BXE_DEF_SB_IDX) {
9044        /* handle EQ completions */
9045        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
9046        bxe_eq_int(sc);
9047        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
9048                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
9049        status &= ~BXE_DEF_SB_IDX;
9050    }
9051
9052    /* if status is non zero then something went wrong */
9053    if (__predict_false(status)) {
9054        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
9055    }
9056
9057    /* ack status block only if something was actually handled */
9058    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
9059               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
9060
9061    /*
9062     * Must be called after the EQ processing (since eq leads to sriov
9063     * ramrod completion flows).
9064     * This flow may have been scheduled by the arrival of a ramrod
9065     * completion, or by the sriov code rescheduling itself.
9066     */
9067    // XXX bxe_iov_sp_task(sc);
9068
9069#if 0
9070    /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */
9071    if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
9072                               &sc->sp_state)) {
9073        bxe_link_report(sc);
9074        bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
9075    }
9076#endif
9077}
9078
9079static void
9080bxe_handle_fp_tq(void *context,
9081                 int  pending)
9082{
9083    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
9084    struct bxe_softc *sc = fp->sc;
9085    uint8_t more_tx = FALSE;
9086    uint8_t more_rx = FALSE;
9087
9088    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
9089
9090    /* XXX
9091     * IFF_DRV_RUNNING state can't be checked here since we process
9092     * slowpath events on a client queue during setup. Instead
9093     * we need to add a "process/continue" flag here that the driver
9094     * can use to tell the task here not to do anything.
9095     */
9096#if 0
9097    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
9098        return;
9099    }
9100#endif
9101
9102    /* update the fastpath index */
9103    bxe_update_fp_sb_idx(fp);
9104
9105    /* XXX add loop here if ever support multiple tx CoS */
9106    /* fp->txdata[cos] */
9107    if (bxe_has_tx_work(fp)) {
9108        BXE_FP_TX_LOCK(fp);
9109        more_tx = bxe_txeof(sc, fp);
9110        BXE_FP_TX_UNLOCK(fp);
9111    }
9112
9113    if (bxe_has_rx_work(fp)) {
9114        more_rx = bxe_rxeof(sc, fp);
9115    }
9116
9117    if (more_rx /*|| more_tx*/) {
9118        /* still more work to do */
9119        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9120        return;
9121    }
9122
9123    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9124               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9125}
9126
9127static void
9128bxe_task_fp(struct bxe_fastpath *fp)
9129{
9130    struct bxe_softc *sc = fp->sc;
9131    uint8_t more_tx = FALSE;
9132    uint8_t more_rx = FALSE;
9133
9134    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
9135
9136    /* update the fastpath index */
9137    bxe_update_fp_sb_idx(fp);
9138
9139    /* XXX add loop here if ever support multiple tx CoS */
9140    /* fp->txdata[cos] */
9141    if (bxe_has_tx_work(fp)) {
9142        BXE_FP_TX_LOCK(fp);
9143        more_tx = bxe_txeof(sc, fp);
9144        BXE_FP_TX_UNLOCK(fp);
9145    }
9146
9147    if (bxe_has_rx_work(fp)) {
9148        more_rx = bxe_rxeof(sc, fp);
9149    }
9150
9151    if (more_rx /*|| more_tx*/) {
9152        /* still more work to do, bail out if this ISR and process later */
9153        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9154        return;
9155    }
9156
9157    /*
9158     * Here we write the fastpath index taken before doing any tx or rx work.
9159     * It is very well possible other hw events occurred up to this point and
9160     * they were actually processed accordingly above. Since we're going to
9161     * write an older fastpath index, an interrupt is coming which we might
9162     * not do any work in.
9163     */
9164    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9165               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9166}
9167
9168/*
9169 * Legacy interrupt entry point.
9170 *
9171 * Verifies that the controller generated the interrupt and
9172 * then calls a separate routine to handle the various
9173 * interrupt causes: link, RX, and TX.
9174 */
9175static void
9176bxe_intr_legacy(void *xsc)
9177{
9178    struct bxe_softc *sc = (struct bxe_softc *)xsc;
9179    struct bxe_fastpath *fp;
9180    uint16_t status, mask;
9181    int i;
9182
9183    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
9184
9185#if 0
9186    /* Don't handle any interrupts if we're not ready. */
9187    if (__predict_false(sc->intr_sem != 0)) {
9188        return;
9189    }
9190#endif
9191
9192    /*
9193     * 0 for ustorm, 1 for cstorm
9194     * the bits returned from ack_int() are 0-15
9195     * bit 0 = attention status block
9196     * bit 1 = fast path status block
9197     * a mask of 0x2 or more = tx/rx event
9198     * a mask of 1 = slow path event
9199     */
9200
9201    status = bxe_ack_int(sc);
9202
9203    /* the interrupt is not for us */
9204    if (__predict_false(status == 0)) {
9205        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
9206        return;
9207    }
9208
9209    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
9210
9211    FOR_EACH_ETH_QUEUE(sc, i) {
9212        fp = &sc->fp[i];
9213        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
9214        if (status & mask) {
9215            /* acknowledge and disable further fastpath interrupts */
9216            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9217            bxe_task_fp(fp);
9218            status &= ~mask;
9219        }
9220    }
9221
9222#if 0
9223    if (CNIC_SUPPORT(sc)) {
9224        mask = 0x2;
9225        if (status & (mask | 0x1)) {
9226            ...
9227            status &= ~mask;
9228        }
9229    }
9230#endif
9231
9232    if (__predict_false(status & 0x1)) {
9233        /* acknowledge and disable further slowpath interrupts */
9234        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9235
9236        /* schedule slowpath handler */
9237        taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9238
9239        status &= ~0x1;
9240    }
9241
9242    if (__predict_false(status)) {
9243        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
9244    }
9245}
9246
9247/* slowpath interrupt entry point */
9248static void
9249bxe_intr_sp(void *xsc)
9250{
9251    struct bxe_softc *sc = (struct bxe_softc *)xsc;
9252
9253    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
9254
9255    /* acknowledge and disable further slowpath interrupts */
9256    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9257
9258    /* schedule slowpath handler */
9259    taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9260}
9261
9262/* fastpath interrupt entry point */
9263static void
9264bxe_intr_fp(void *xfp)
9265{
9266    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
9267    struct bxe_softc *sc = fp->sc;
9268
9269    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
9270
9271    BLOGD(sc, DBG_INTR,
9272          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
9273          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
9274
9275#if 0
9276    /* Don't handle any interrupts if we're not ready. */
9277    if (__predict_false(sc->intr_sem != 0)) {
9278        return;
9279    }
9280#endif
9281
9282    /* acknowledge and disable further fastpath interrupts */
9283    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9284
9285    bxe_task_fp(fp);
9286}
9287
9288/* Release all interrupts allocated by the driver. */
9289static void
9290bxe_interrupt_free(struct bxe_softc *sc)
9291{
9292    int i;
9293
9294    switch (sc->interrupt_mode) {
9295    case INTR_MODE_INTX:
9296        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
9297        if (sc->intr[0].resource != NULL) {
9298            bus_release_resource(sc->dev,
9299                                 SYS_RES_IRQ,
9300                                 sc->intr[0].rid,
9301                                 sc->intr[0].resource);
9302        }
9303        break;
9304    case INTR_MODE_MSI:
9305        for (i = 0; i < sc->intr_count; i++) {
9306            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
9307            if (sc->intr[i].resource && sc->intr[i].rid) {
9308                bus_release_resource(sc->dev,
9309                                     SYS_RES_IRQ,
9310                                     sc->intr[i].rid,
9311                                     sc->intr[i].resource);
9312            }
9313        }
9314        pci_release_msi(sc->dev);
9315        break;
9316    case INTR_MODE_MSIX:
9317        for (i = 0; i < sc->intr_count; i++) {
9318            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
9319            if (sc->intr[i].resource && sc->intr[i].rid) {
9320                bus_release_resource(sc->dev,
9321                                     SYS_RES_IRQ,
9322                                     sc->intr[i].rid,
9323                                     sc->intr[i].resource);
9324            }
9325        }
9326        pci_release_msi(sc->dev);
9327        break;
9328    default:
9329        /* nothing to do as initial allocation failed */
9330        break;
9331    }
9332}
9333
9334/*
9335 * This function determines and allocates the appropriate
9336 * interrupt based on system capabilites and user request.
9337 *
9338 * The user may force a particular interrupt mode, specify
9339 * the number of receive queues, specify the method for
9340 * distribuitng received frames to receive queues, or use
9341 * the default settings which will automatically select the
9342 * best supported combination.  In addition, the OS may or
9343 * may not support certain combinations of these settings.
9344 * This routine attempts to reconcile the settings requested
9345 * by the user with the capabilites available from the system
9346 * to select the optimal combination of features.
9347 *
9348 * Returns:
9349 *   0 = Success, !0 = Failure.
9350 */
9351static int
9352bxe_interrupt_alloc(struct bxe_softc *sc)
9353{
9354    int msix_count = 0;
9355    int msi_count = 0;
9356    int num_requested = 0;
9357    int num_allocated = 0;
9358    int rid, i, j;
9359    int rc;
9360
9361    /* get the number of available MSI/MSI-X interrupts from the OS */
9362    if (sc->interrupt_mode > 0) {
9363        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
9364            msix_count = pci_msix_count(sc->dev);
9365        }
9366
9367        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
9368            msi_count = pci_msi_count(sc->dev);
9369        }
9370
9371        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
9372              msi_count, msix_count);
9373    }
9374
9375    do { /* try allocating MSI-X interrupt resources (at least 2) */
9376        if (sc->interrupt_mode != INTR_MODE_MSIX) {
9377            break;
9378        }
9379
9380        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
9381            (msix_count < 2)) {
9382            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9383            break;
9384        }
9385
9386        /* ask for the necessary number of MSI-X vectors */
9387        num_requested = min((sc->num_queues + 1), msix_count);
9388
9389        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
9390
9391        num_allocated = num_requested;
9392        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
9393            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
9394            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9395            break;
9396        }
9397
9398        if (num_allocated < 2) { /* possible? */
9399            BLOGE(sc, "MSI-X allocation less than 2!\n");
9400            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9401            pci_release_msi(sc->dev);
9402            break;
9403        }
9404
9405        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9406              num_requested, num_allocated);
9407
9408        /* best effort so use the number of vectors allocated to us */
9409        sc->intr_count = num_allocated;
9410        sc->num_queues = num_allocated - 1;
9411
9412        rid = 1; /* initial resource identifier */
9413
9414        /* allocate the MSI-X vectors */
9415        for (i = 0; i < num_allocated; i++) {
9416            sc->intr[i].rid = (rid + i);
9417
9418            if ((sc->intr[i].resource =
9419                 bus_alloc_resource_any(sc->dev,
9420                                        SYS_RES_IRQ,
9421                                        &sc->intr[i].rid,
9422                                        RF_ACTIVE)) == NULL) {
9423                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9424                      i, (rid + i));
9425
9426                for (j = (i - 1); j >= 0; j--) {
9427                    bus_release_resource(sc->dev,
9428                                         SYS_RES_IRQ,
9429                                         sc->intr[j].rid,
9430                                         sc->intr[j].resource);
9431                }
9432
9433                sc->intr_count = 0;
9434                sc->num_queues = 0;
9435                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9436                pci_release_msi(sc->dev);
9437                break;
9438            }
9439
9440            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9441        }
9442    } while (0);
9443
9444    do { /* try allocating MSI vector resources (at least 2) */
9445        if (sc->interrupt_mode != INTR_MODE_MSI) {
9446            break;
9447        }
9448
9449        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9450            (msi_count < 1)) {
9451            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9452            break;
9453        }
9454
9455        /* ask for a single MSI vector */
9456        num_requested = 1;
9457
9458        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9459
9460        num_allocated = num_requested;
9461        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9462            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9463            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9464            break;
9465        }
9466
9467        if (num_allocated != 1) { /* possible? */
9468            BLOGE(sc, "MSI allocation is not 1!\n");
9469            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9470            pci_release_msi(sc->dev);
9471            break;
9472        }
9473
9474        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9475              num_requested, num_allocated);
9476
9477        /* best effort so use the number of vectors allocated to us */
9478        sc->intr_count = num_allocated;
9479        sc->num_queues = num_allocated;
9480
9481        rid = 1; /* initial resource identifier */
9482
9483        sc->intr[0].rid = rid;
9484
9485        if ((sc->intr[0].resource =
9486             bus_alloc_resource_any(sc->dev,
9487                                    SYS_RES_IRQ,
9488                                    &sc->intr[0].rid,
9489                                    RF_ACTIVE)) == NULL) {
9490            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9491            sc->intr_count = 0;
9492            sc->num_queues = 0;
9493            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9494            pci_release_msi(sc->dev);
9495            break;
9496        }
9497
9498        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9499    } while (0);
9500
9501    do { /* try allocating INTx vector resources */
9502        if (sc->interrupt_mode != INTR_MODE_INTX) {
9503            break;
9504        }
9505
9506        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9507
9508        /* only one vector for INTx */
9509        sc->intr_count = 1;
9510        sc->num_queues = 1;
9511
9512        rid = 0; /* initial resource identifier */
9513
9514        sc->intr[0].rid = rid;
9515
9516        if ((sc->intr[0].resource =
9517             bus_alloc_resource_any(sc->dev,
9518                                    SYS_RES_IRQ,
9519                                    &sc->intr[0].rid,
9520                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9521            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9522            sc->intr_count = 0;
9523            sc->num_queues = 0;
9524            sc->interrupt_mode = -1; /* Failed! */
9525            break;
9526        }
9527
9528        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9529    } while (0);
9530
9531    if (sc->interrupt_mode == -1) {
9532        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9533        rc = 1;
9534    } else {
9535        BLOGD(sc, DBG_LOAD,
9536              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9537              sc->interrupt_mode, sc->num_queues);
9538        rc = 0;
9539    }
9540
9541    return (rc);
9542}
9543
9544static void
9545bxe_interrupt_detach(struct bxe_softc *sc)
9546{
9547    struct bxe_fastpath *fp;
9548    int i;
9549
9550    /* release interrupt resources */
9551    for (i = 0; i < sc->intr_count; i++) {
9552        if (sc->intr[i].resource && sc->intr[i].tag) {
9553            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9554            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9555        }
9556    }
9557
9558    for (i = 0; i < sc->num_queues; i++) {
9559        fp = &sc->fp[i];
9560        if (fp->tq) {
9561            taskqueue_drain(fp->tq, &fp->tq_task);
9562            taskqueue_free(fp->tq);
9563            fp->tq = NULL;
9564        }
9565    }
9566
9567    if (sc->rx_mode_tq) {
9568        taskqueue_drain(sc->rx_mode_tq, &sc->rx_mode_tq_task);
9569        taskqueue_free(sc->rx_mode_tq);
9570        sc->rx_mode_tq = NULL;
9571    }
9572
9573    if (sc->sp_tq) {
9574        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9575        taskqueue_free(sc->sp_tq);
9576        sc->sp_tq = NULL;
9577    }
9578}
9579
9580/*
9581 * Enables interrupts and attach to the ISR.
9582 *
9583 * When using multiple MSI/MSI-X vectors the first vector
9584 * is used for slowpath operations while all remaining
9585 * vectors are used for fastpath operations.  If only a
9586 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9587 * ISR must look for both slowpath and fastpath completions.
9588 */
9589static int
9590bxe_interrupt_attach(struct bxe_softc *sc)
9591{
9592    struct bxe_fastpath *fp;
9593    int rc = 0;
9594    int i;
9595
9596    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9597             "bxe%d_sp_tq", sc->unit);
9598    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9599    sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9600                                      taskqueue_thread_enqueue,
9601                                      &sc->sp_tq);
9602    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9603                            "%s", sc->sp_tq_name);
9604
9605    snprintf(sc->rx_mode_tq_name, sizeof(sc->rx_mode_tq_name),
9606             "bxe%d_rx_mode_tq", sc->unit);
9607    TASK_INIT(&sc->rx_mode_tq_task, 0, bxe_handle_rx_mode_tq, sc);
9608    sc->rx_mode_tq = taskqueue_create_fast(sc->rx_mode_tq_name, M_NOWAIT,
9609                                           taskqueue_thread_enqueue,
9610                                           &sc->rx_mode_tq);
9611    taskqueue_start_threads(&sc->rx_mode_tq, 1, PWAIT, /* lower priority */
9612                            "%s", sc->rx_mode_tq_name);
9613
9614    for (i = 0; i < sc->num_queues; i++) {
9615        fp = &sc->fp[i];
9616        snprintf(fp->tq_name, sizeof(fp->tq_name),
9617                 "bxe%d_fp%d_tq", sc->unit, i);
9618        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9619        fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9620                                       taskqueue_thread_enqueue,
9621                                       &fp->tq);
9622        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9623                                "%s", fp->tq_name);
9624    }
9625
9626    /* setup interrupt handlers */
9627    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9628        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9629
9630        /*
9631         * Setup the interrupt handler. Note that we pass the driver instance
9632         * to the interrupt handler for the slowpath.
9633         */
9634        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9635                                 (INTR_TYPE_NET | INTR_MPSAFE),
9636                                 NULL, bxe_intr_sp, sc,
9637                                 &sc->intr[0].tag)) != 0) {
9638            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9639            goto bxe_interrupt_attach_exit;
9640        }
9641
9642        bus_describe_intr(sc->dev, sc->intr[0].resource,
9643                          sc->intr[0].tag, "sp");
9644
9645        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9646
9647        /* initialize the fastpath vectors (note the first was used for sp) */
9648        for (i = 0; i < sc->num_queues; i++) {
9649            fp = &sc->fp[i];
9650            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9651
9652            /*
9653             * Setup the interrupt handler. Note that we pass the
9654             * fastpath context to the interrupt handler in this
9655             * case.
9656             */
9657            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9658                                     (INTR_TYPE_NET | INTR_MPSAFE),
9659                                     NULL, bxe_intr_fp, fp,
9660                                     &sc->intr[i + 1].tag)) != 0) {
9661                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9662                      (i + 1), rc);
9663                goto bxe_interrupt_attach_exit;
9664            }
9665
9666            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9667                              sc->intr[i + 1].tag, "fp%02d", i);
9668
9669            /* bind the fastpath instance to a cpu */
9670            if (sc->num_queues > 1) {
9671                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9672            }
9673
9674            fp->state = BXE_FP_STATE_IRQ;
9675        }
9676    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9677        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9678
9679        /*
9680         * Setup the interrupt handler. Note that we pass the
9681         * driver instance to the interrupt handler which
9682         * will handle both the slowpath and fastpath.
9683         */
9684        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9685                                 (INTR_TYPE_NET | INTR_MPSAFE),
9686                                 NULL, bxe_intr_legacy, sc,
9687                                 &sc->intr[0].tag)) != 0) {
9688            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9689            goto bxe_interrupt_attach_exit;
9690        }
9691
9692    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9693        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9694
9695        /*
9696         * Setup the interrupt handler. Note that we pass the
9697         * driver instance to the interrupt handler which
9698         * will handle both the slowpath and fastpath.
9699         */
9700        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9701                                 (INTR_TYPE_NET | INTR_MPSAFE),
9702                                 NULL, bxe_intr_legacy, sc,
9703                                 &sc->intr[0].tag)) != 0) {
9704            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9705            goto bxe_interrupt_attach_exit;
9706        }
9707    }
9708
9709bxe_interrupt_attach_exit:
9710
9711    return (rc);
9712}
9713
9714static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9715static int  bxe_init_hw_common(struct bxe_softc *sc);
9716static int  bxe_init_hw_port(struct bxe_softc *sc);
9717static int  bxe_init_hw_func(struct bxe_softc *sc);
9718static void bxe_reset_common(struct bxe_softc *sc);
9719static void bxe_reset_port(struct bxe_softc *sc);
9720static void bxe_reset_func(struct bxe_softc *sc);
9721static int  bxe_gunzip_init(struct bxe_softc *sc);
9722static void bxe_gunzip_end(struct bxe_softc *sc);
9723static int  bxe_init_firmware(struct bxe_softc *sc);
9724static void bxe_release_firmware(struct bxe_softc *sc);
9725
9726static struct
9727ecore_func_sp_drv_ops bxe_func_sp_drv = {
9728    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9729    .init_hw_cmn      = bxe_init_hw_common,
9730    .init_hw_port     = bxe_init_hw_port,
9731    .init_hw_func     = bxe_init_hw_func,
9732
9733    .reset_hw_cmn     = bxe_reset_common,
9734    .reset_hw_port    = bxe_reset_port,
9735    .reset_hw_func    = bxe_reset_func,
9736
9737    .gunzip_init      = bxe_gunzip_init,
9738    .gunzip_end       = bxe_gunzip_end,
9739
9740    .init_fw          = bxe_init_firmware,
9741    .release_fw       = bxe_release_firmware,
9742};
9743
9744static void
9745bxe_init_func_obj(struct bxe_softc *sc)
9746{
9747    sc->dmae_ready = 0;
9748
9749    ecore_init_func_obj(sc,
9750                        &sc->func_obj,
9751                        BXE_SP(sc, func_rdata),
9752                        BXE_SP_MAPPING(sc, func_rdata),
9753                        BXE_SP(sc, func_afex_rdata),
9754                        BXE_SP_MAPPING(sc, func_afex_rdata),
9755                        &bxe_func_sp_drv);
9756}
9757
9758static int
9759bxe_init_hw(struct bxe_softc *sc,
9760            uint32_t         load_code)
9761{
9762    struct ecore_func_state_params func_params = { NULL };
9763    int rc;
9764
9765    /* prepare the parameters for function state transitions */
9766    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9767
9768    func_params.f_obj = &sc->func_obj;
9769    func_params.cmd = ECORE_F_CMD_HW_INIT;
9770
9771    func_params.params.hw_init.load_phase = load_code;
9772
9773    /*
9774     * Via a plethora of function pointers, we will eventually reach
9775     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9776     */
9777    rc = ecore_func_state_change(sc, &func_params);
9778
9779    return (rc);
9780}
9781
9782static void
9783bxe_fill(struct bxe_softc *sc,
9784         uint32_t         addr,
9785         int              fill,
9786         uint32_t         len)
9787{
9788    uint32_t i;
9789
9790    if (!(len % 4) && !(addr % 4)) {
9791        for (i = 0; i < len; i += 4) {
9792            REG_WR(sc, (addr + i), fill);
9793        }
9794    } else {
9795        for (i = 0; i < len; i++) {
9796            REG_WR8(sc, (addr + i), fill);
9797        }
9798    }
9799}
9800
9801/* writes FP SP data to FW - data_size in dwords */
9802static void
9803bxe_wr_fp_sb_data(struct bxe_softc *sc,
9804                  int              fw_sb_id,
9805                  uint32_t         *sb_data_p,
9806                  uint32_t         data_size)
9807{
9808    int index;
9809
9810    for (index = 0; index < data_size; index++) {
9811        REG_WR(sc,
9812               (BAR_CSTRORM_INTMEM +
9813                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9814                (sizeof(uint32_t) * index)),
9815               *(sb_data_p + index));
9816    }
9817}
9818
9819static void
9820bxe_zero_fp_sb(struct bxe_softc *sc,
9821               int              fw_sb_id)
9822{
9823    struct hc_status_block_data_e2 sb_data_e2;
9824    struct hc_status_block_data_e1x sb_data_e1x;
9825    uint32_t *sb_data_p;
9826    uint32_t data_size = 0;
9827
9828    if (!CHIP_IS_E1x(sc)) {
9829        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9830        sb_data_e2.common.state = SB_DISABLED;
9831        sb_data_e2.common.p_func.vf_valid = FALSE;
9832        sb_data_p = (uint32_t *)&sb_data_e2;
9833        data_size = (sizeof(struct hc_status_block_data_e2) /
9834                     sizeof(uint32_t));
9835    } else {
9836        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9837        sb_data_e1x.common.state = SB_DISABLED;
9838        sb_data_e1x.common.p_func.vf_valid = FALSE;
9839        sb_data_p = (uint32_t *)&sb_data_e1x;
9840        data_size = (sizeof(struct hc_status_block_data_e1x) /
9841                     sizeof(uint32_t));
9842    }
9843
9844    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9845
9846    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9847             0, CSTORM_STATUS_BLOCK_SIZE);
9848    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9849             0, CSTORM_SYNC_BLOCK_SIZE);
9850}
9851
9852static void
9853bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9854                  struct hc_sp_status_block_data *sp_sb_data)
9855{
9856    int i;
9857
9858    for (i = 0;
9859         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9860         i++) {
9861        REG_WR(sc,
9862               (BAR_CSTRORM_INTMEM +
9863                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9864                (i * sizeof(uint32_t))),
9865               *((uint32_t *)sp_sb_data + i));
9866    }
9867}
9868
9869static void
9870bxe_zero_sp_sb(struct bxe_softc *sc)
9871{
9872    struct hc_sp_status_block_data sp_sb_data;
9873
9874    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9875
9876    sp_sb_data.state           = SB_DISABLED;
9877    sp_sb_data.p_func.vf_valid = FALSE;
9878
9879    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9880
9881    bxe_fill(sc,
9882             (BAR_CSTRORM_INTMEM +
9883              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9884              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9885    bxe_fill(sc,
9886             (BAR_CSTRORM_INTMEM +
9887              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9888              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9889}
9890
9891static void
9892bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9893                             int                       igu_sb_id,
9894                             int                       igu_seg_id)
9895{
9896    hc_sm->igu_sb_id      = igu_sb_id;
9897    hc_sm->igu_seg_id     = igu_seg_id;
9898    hc_sm->timer_value    = 0xFF;
9899    hc_sm->time_to_expire = 0xFFFFFFFF;
9900}
9901
9902static void
9903bxe_map_sb_state_machines(struct hc_index_data *index_data)
9904{
9905    /* zero out state machine indices */
9906
9907    /* rx indices */
9908    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9909
9910    /* tx indices */
9911    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9912    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9913    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9914    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9915
9916    /* map indices */
9917
9918    /* rx indices */
9919    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9920        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9921
9922    /* tx indices */
9923    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9924        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9925    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9926        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9927    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9928        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9929    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9930        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9931}
9932
9933static void
9934bxe_init_sb(struct bxe_softc *sc,
9935            bus_addr_t       busaddr,
9936            int              vfid,
9937            uint8_t          vf_valid,
9938            int              fw_sb_id,
9939            int              igu_sb_id)
9940{
9941    struct hc_status_block_data_e2  sb_data_e2;
9942    struct hc_status_block_data_e1x sb_data_e1x;
9943    struct hc_status_block_sm       *hc_sm_p;
9944    uint32_t *sb_data_p;
9945    int igu_seg_id;
9946    int data_size;
9947
9948    if (CHIP_INT_MODE_IS_BC(sc)) {
9949        igu_seg_id = HC_SEG_ACCESS_NORM;
9950    } else {
9951        igu_seg_id = IGU_SEG_ACCESS_NORM;
9952    }
9953
9954    bxe_zero_fp_sb(sc, fw_sb_id);
9955
9956    if (!CHIP_IS_E1x(sc)) {
9957        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9958        sb_data_e2.common.state = SB_ENABLED;
9959        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9960        sb_data_e2.common.p_func.vf_id = vfid;
9961        sb_data_e2.common.p_func.vf_valid = vf_valid;
9962        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9963        sb_data_e2.common.same_igu_sb_1b = TRUE;
9964        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9965        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9966        hc_sm_p = sb_data_e2.common.state_machine;
9967        sb_data_p = (uint32_t *)&sb_data_e2;
9968        data_size = (sizeof(struct hc_status_block_data_e2) /
9969                     sizeof(uint32_t));
9970        bxe_map_sb_state_machines(sb_data_e2.index_data);
9971    } else {
9972        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9973        sb_data_e1x.common.state = SB_ENABLED;
9974        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9975        sb_data_e1x.common.p_func.vf_id = 0xff;
9976        sb_data_e1x.common.p_func.vf_valid = FALSE;
9977        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9978        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9979        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9980        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9981        hc_sm_p = sb_data_e1x.common.state_machine;
9982        sb_data_p = (uint32_t *)&sb_data_e1x;
9983        data_size = (sizeof(struct hc_status_block_data_e1x) /
9984                     sizeof(uint32_t));
9985        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9986    }
9987
9988    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9989    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9990
9991    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9992
9993    /* write indices to HW - PCI guarantees endianity of regpairs */
9994    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9995}
9996
9997static inline uint8_t
9998bxe_fp_qzone_id(struct bxe_fastpath *fp)
9999{
10000    if (CHIP_IS_E1x(fp->sc)) {
10001        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
10002    } else {
10003        return (fp->cl_id);
10004    }
10005}
10006
10007static inline uint32_t
10008bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
10009                           struct bxe_fastpath *fp)
10010{
10011    uint32_t offset = BAR_USTRORM_INTMEM;
10012
10013#if 0
10014    if (IS_VF(sc)) {
10015        return (PXP_VF_ADDR_USDM_QUEUES_START +
10016                (sc->acquire_resp.resc.hw_qid[fp->index] *
10017                 sizeof(struct ustorm_queue_zone_data)));
10018    } else
10019#endif
10020    if (!CHIP_IS_E1x(sc)) {
10021        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
10022    } else {
10023        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
10024    }
10025
10026    return (offset);
10027}
10028
10029static void
10030bxe_init_eth_fp(struct bxe_softc *sc,
10031                int              idx)
10032{
10033    struct bxe_fastpath *fp = &sc->fp[idx];
10034    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
10035    unsigned long q_type = 0;
10036    int cos;
10037
10038    fp->sc    = sc;
10039    fp->index = idx;
10040
10041    snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
10042             "bxe%d_fp%d_tx_lock", sc->unit, idx);
10043    mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
10044
10045    snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
10046             "bxe%d_fp%d_rx_lock", sc->unit, idx);
10047    mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
10048
10049    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
10050    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
10051
10052    fp->cl_id = (CHIP_IS_E1x(sc)) ?
10053                    (SC_L_ID(sc) + idx) :
10054                    /* want client ID same as IGU SB ID for non-E1 */
10055                    fp->igu_sb_id;
10056    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
10057
10058    /* setup sb indices */
10059    if (!CHIP_IS_E1x(sc)) {
10060        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
10061        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
10062    } else {
10063        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
10064        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
10065    }
10066
10067    /* init shortcut */
10068    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
10069
10070    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
10071
10072    /*
10073     * XXX If multiple CoS is ever supported then each fastpath structure
10074     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
10075     */
10076    for (cos = 0; cos < sc->max_cos; cos++) {
10077        cids[cos] = idx;
10078    }
10079    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
10080
10081    /* nothing more for a VF to do */
10082    if (IS_VF(sc)) {
10083        return;
10084    }
10085
10086    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
10087                fp->fw_sb_id, fp->igu_sb_id);
10088
10089    bxe_update_fp_sb_idx(fp);
10090
10091    /* Configure Queue State object */
10092    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
10093    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
10094
10095    ecore_init_queue_obj(sc,
10096                         &sc->sp_objs[idx].q_obj,
10097                         fp->cl_id,
10098                         cids,
10099                         sc->max_cos,
10100                         SC_FUNC(sc),
10101                         BXE_SP(sc, q_rdata),
10102                         BXE_SP_MAPPING(sc, q_rdata),
10103                         q_type);
10104
10105    /* configure classification DBs */
10106    ecore_init_mac_obj(sc,
10107                       &sc->sp_objs[idx].mac_obj,
10108                       fp->cl_id,
10109                       idx,
10110                       SC_FUNC(sc),
10111                       BXE_SP(sc, mac_rdata),
10112                       BXE_SP_MAPPING(sc, mac_rdata),
10113                       ECORE_FILTER_MAC_PENDING,
10114                       &sc->sp_state,
10115                       ECORE_OBJ_TYPE_RX_TX,
10116                       &sc->macs_pool);
10117
10118    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
10119          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
10120}
10121
10122static inline void
10123bxe_update_rx_prod(struct bxe_softc    *sc,
10124                   struct bxe_fastpath *fp,
10125                   uint16_t            rx_bd_prod,
10126                   uint16_t            rx_cq_prod,
10127                   uint16_t            rx_sge_prod)
10128{
10129    struct ustorm_eth_rx_producers rx_prods = { 0 };
10130    uint32_t i;
10131
10132    /* update producers */
10133    rx_prods.bd_prod  = rx_bd_prod;
10134    rx_prods.cqe_prod = rx_cq_prod;
10135    rx_prods.sge_prod = rx_sge_prod;
10136
10137    /*
10138     * Make sure that the BD and SGE data is updated before updating the
10139     * producers since FW might read the BD/SGE right after the producer
10140     * is updated.
10141     * This is only applicable for weak-ordered memory model archs such
10142     * as IA-64. The following barrier is also mandatory since FW will
10143     * assumes BDs must have buffers.
10144     */
10145    wmb();
10146
10147    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
10148        REG_WR(sc,
10149               (fp->ustorm_rx_prods_offset + (i * 4)),
10150               ((uint32_t *)&rx_prods)[i]);
10151    }
10152
10153    wmb(); /* keep prod updates ordered */
10154
10155    BLOGD(sc, DBG_RX,
10156          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
10157          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
10158}
10159
10160static void
10161bxe_init_rx_rings(struct bxe_softc *sc)
10162{
10163    struct bxe_fastpath *fp;
10164    int i;
10165
10166    for (i = 0; i < sc->num_queues; i++) {
10167        fp = &sc->fp[i];
10168
10169        fp->rx_bd_cons = 0;
10170
10171        /*
10172         * Activate the BD ring...
10173         * Warning, this will generate an interrupt (to the TSTORM)
10174         * so this can only be done after the chip is initialized
10175         */
10176        bxe_update_rx_prod(sc, fp,
10177                           fp->rx_bd_prod,
10178                           fp->rx_cq_prod,
10179                           fp->rx_sge_prod);
10180
10181        if (i != 0) {
10182            continue;
10183        }
10184
10185        if (CHIP_IS_E1(sc)) {
10186            REG_WR(sc,
10187                   (BAR_USTRORM_INTMEM +
10188                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
10189                   U64_LO(fp->rcq_dma.paddr));
10190            REG_WR(sc,
10191                   (BAR_USTRORM_INTMEM +
10192                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
10193                   U64_HI(fp->rcq_dma.paddr));
10194        }
10195    }
10196}
10197
10198static void
10199bxe_init_tx_ring_one(struct bxe_fastpath *fp)
10200{
10201    SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
10202    fp->tx_db.data.zero_fill1 = 0;
10203    fp->tx_db.data.prod = 0;
10204
10205    fp->tx_pkt_prod = 0;
10206    fp->tx_pkt_cons = 0;
10207    fp->tx_bd_prod = 0;
10208    fp->tx_bd_cons = 0;
10209    fp->eth_q_stats.tx_pkts = 0;
10210}
10211
10212static inline void
10213bxe_init_tx_rings(struct bxe_softc *sc)
10214{
10215    int i;
10216
10217    for (i = 0; i < sc->num_queues; i++) {
10218#if 0
10219        uint8_t cos;
10220        for (cos = 0; cos < sc->max_cos; cos++) {
10221            bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]);
10222        }
10223#else
10224        bxe_init_tx_ring_one(&sc->fp[i]);
10225#endif
10226    }
10227}
10228
10229static void
10230bxe_init_def_sb(struct bxe_softc *sc)
10231{
10232    struct host_sp_status_block *def_sb = sc->def_sb;
10233    bus_addr_t mapping = sc->def_sb_dma.paddr;
10234    int igu_sp_sb_index;
10235    int igu_seg_id;
10236    int port = SC_PORT(sc);
10237    int func = SC_FUNC(sc);
10238    int reg_offset, reg_offset_en5;
10239    uint64_t section;
10240    int index, sindex;
10241    struct hc_sp_status_block_data sp_sb_data;
10242
10243    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
10244
10245    if (CHIP_INT_MODE_IS_BC(sc)) {
10246        igu_sp_sb_index = DEF_SB_IGU_ID;
10247        igu_seg_id = HC_SEG_ACCESS_DEF;
10248    } else {
10249        igu_sp_sb_index = sc->igu_dsb_id;
10250        igu_seg_id = IGU_SEG_ACCESS_DEF;
10251    }
10252
10253    /* attentions */
10254    section = ((uint64_t)mapping +
10255               offsetof(struct host_sp_status_block, atten_status_block));
10256    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
10257    sc->attn_state = 0;
10258
10259    reg_offset = (port) ?
10260                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
10261                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
10262    reg_offset_en5 = (port) ?
10263                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
10264                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
10265
10266    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
10267        /* take care of sig[0]..sig[4] */
10268        for (sindex = 0; sindex < 4; sindex++) {
10269            sc->attn_group[index].sig[sindex] =
10270                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
10271        }
10272
10273        if (!CHIP_IS_E1x(sc)) {
10274            /*
10275             * enable5 is separate from the rest of the registers,
10276             * and the address skip is 4 and not 16 between the
10277             * different groups
10278             */
10279            sc->attn_group[index].sig[4] =
10280                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
10281        } else {
10282            sc->attn_group[index].sig[4] = 0;
10283        }
10284    }
10285
10286    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10287        reg_offset = (port) ?
10288                         HC_REG_ATTN_MSG1_ADDR_L :
10289                         HC_REG_ATTN_MSG0_ADDR_L;
10290        REG_WR(sc, reg_offset, U64_LO(section));
10291        REG_WR(sc, (reg_offset + 4), U64_HI(section));
10292    } else if (!CHIP_IS_E1x(sc)) {
10293        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
10294        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
10295    }
10296
10297    section = ((uint64_t)mapping +
10298               offsetof(struct host_sp_status_block, sp_sb));
10299
10300    bxe_zero_sp_sb(sc);
10301
10302    /* PCI guarantees endianity of regpair */
10303    sp_sb_data.state           = SB_ENABLED;
10304    sp_sb_data.host_sb_addr.lo = U64_LO(section);
10305    sp_sb_data.host_sb_addr.hi = U64_HI(section);
10306    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
10307    sp_sb_data.igu_seg_id      = igu_seg_id;
10308    sp_sb_data.p_func.pf_id    = func;
10309    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
10310    sp_sb_data.p_func.vf_id    = 0xff;
10311
10312    bxe_wr_sp_sb_data(sc, &sp_sb_data);
10313
10314    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
10315}
10316
10317static void
10318bxe_init_sp_ring(struct bxe_softc *sc)
10319{
10320    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
10321    sc->spq_prod_idx = 0;
10322    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
10323    sc->spq_prod_bd = sc->spq;
10324    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
10325}
10326
10327static void
10328bxe_init_eq_ring(struct bxe_softc *sc)
10329{
10330    union event_ring_elem *elem;
10331    int i;
10332
10333    for (i = 1; i <= NUM_EQ_PAGES; i++) {
10334        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
10335
10336        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
10337                                                 BCM_PAGE_SIZE *
10338                                                 (i % NUM_EQ_PAGES)));
10339        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
10340                                                 BCM_PAGE_SIZE *
10341                                                 (i % NUM_EQ_PAGES)));
10342    }
10343
10344    sc->eq_cons    = 0;
10345    sc->eq_prod    = NUM_EQ_DESC;
10346    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
10347
10348    atomic_store_rel_long(&sc->eq_spq_left,
10349                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
10350                               NUM_EQ_DESC) - 1));
10351}
10352
10353static void
10354bxe_init_internal_common(struct bxe_softc *sc)
10355{
10356    int i;
10357
10358    if (IS_MF_SI(sc)) {
10359        /*
10360         * In switch independent mode, the TSTORM needs to accept
10361         * packets that failed classification, since approximate match
10362         * mac addresses aren't written to NIG LLH.
10363         */
10364        REG_WR8(sc,
10365                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
10366                2);
10367    } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
10368        REG_WR8(sc,
10369                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
10370                0);
10371    }
10372
10373    /*
10374     * Zero this manually as its initialization is currently missing
10375     * in the initTool.
10376     */
10377    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
10378        REG_WR(sc,
10379               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
10380               0);
10381    }
10382
10383    if (!CHIP_IS_E1x(sc)) {
10384        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
10385                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
10386    }
10387}
10388
10389static void
10390bxe_init_internal(struct bxe_softc *sc,
10391                  uint32_t         load_code)
10392{
10393    switch (load_code) {
10394    case FW_MSG_CODE_DRV_LOAD_COMMON:
10395    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
10396        bxe_init_internal_common(sc);
10397        /* no break */
10398
10399    case FW_MSG_CODE_DRV_LOAD_PORT:
10400        /* nothing to do */
10401        /* no break */
10402
10403    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
10404        /* internal memory per function is initialized inside bxe_pf_init */
10405        break;
10406
10407    default:
10408        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
10409        break;
10410    }
10411}
10412
10413static void
10414storm_memset_func_cfg(struct bxe_softc                         *sc,
10415                      struct tstorm_eth_function_common_config *tcfg,
10416                      uint16_t                                  abs_fid)
10417{
10418    uint32_t addr;
10419    size_t size;
10420
10421    addr = (BAR_TSTRORM_INTMEM +
10422            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
10423    size = sizeof(struct tstorm_eth_function_common_config);
10424    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10425}
10426
10427static void
10428bxe_func_init(struct bxe_softc            *sc,
10429              struct bxe_func_init_params *p)
10430{
10431    struct tstorm_eth_function_common_config tcfg = { 0 };
10432
10433    if (CHIP_IS_E1x(sc)) {
10434        storm_memset_func_cfg(sc, &tcfg, p->func_id);
10435    }
10436
10437    /* Enable the function in the FW */
10438    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10439    storm_memset_func_en(sc, p->func_id, 1);
10440
10441    /* spq */
10442    if (p->func_flgs & FUNC_FLG_SPQ) {
10443        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10444        REG_WR(sc,
10445               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10446               p->spq_prod);
10447    }
10448}
10449
10450/*
10451 * Calculates the sum of vn_min_rates.
10452 * It's needed for further normalizing of the min_rates.
10453 * Returns:
10454 *   sum of vn_min_rates.
10455 *     or
10456 *   0 - if all the min_rates are 0.
10457 * In the later case fainess algorithm should be deactivated.
10458 * If all min rates are not zero then those that are zeroes will be set to 1.
10459 */
10460static void
10461bxe_calc_vn_min(struct bxe_softc       *sc,
10462                struct cmng_init_input *input)
10463{
10464    uint32_t vn_cfg;
10465    uint32_t vn_min_rate;
10466    int all_zero = 1;
10467    int vn;
10468
10469    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10470        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10471        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10472                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10473
10474        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10475            /* skip hidden VNs */
10476            vn_min_rate = 0;
10477        } else if (!vn_min_rate) {
10478            /* If min rate is zero - set it to 100 */
10479            vn_min_rate = DEF_MIN_RATE;
10480        } else {
10481            all_zero = 0;
10482        }
10483
10484        input->vnic_min_rate[vn] = vn_min_rate;
10485    }
10486
10487    /* if ETS or all min rates are zeros - disable fairness */
10488    if (BXE_IS_ETS_ENABLED(sc)) {
10489        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10490        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10491    } else if (all_zero) {
10492        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10493        BLOGD(sc, DBG_LOAD,
10494              "Fariness disabled (all MIN values are zeroes)\n");
10495    } else {
10496        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10497    }
10498}
10499
10500static inline uint16_t
10501bxe_extract_max_cfg(struct bxe_softc *sc,
10502                    uint32_t         mf_cfg)
10503{
10504    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10505                        FUNC_MF_CFG_MAX_BW_SHIFT);
10506
10507    if (!max_cfg) {
10508        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10509        max_cfg = 100;
10510    }
10511
10512    return (max_cfg);
10513}
10514
10515static void
10516bxe_calc_vn_max(struct bxe_softc       *sc,
10517                int                    vn,
10518                struct cmng_init_input *input)
10519{
10520    uint16_t vn_max_rate;
10521    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10522    uint32_t max_cfg;
10523
10524    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10525        vn_max_rate = 0;
10526    } else {
10527        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10528
10529        if (IS_MF_SI(sc)) {
10530            /* max_cfg in percents of linkspeed */
10531            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10532        } else { /* SD modes */
10533            /* max_cfg is absolute in 100Mb units */
10534            vn_max_rate = (max_cfg * 100);
10535        }
10536    }
10537
10538    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10539
10540    input->vnic_max_rate[vn] = vn_max_rate;
10541}
10542
10543static void
10544bxe_cmng_fns_init(struct bxe_softc *sc,
10545                  uint8_t          read_cfg,
10546                  uint8_t          cmng_type)
10547{
10548    struct cmng_init_input input;
10549    int vn;
10550
10551    memset(&input, 0, sizeof(struct cmng_init_input));
10552
10553    input.port_rate = sc->link_vars.line_speed;
10554
10555    if (cmng_type == CMNG_FNS_MINMAX) {
10556        /* read mf conf from shmem */
10557        if (read_cfg) {
10558            bxe_read_mf_cfg(sc);
10559        }
10560
10561        /* get VN min rate and enable fairness if not 0 */
10562        bxe_calc_vn_min(sc, &input);
10563
10564        /* get VN max rate */
10565        if (sc->port.pmf) {
10566            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10567                bxe_calc_vn_max(sc, vn, &input);
10568            }
10569        }
10570
10571        /* always enable rate shaping and fairness */
10572        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10573
10574        ecore_init_cmng(&input, &sc->cmng);
10575        return;
10576    }
10577
10578    /* rate shaping and fairness are disabled */
10579    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10580}
10581
10582static int
10583bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10584{
10585    if (CHIP_REV_IS_SLOW(sc)) {
10586        return (CMNG_FNS_NONE);
10587    }
10588
10589    if (IS_MF(sc)) {
10590        return (CMNG_FNS_MINMAX);
10591    }
10592
10593    return (CMNG_FNS_NONE);
10594}
10595
10596static void
10597storm_memset_cmng(struct bxe_softc *sc,
10598                  struct cmng_init *cmng,
10599                  uint8_t          port)
10600{
10601    int vn;
10602    int func;
10603    uint32_t addr;
10604    size_t size;
10605
10606    addr = (BAR_XSTRORM_INTMEM +
10607            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10608    size = sizeof(struct cmng_struct_per_port);
10609    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10610
10611    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10612        func = func_by_vn(sc, vn);
10613
10614        addr = (BAR_XSTRORM_INTMEM +
10615                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10616        size = sizeof(struct rate_shaping_vars_per_vn);
10617        ecore_storm_memset_struct(sc, addr, size,
10618                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10619
10620        addr = (BAR_XSTRORM_INTMEM +
10621                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10622        size = sizeof(struct fairness_vars_per_vn);
10623        ecore_storm_memset_struct(sc, addr, size,
10624                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10625    }
10626}
10627
10628static void
10629bxe_pf_init(struct bxe_softc *sc)
10630{
10631    struct bxe_func_init_params func_init = { 0 };
10632    struct event_ring_data eq_data = { { 0 } };
10633    uint16_t flags;
10634
10635    if (!CHIP_IS_E1x(sc)) {
10636        /* reset IGU PF statistics: MSIX + ATTN */
10637        /* PF */
10638        REG_WR(sc,
10639               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10640                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10641                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10642               0);
10643        /* ATTN */
10644        REG_WR(sc,
10645               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10646                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10647                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10648                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10649               0);
10650    }
10651
10652    /* function setup flags */
10653    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10654
10655    /*
10656     * This flag is relevant for E1x only.
10657     * E2 doesn't have a TPA configuration in a function level.
10658     */
10659    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10660
10661    func_init.func_flgs = flags;
10662    func_init.pf_id     = SC_FUNC(sc);
10663    func_init.func_id   = SC_FUNC(sc);
10664    func_init.spq_map   = sc->spq_dma.paddr;
10665    func_init.spq_prod  = sc->spq_prod_idx;
10666
10667    bxe_func_init(sc, &func_init);
10668
10669    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10670
10671    /*
10672     * Congestion management values depend on the link rate.
10673     * There is no active link so initial link rate is set to 10Gbps.
10674     * When the link comes up the congestion management values are
10675     * re-calculated according to the actual link rate.
10676     */
10677    sc->link_vars.line_speed = SPEED_10000;
10678    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10679
10680    /* Only the PMF sets the HW */
10681    if (sc->port.pmf) {
10682        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10683    }
10684
10685    /* init Event Queue - PCI bus guarantees correct endainity */
10686    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10687    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10688    eq_data.producer     = sc->eq_prod;
10689    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10690    eq_data.sb_id        = DEF_SB_ID;
10691    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10692}
10693
10694static void
10695bxe_hc_int_enable(struct bxe_softc *sc)
10696{
10697    int port = SC_PORT(sc);
10698    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10699    uint32_t val = REG_RD(sc, addr);
10700    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10701    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10702                           (sc->intr_count == 1)) ? TRUE : FALSE;
10703    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10704
10705    if (msix) {
10706        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10707                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10708        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10709                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10710        if (single_msix) {
10711            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10712        }
10713    } else if (msi) {
10714        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10715        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10716                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10717                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10718    } else {
10719        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10720                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10721                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10722                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10723
10724        if (!CHIP_IS_E1(sc)) {
10725            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10726                  val, port, addr);
10727
10728            REG_WR(sc, addr, val);
10729
10730            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10731        }
10732    }
10733
10734    if (CHIP_IS_E1(sc)) {
10735        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10736    }
10737
10738    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10739          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10740
10741    REG_WR(sc, addr, val);
10742
10743    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10744    mb();
10745
10746    if (!CHIP_IS_E1(sc)) {
10747        /* init leading/trailing edge */
10748        if (IS_MF(sc)) {
10749            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10750            if (sc->port.pmf) {
10751                /* enable nig and gpio3 attention */
10752                val |= 0x1100;
10753            }
10754        } else {
10755            val = 0xffff;
10756        }
10757
10758        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10759        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10760    }
10761
10762    /* make sure that interrupts are indeed enabled from here on */
10763    mb();
10764}
10765
10766static void
10767bxe_igu_int_enable(struct bxe_softc *sc)
10768{
10769    uint32_t val;
10770    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10771    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10772                           (sc->intr_count == 1)) ? TRUE : FALSE;
10773    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10774
10775    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10776
10777    if (msix) {
10778        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10779                 IGU_PF_CONF_SINGLE_ISR_EN);
10780        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10781                IGU_PF_CONF_ATTN_BIT_EN);
10782        if (single_msix) {
10783            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10784        }
10785    } else if (msi) {
10786        val &= ~IGU_PF_CONF_INT_LINE_EN;
10787        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10788                IGU_PF_CONF_ATTN_BIT_EN |
10789                IGU_PF_CONF_SINGLE_ISR_EN);
10790    } else {
10791        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10792        val |= (IGU_PF_CONF_INT_LINE_EN |
10793                IGU_PF_CONF_ATTN_BIT_EN |
10794                IGU_PF_CONF_SINGLE_ISR_EN);
10795    }
10796
10797    /* clean previous status - need to configure igu prior to ack*/
10798    if ((!msix) || single_msix) {
10799        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10800        bxe_ack_int(sc);
10801    }
10802
10803    val |= IGU_PF_CONF_FUNC_EN;
10804
10805    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10806          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10807
10808    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10809
10810    mb();
10811
10812    /* init leading/trailing edge */
10813    if (IS_MF(sc)) {
10814        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10815        if (sc->port.pmf) {
10816            /* enable nig and gpio3 attention */
10817            val |= 0x1100;
10818        }
10819    } else {
10820        val = 0xffff;
10821    }
10822
10823    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10824    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10825
10826    /* make sure that interrupts are indeed enabled from here on */
10827    mb();
10828}
10829
10830static void
10831bxe_int_enable(struct bxe_softc *sc)
10832{
10833    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10834        bxe_hc_int_enable(sc);
10835    } else {
10836        bxe_igu_int_enable(sc);
10837    }
10838}
10839
10840static void
10841bxe_hc_int_disable(struct bxe_softc *sc)
10842{
10843    int port = SC_PORT(sc);
10844    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10845    uint32_t val = REG_RD(sc, addr);
10846
10847    /*
10848     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10849     * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10850     * block
10851     */
10852    if (CHIP_IS_E1(sc)) {
10853        /*
10854         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
10855         * to prevent from HC sending interrupts after we exit the function
10856         */
10857        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10858
10859        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10860                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10861                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10862    } else {
10863        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10864                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10865                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10866                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10867    }
10868
10869    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10870
10871    /* flush all outstanding writes */
10872    mb();
10873
10874    REG_WR(sc, addr, val);
10875    if (REG_RD(sc, addr) != val) {
10876        BLOGE(sc, "proper val not read from HC IGU!\n");
10877    }
10878}
10879
10880static void
10881bxe_igu_int_disable(struct bxe_softc *sc)
10882{
10883    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10884
10885    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10886             IGU_PF_CONF_INT_LINE_EN |
10887             IGU_PF_CONF_ATTN_BIT_EN);
10888
10889    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10890
10891    /* flush all outstanding writes */
10892    mb();
10893
10894    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10895    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10896        BLOGE(sc, "proper val not read from IGU!\n");
10897    }
10898}
10899
10900static void
10901bxe_int_disable(struct bxe_softc *sc)
10902{
10903    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10904        bxe_hc_int_disable(sc);
10905    } else {
10906        bxe_igu_int_disable(sc);
10907    }
10908}
10909
10910static void
10911bxe_nic_init(struct bxe_softc *sc,
10912             int              load_code)
10913{
10914    int i;
10915
10916    for (i = 0; i < sc->num_queues; i++) {
10917        bxe_init_eth_fp(sc, i);
10918    }
10919
10920    rmb(); /* ensure status block indices were read */
10921
10922    bxe_init_rx_rings(sc);
10923    bxe_init_tx_rings(sc);
10924
10925    if (IS_VF(sc)) {
10926        return;
10927    }
10928
10929    /* initialize MOD_ABS interrupts */
10930    elink_init_mod_abs_int(sc, &sc->link_vars,
10931                           sc->devinfo.chip_id,
10932                           sc->devinfo.shmem_base,
10933                           sc->devinfo.shmem2_base,
10934                           SC_PORT(sc));
10935
10936    bxe_init_def_sb(sc);
10937    bxe_update_dsb_idx(sc);
10938    bxe_init_sp_ring(sc);
10939    bxe_init_eq_ring(sc);
10940    bxe_init_internal(sc, load_code);
10941    bxe_pf_init(sc);
10942    bxe_stats_init(sc);
10943
10944    /* flush all before enabling interrupts */
10945    mb();
10946
10947    bxe_int_enable(sc);
10948
10949    /* check for SPIO5 */
10950    bxe_attn_int_deasserted0(sc,
10951                             REG_RD(sc,
10952                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10953                                     SC_PORT(sc)*4)) &
10954                             AEU_INPUTS_ATTN_BITS_SPIO5);
10955}
10956
10957static inline void
10958bxe_init_objs(struct bxe_softc *sc)
10959{
10960    /* mcast rules must be added to tx if tx switching is enabled */
10961    ecore_obj_type o_type =
10962        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10963                                         ECORE_OBJ_TYPE_RX;
10964
10965    /* RX_MODE controlling object */
10966    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10967
10968    /* multicast configuration controlling object */
10969    ecore_init_mcast_obj(sc,
10970                         &sc->mcast_obj,
10971                         sc->fp[0].cl_id,
10972                         sc->fp[0].index,
10973                         SC_FUNC(sc),
10974                         SC_FUNC(sc),
10975                         BXE_SP(sc, mcast_rdata),
10976                         BXE_SP_MAPPING(sc, mcast_rdata),
10977                         ECORE_FILTER_MCAST_PENDING,
10978                         &sc->sp_state,
10979                         o_type);
10980
10981    /* Setup CAM credit pools */
10982    ecore_init_mac_credit_pool(sc,
10983                               &sc->macs_pool,
10984                               SC_FUNC(sc),
10985                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10986                                                 VNICS_PER_PATH(sc));
10987
10988    ecore_init_vlan_credit_pool(sc,
10989                                &sc->vlans_pool,
10990                                SC_ABS_FUNC(sc) >> 1,
10991                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10992                                                  VNICS_PER_PATH(sc));
10993
10994    /* RSS configuration object */
10995    ecore_init_rss_config_obj(sc,
10996                              &sc->rss_conf_obj,
10997                              sc->fp[0].cl_id,
10998                              sc->fp[0].index,
10999                              SC_FUNC(sc),
11000                              SC_FUNC(sc),
11001                              BXE_SP(sc, rss_rdata),
11002                              BXE_SP_MAPPING(sc, rss_rdata),
11003                              ECORE_FILTER_RSS_CONF_PENDING,
11004                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
11005}
11006
11007/*
11008 * Initialize the function. This must be called before sending CLIENT_SETUP
11009 * for the first client.
11010 */
11011static inline int
11012bxe_func_start(struct bxe_softc *sc)
11013{
11014    struct ecore_func_state_params func_params = { NULL };
11015    struct ecore_func_start_params *start_params = &func_params.params.start;
11016
11017    /* Prepare parameters for function state transitions */
11018    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
11019
11020    func_params.f_obj = &sc->func_obj;
11021    func_params.cmd = ECORE_F_CMD_START;
11022
11023    /* Function parameters */
11024    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
11025    start_params->sd_vlan_tag = OVLAN(sc);
11026
11027    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
11028        start_params->network_cos_mode = STATIC_COS;
11029    } else { /* CHIP_IS_E1X */
11030        start_params->network_cos_mode = FW_WRR;
11031    }
11032
11033    start_params->gre_tunnel_mode = 0;
11034    start_params->gre_tunnel_rss  = 0;
11035
11036    return (ecore_func_state_change(sc, &func_params));
11037}
11038
11039static int
11040bxe_set_power_state(struct bxe_softc *sc,
11041                    uint8_t          state)
11042{
11043    uint16_t pmcsr;
11044
11045    /* If there is no power capability, silently succeed */
11046    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
11047        BLOGW(sc, "No power capability\n");
11048        return (0);
11049    }
11050
11051    pmcsr = pci_read_config(sc->dev,
11052                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11053                            2);
11054
11055    switch (state) {
11056    case PCI_PM_D0:
11057        pci_write_config(sc->dev,
11058                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11059                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
11060
11061        if (pmcsr & PCIM_PSTAT_DMASK) {
11062            /* delay required during transition out of D3hot */
11063            DELAY(20000);
11064        }
11065
11066        break;
11067
11068    case PCI_PM_D3hot:
11069        /* XXX if there are other clients above don't shut down the power */
11070
11071        /* don't shut down the power for emulation and FPGA */
11072        if (CHIP_REV_IS_SLOW(sc)) {
11073            return (0);
11074        }
11075
11076        pmcsr &= ~PCIM_PSTAT_DMASK;
11077        pmcsr |= PCIM_PSTAT_D3;
11078
11079        if (sc->wol) {
11080            pmcsr |= PCIM_PSTAT_PMEENABLE;
11081        }
11082
11083        pci_write_config(sc->dev,
11084                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11085                         pmcsr, 4);
11086
11087        /*
11088         * No more memory access after this point until device is brought back
11089         * to D0 state.
11090         */
11091        break;
11092
11093    default:
11094        BLOGE(sc, "Can't support PCI power state = %d\n", state);
11095        return (-1);
11096    }
11097
11098    return (0);
11099}
11100
11101
11102/* return true if succeeded to acquire the lock */
11103static uint8_t
11104bxe_trylock_hw_lock(struct bxe_softc *sc,
11105                    uint32_t         resource)
11106{
11107    uint32_t lock_status;
11108    uint32_t resource_bit = (1 << resource);
11109    int func = SC_FUNC(sc);
11110    uint32_t hw_lock_control_reg;
11111
11112    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
11113
11114    /* Validating that the resource is within range */
11115    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
11116        BLOGD(sc, DBG_LOAD,
11117              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
11118              resource, HW_LOCK_MAX_RESOURCE_VALUE);
11119        return (FALSE);
11120    }
11121
11122    if (func <= 5) {
11123        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
11124    } else {
11125        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
11126    }
11127
11128    /* try to acquire the lock */
11129    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
11130    lock_status = REG_RD(sc, hw_lock_control_reg);
11131    if (lock_status & resource_bit) {
11132        return (TRUE);
11133    }
11134
11135    BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);
11136
11137    return (FALSE);
11138}
11139
11140/*
11141 * Get the recovery leader resource id according to the engine this function
11142 * belongs to. Currently only only 2 engines is supported.
11143 */
11144static int
11145bxe_get_leader_lock_resource(struct bxe_softc *sc)
11146{
11147    if (SC_PATH(sc)) {
11148        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
11149    } else {
11150        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
11151    }
11152}
11153
11154/* try to acquire a leader lock for current engine */
11155static uint8_t
11156bxe_trylock_leader_lock(struct bxe_softc *sc)
11157{
11158    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11159}
11160
11161static int
11162bxe_release_leader_lock(struct bxe_softc *sc)
11163{
11164    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11165}
11166
11167/* close gates #2, #3 and #4 */
11168static void
11169bxe_set_234_gates(struct bxe_softc *sc,
11170                  uint8_t          close)
11171{
11172    uint32_t val;
11173
11174    /* gates #2 and #4a are closed/opened for "not E1" only */
11175    if (!CHIP_IS_E1(sc)) {
11176        /* #4 */
11177        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
11178        /* #2 */
11179        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
11180    }
11181
11182    /* #3 */
11183    if (CHIP_IS_E1x(sc)) {
11184        /* prevent interrupts from HC on both ports */
11185        val = REG_RD(sc, HC_REG_CONFIG_1);
11186        REG_WR(sc, HC_REG_CONFIG_1,
11187               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
11188               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
11189
11190        val = REG_RD(sc, HC_REG_CONFIG_0);
11191        REG_WR(sc, HC_REG_CONFIG_0,
11192               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
11193               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
11194    } else {
11195        /* Prevent incomming interrupts in IGU */
11196        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
11197
11198        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
11199               (!close) ?
11200               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
11201               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
11202    }
11203
11204    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
11205          close ? "closing" : "opening");
11206
11207    wmb();
11208}
11209
11210/* poll for pending writes bit, it should get cleared in no more than 1s */
11211static int
11212bxe_er_poll_igu_vq(struct bxe_softc *sc)
11213{
11214    uint32_t cnt = 1000;
11215    uint32_t pend_bits = 0;
11216
11217    do {
11218        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
11219
11220        if (pend_bits == 0) {
11221            break;
11222        }
11223
11224        DELAY(1000);
11225    } while (--cnt > 0);
11226
11227    if (cnt == 0) {
11228        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
11229        return (-1);
11230    }
11231
11232    return (0);
11233}
11234
11235#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
11236
11237static void
11238bxe_clp_reset_prep(struct bxe_softc *sc,
11239                   uint32_t         *magic_val)
11240{
11241    /* Do some magic... */
11242    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
11243    *magic_val = val & SHARED_MF_CLP_MAGIC;
11244    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
11245}
11246
11247/* restore the value of the 'magic' bit */
11248static void
11249bxe_clp_reset_done(struct bxe_softc *sc,
11250                   uint32_t         magic_val)
11251{
11252    /* Restore the 'magic' bit value... */
11253    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
11254    MFCFG_WR(sc, shared_mf_config.clp_mb,
11255              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
11256}
11257
11258/* prepare for MCP reset, takes care of CLP configurations */
11259static void
11260bxe_reset_mcp_prep(struct bxe_softc *sc,
11261                   uint32_t         *magic_val)
11262{
11263    uint32_t shmem;
11264    uint32_t validity_offset;
11265
11266    /* set `magic' bit in order to save MF config */
11267    if (!CHIP_IS_E1(sc)) {
11268        bxe_clp_reset_prep(sc, magic_val);
11269    }
11270
11271    /* get shmem offset */
11272    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
11273    validity_offset =
11274        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
11275
11276    /* Clear validity map flags */
11277    if (shmem > 0) {
11278        REG_WR(sc, shmem + validity_offset, 0);
11279    }
11280}
11281
11282#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
11283#define MCP_ONE_TIMEOUT  100    /* 100 ms */
11284
11285static void
11286bxe_mcp_wait_one(struct bxe_softc *sc)
11287{
11288    /* special handling for emulation and FPGA (10 times longer) */
11289    if (CHIP_REV_IS_SLOW(sc)) {
11290        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
11291    } else {
11292        DELAY((MCP_ONE_TIMEOUT) * 1000);
11293    }
11294}
11295
11296/* initialize shmem_base and waits for validity signature to appear */
11297static int
11298bxe_init_shmem(struct bxe_softc *sc)
11299{
11300    int cnt = 0;
11301    uint32_t val = 0;
11302
11303    do {
11304        sc->devinfo.shmem_base     =
11305        sc->link_params.shmem_base =
11306            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
11307
11308        if (sc->devinfo.shmem_base) {
11309            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
11310            if (val & SHR_MEM_VALIDITY_MB)
11311                return (0);
11312        }
11313
11314        bxe_mcp_wait_one(sc);
11315
11316    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
11317
11318    BLOGE(sc, "BAD MCP validity signature\n");
11319
11320    return (-1);
11321}
11322
11323static int
11324bxe_reset_mcp_comp(struct bxe_softc *sc,
11325                   uint32_t         magic_val)
11326{
11327    int rc = bxe_init_shmem(sc);
11328
11329    /* Restore the `magic' bit value */
11330    if (!CHIP_IS_E1(sc)) {
11331        bxe_clp_reset_done(sc, magic_val);
11332    }
11333
11334    return (rc);
11335}
11336
11337static void
11338bxe_pxp_prep(struct bxe_softc *sc)
11339{
11340    if (!CHIP_IS_E1(sc)) {
11341        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
11342        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
11343        wmb();
11344    }
11345}
11346
11347/*
11348 * Reset the whole chip except for:
11349 *      - PCIE core
11350 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
11351 *      - IGU
11352 *      - MISC (including AEU)
11353 *      - GRC
11354 *      - RBCN, RBCP
11355 */
11356static void
11357bxe_process_kill_chip_reset(struct bxe_softc *sc,
11358                            uint8_t          global)
11359{
11360    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
11361    uint32_t global_bits2, stay_reset2;
11362
11363    /*
11364     * Bits that have to be set in reset_mask2 if we want to reset 'global'
11365     * (per chip) blocks.
11366     */
11367    global_bits2 =
11368        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
11369        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
11370
11371    /*
11372     * Don't reset the following blocks.
11373     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
11374     *            reset, as in 4 port device they might still be owned
11375     *            by the MCP (there is only one leader per path).
11376     */
11377    not_reset_mask1 =
11378        MISC_REGISTERS_RESET_REG_1_RST_HC |
11379        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
11380        MISC_REGISTERS_RESET_REG_1_RST_PXP;
11381
11382    not_reset_mask2 =
11383        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
11384        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
11385        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
11386        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
11387        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
11388        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
11389        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
11390        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
11391        MISC_REGISTERS_RESET_REG_2_RST_ATC |
11392        MISC_REGISTERS_RESET_REG_2_PGLC |
11393        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
11394        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
11395        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
11396        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
11397        MISC_REGISTERS_RESET_REG_2_UMAC0 |
11398        MISC_REGISTERS_RESET_REG_2_UMAC1;
11399
11400    /*
11401     * Keep the following blocks in reset:
11402     *  - all xxMACs are handled by the elink code.
11403     */
11404    stay_reset2 =
11405        MISC_REGISTERS_RESET_REG_2_XMAC |
11406        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
11407
11408    /* Full reset masks according to the chip */
11409    reset_mask1 = 0xffffffff;
11410
11411    if (CHIP_IS_E1(sc))
11412        reset_mask2 = 0xffff;
11413    else if (CHIP_IS_E1H(sc))
11414        reset_mask2 = 0x1ffff;
11415    else if (CHIP_IS_E2(sc))
11416        reset_mask2 = 0xfffff;
11417    else /* CHIP_IS_E3 */
11418        reset_mask2 = 0x3ffffff;
11419
11420    /* Don't reset global blocks unless we need to */
11421    if (!global)
11422        reset_mask2 &= ~global_bits2;
11423
11424    /*
11425     * In case of attention in the QM, we need to reset PXP
11426     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11427     * because otherwise QM reset would release 'close the gates' shortly
11428     * before resetting the PXP, then the PSWRQ would send a write
11429     * request to PGLUE. Then when PXP is reset, PGLUE would try to
11430     * read the payload data from PSWWR, but PSWWR would not
11431     * respond. The write queue in PGLUE would stuck, dmae commands
11432     * would not return. Therefore it's important to reset the second
11433     * reset register (containing the
11434     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11435     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11436     * bit).
11437     */
11438    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11439           reset_mask2 & (~not_reset_mask2));
11440
11441    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11442           reset_mask1 & (~not_reset_mask1));
11443
11444    mb();
11445    wmb();
11446
11447    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11448           reset_mask2 & (~stay_reset2));
11449
11450    mb();
11451    wmb();
11452
11453    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11454    wmb();
11455}
11456
11457static int
11458bxe_process_kill(struct bxe_softc *sc,
11459                 uint8_t          global)
11460{
11461    int cnt = 1000;
11462    uint32_t val = 0;
11463    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11464    uint32_t tags_63_32 = 0;
11465
11466    /* Empty the Tetris buffer, wait for 1s */
11467    do {
11468        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11469        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11470        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11471        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11472        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11473        if (CHIP_IS_E3(sc)) {
11474            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11475        }
11476
11477        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11478            ((port_is_idle_0 & 0x1) == 0x1) &&
11479            ((port_is_idle_1 & 0x1) == 0x1) &&
11480            (pgl_exp_rom2 == 0xffffffff) &&
11481            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11482            break;
11483        DELAY(1000);
11484    } while (cnt-- > 0);
11485
11486    if (cnt <= 0) {
11487        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11488                  "are still outstanding read requests after 1s! "
11489                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11490                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11491              sr_cnt, blk_cnt, port_is_idle_0,
11492              port_is_idle_1, pgl_exp_rom2);
11493        return (-1);
11494    }
11495
11496    mb();
11497
11498    /* Close gates #2, #3 and #4 */
11499    bxe_set_234_gates(sc, TRUE);
11500
11501    /* Poll for IGU VQs for 57712 and newer chips */
11502    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11503        return (-1);
11504    }
11505
11506    /* XXX indicate that "process kill" is in progress to MCP */
11507
11508    /* clear "unprepared" bit */
11509    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11510    mb();
11511
11512    /* Make sure all is written to the chip before the reset */
11513    wmb();
11514
11515    /*
11516     * Wait for 1ms to empty GLUE and PCI-E core queues,
11517     * PSWHST, GRC and PSWRD Tetris buffer.
11518     */
11519    DELAY(1000);
11520
11521    /* Prepare to chip reset: */
11522    /* MCP */
11523    if (global) {
11524        bxe_reset_mcp_prep(sc, &val);
11525    }
11526
11527    /* PXP */
11528    bxe_pxp_prep(sc);
11529    mb();
11530
11531    /* reset the chip */
11532    bxe_process_kill_chip_reset(sc, global);
11533    mb();
11534
11535    /* clear errors in PGB */
11536    if (!CHIP_IS_E1(sc))
11537        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11538
11539    /* Recover after reset: */
11540    /* MCP */
11541    if (global && bxe_reset_mcp_comp(sc, val)) {
11542        return (-1);
11543    }
11544
11545    /* XXX add resetting the NO_MCP mode DB here */
11546
11547    /* Open the gates #2, #3 and #4 */
11548    bxe_set_234_gates(sc, FALSE);
11549
11550    /* XXX
11551     * IGU/AEU preparation bring back the AEU/IGU to a reset state
11552     * re-enable attentions
11553     */
11554
11555    return (0);
11556}
11557
11558static int
11559bxe_leader_reset(struct bxe_softc *sc)
11560{
11561    int rc = 0;
11562    uint8_t global = bxe_reset_is_global(sc);
11563    uint32_t load_code;
11564
11565    /*
11566     * If not going to reset MCP, load "fake" driver to reset HW while
11567     * driver is owner of the HW.
11568     */
11569    if (!global && !BXE_NOMCP(sc)) {
11570        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11571                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11572        if (!load_code) {
11573            BLOGE(sc, "MCP response failure, aborting\n");
11574            rc = -1;
11575            goto exit_leader_reset;
11576        }
11577
11578        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11579            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11580            BLOGE(sc, "MCP unexpected response, aborting\n");
11581            rc = -1;
11582            goto exit_leader_reset2;
11583        }
11584
11585        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11586        if (!load_code) {
11587            BLOGE(sc, "MCP response failure, aborting\n");
11588            rc = -1;
11589            goto exit_leader_reset2;
11590        }
11591    }
11592
11593    /* try to recover after the failure */
11594    if (bxe_process_kill(sc, global)) {
11595        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11596        rc = -1;
11597        goto exit_leader_reset2;
11598    }
11599
11600    /*
11601     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11602     * state.
11603     */
11604    bxe_set_reset_done(sc);
11605    if (global) {
11606        bxe_clear_reset_global(sc);
11607    }
11608
11609exit_leader_reset2:
11610
11611    /* unload "fake driver" if it was loaded */
11612    if (!global && !BXE_NOMCP(sc)) {
11613        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11614        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11615    }
11616
11617exit_leader_reset:
11618
11619    sc->is_leader = 0;
11620    bxe_release_leader_lock(sc);
11621
11622    mb();
11623    return (rc);
11624}
11625
11626/*
11627 * prepare INIT transition, parameters configured:
11628 *   - HC configuration
11629 *   - Queue's CDU context
11630 */
11631static void
11632bxe_pf_q_prep_init(struct bxe_softc               *sc,
11633                   struct bxe_fastpath            *fp,
11634                   struct ecore_queue_init_params *init_params)
11635{
11636    uint8_t cos;
11637    int cxt_index, cxt_offset;
11638
11639    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11640    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11641
11642    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11643    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11644
11645    /* HC rate */
11646    init_params->rx.hc_rate =
11647        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11648    init_params->tx.hc_rate =
11649        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11650
11651    /* FW SB ID */
11652    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11653
11654    /* CQ index among the SB indices */
11655    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11656    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11657
11658    /* set maximum number of COSs supported by this queue */
11659    init_params->max_cos = sc->max_cos;
11660
11661    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11662          fp->index, init_params->max_cos);
11663
11664    /* set the context pointers queue object */
11665    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11666        /* XXX change index/cid here if ever support multiple tx CoS */
11667        /* fp->txdata[cos]->cid */
11668        cxt_index = fp->index / ILT_PAGE_CIDS;
11669        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11670        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11671    }
11672}
11673
11674/* set flags that are common for the Tx-only and not normal connections */
11675static unsigned long
11676bxe_get_common_flags(struct bxe_softc    *sc,
11677                     struct bxe_fastpath *fp,
11678                     uint8_t             zero_stats)
11679{
11680    unsigned long flags = 0;
11681
11682    /* PF driver will always initialize the Queue to an ACTIVE state */
11683    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11684
11685    /*
11686     * tx only connections collect statistics (on the same index as the
11687     * parent connection). The statistics are zeroed when the parent
11688     * connection is initialized.
11689     */
11690
11691    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11692    if (zero_stats) {
11693        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11694    }
11695
11696    /*
11697     * tx only connections can support tx-switching, though their
11698     * CoS-ness doesn't survive the loopback
11699     */
11700    if (sc->flags & BXE_TX_SWITCHING) {
11701        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11702    }
11703
11704    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11705
11706    return (flags);
11707}
11708
11709static unsigned long
11710bxe_get_q_flags(struct bxe_softc    *sc,
11711                struct bxe_fastpath *fp,
11712                uint8_t             leading)
11713{
11714    unsigned long flags = 0;
11715
11716    if (IS_MF_SD(sc)) {
11717        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11718    }
11719
11720    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11721        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11722        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11723#if 0
11724        if (fp->mode == TPA_MODE_GRO)
11725            __set_bit(ECORE_Q_FLG_TPA_GRO, &flags);
11726#endif
11727    }
11728
11729    if (leading) {
11730        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11731        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11732    }
11733
11734    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11735
11736#if 0
11737    /* configure silent vlan removal */
11738    if (IS_MF_AFEX(sc)) {
11739        bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags);
11740    }
11741#endif
11742
11743    /* merge with common flags */
11744    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11745}
11746
11747static void
11748bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11749                      struct bxe_fastpath               *fp,
11750                      struct ecore_general_setup_params *gen_init,
11751                      uint8_t                           cos)
11752{
11753    gen_init->stat_id = bxe_stats_id(fp);
11754    gen_init->spcl_id = fp->cl_id;
11755    gen_init->mtu = sc->mtu;
11756    gen_init->cos = cos;
11757}
11758
11759static void
11760bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11761                 struct bxe_fastpath           *fp,
11762                 struct rxq_pause_params       *pause,
11763                 struct ecore_rxq_setup_params *rxq_init)
11764{
11765    uint8_t max_sge = 0;
11766    uint16_t sge_sz = 0;
11767    uint16_t tpa_agg_size = 0;
11768
11769    pause->sge_th_lo = SGE_TH_LO(sc);
11770    pause->sge_th_hi = SGE_TH_HI(sc);
11771
11772    /* validate SGE ring has enough to cross high threshold */
11773    if (sc->dropless_fc &&
11774            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11775            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11776        BLOGW(sc, "sge ring threshold limit\n");
11777    }
11778
11779    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11780    tpa_agg_size = (2 * sc->mtu);
11781    if (tpa_agg_size < sc->max_aggregation_size) {
11782        tpa_agg_size = sc->max_aggregation_size;
11783    }
11784
11785    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11786    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11787                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11788    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11789
11790    /* pause - not for e1 */
11791    if (!CHIP_IS_E1(sc)) {
11792        pause->bd_th_lo = BD_TH_LO(sc);
11793        pause->bd_th_hi = BD_TH_HI(sc);
11794
11795        pause->rcq_th_lo = RCQ_TH_LO(sc);
11796        pause->rcq_th_hi = RCQ_TH_HI(sc);
11797
11798        /* validate rings have enough entries to cross high thresholds */
11799        if (sc->dropless_fc &&
11800            pause->bd_th_hi + FW_PREFETCH_CNT >
11801            sc->rx_ring_size) {
11802            BLOGW(sc, "rx bd ring threshold limit\n");
11803        }
11804
11805        if (sc->dropless_fc &&
11806            pause->rcq_th_hi + FW_PREFETCH_CNT >
11807            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11808            BLOGW(sc, "rcq ring threshold limit\n");
11809        }
11810
11811        pause->pri_map = 1;
11812    }
11813
11814    /* rxq setup */
11815    rxq_init->dscr_map   = fp->rx_dma.paddr;
11816    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11817    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11818    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11819
11820    /*
11821     * This should be a maximum number of data bytes that may be
11822     * placed on the BD (not including paddings).
11823     */
11824    rxq_init->buf_sz = (fp->rx_buf_size -
11825                        IP_HEADER_ALIGNMENT_PADDING);
11826
11827    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11828    rxq_init->tpa_agg_sz      = tpa_agg_size;
11829    rxq_init->sge_buf_sz      = sge_sz;
11830    rxq_init->max_sges_pkt    = max_sge;
11831    rxq_init->rss_engine_id   = SC_FUNC(sc);
11832    rxq_init->mcast_engine_id = SC_FUNC(sc);
11833
11834    /*
11835     * Maximum number or simultaneous TPA aggregation for this Queue.
11836     * For PF Clients it should be the maximum available number.
11837     * VF driver(s) may want to define it to a smaller value.
11838     */
11839    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11840
11841    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11842    rxq_init->fw_sb_id = fp->fw_sb_id;
11843
11844    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11845
11846    /*
11847     * configure silent vlan removal
11848     * if multi function mode is afex, then mask default vlan
11849     */
11850    if (IS_MF_AFEX(sc)) {
11851        rxq_init->silent_removal_value =
11852            sc->devinfo.mf_info.afex_def_vlan_tag;
11853        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11854    }
11855}
11856
11857static void
11858bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11859                 struct bxe_fastpath           *fp,
11860                 struct ecore_txq_setup_params *txq_init,
11861                 uint8_t                       cos)
11862{
11863    /*
11864     * XXX If multiple CoS is ever supported then each fastpath structure
11865     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11866     * fp->txdata[cos]->tx_dma.paddr;
11867     */
11868    txq_init->dscr_map     = fp->tx_dma.paddr;
11869    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11870    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11871    txq_init->fw_sb_id     = fp->fw_sb_id;
11872
11873    /*
11874     * set the TSS leading client id for TX classfication to the
11875     * leading RSS client id
11876     */
11877    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11878}
11879
11880/*
11881 * This function performs 2 steps in a queue state machine:
11882 *   1) RESET->INIT
11883 *   2) INIT->SETUP
11884 */
11885static int
11886bxe_setup_queue(struct bxe_softc    *sc,
11887                struct bxe_fastpath *fp,
11888                uint8_t             leading)
11889{
11890    struct ecore_queue_state_params q_params = { NULL };
11891    struct ecore_queue_setup_params *setup_params =
11892                        &q_params.params.setup;
11893#if 0
11894    struct ecore_queue_setup_tx_only_params *tx_only_params =
11895                        &q_params.params.tx_only;
11896    uint8_t tx_index;
11897#endif
11898    int rc;
11899
11900    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11901
11902    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11903
11904    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11905
11906    /* we want to wait for completion in this context */
11907    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11908
11909    /* prepare the INIT parameters */
11910    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11911
11912    /* Set the command */
11913    q_params.cmd = ECORE_Q_CMD_INIT;
11914
11915    /* Change the state to INIT */
11916    rc = ecore_queue_state_change(sc, &q_params);
11917    if (rc) {
11918        BLOGE(sc, "Queue(%d) INIT failed\n", fp->index);
11919        return (rc);
11920    }
11921
11922    BLOGD(sc, DBG_LOAD, "init complete\n");
11923
11924    /* now move the Queue to the SETUP state */
11925    memset(setup_params, 0, sizeof(*setup_params));
11926
11927    /* set Queue flags */
11928    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11929
11930    /* set general SETUP parameters */
11931    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11932                          FIRST_TX_COS_INDEX);
11933
11934    bxe_pf_rx_q_prep(sc, fp,
11935                     &setup_params->pause_params,
11936                     &setup_params->rxq_params);
11937
11938    bxe_pf_tx_q_prep(sc, fp,
11939                     &setup_params->txq_params,
11940                     FIRST_TX_COS_INDEX);
11941
11942    /* Set the command */
11943    q_params.cmd = ECORE_Q_CMD_SETUP;
11944
11945    /* change the state to SETUP */
11946    rc = ecore_queue_state_change(sc, &q_params);
11947    if (rc) {
11948        BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index);
11949        return (rc);
11950    }
11951
11952#if 0
11953    /* loop through the relevant tx-only indices */
11954    for (tx_index = FIRST_TX_ONLY_COS_INDEX;
11955         tx_index < sc->max_cos;
11956         tx_index++) {
11957        /* prepare and send tx-only ramrod*/
11958        rc = bxe_setup_tx_only(sc, fp, &q_params,
11959                               tx_only_params, tx_index, leading);
11960        if (rc) {
11961            BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n",
11962                  fp->index, tx_index);
11963            return (rc);
11964        }
11965    }
11966#endif
11967
11968    return (rc);
11969}
11970
11971static int
11972bxe_setup_leading(struct bxe_softc *sc)
11973{
11974    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11975}
11976
11977static int
11978bxe_config_rss_pf(struct bxe_softc            *sc,
11979                  struct ecore_rss_config_obj *rss_obj,
11980                  uint8_t                     config_hash)
11981{
11982    struct ecore_config_rss_params params = { NULL };
11983    int i;
11984
11985    /*
11986     * Although RSS is meaningless when there is a single HW queue we
11987     * still need it enabled in order to have HW Rx hash generated.
11988     */
11989
11990    params.rss_obj = rss_obj;
11991
11992    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11993
11994    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11995
11996    /* RSS configuration */
11997    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11998    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11999    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
12000    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
12001    if (rss_obj->udp_rss_v4) {
12002        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
12003    }
12004    if (rss_obj->udp_rss_v6) {
12005        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
12006    }
12007
12008    /* Hash bits */
12009    params.rss_result_mask = MULTI_MASK;
12010
12011    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
12012
12013    if (config_hash) {
12014        /* RSS keys */
12015        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
12016            params.rss_key[i] = arc4random();
12017        }
12018
12019        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
12020    }
12021
12022    return (ecore_config_rss(sc, &params));
12023}
12024
12025static int
12026bxe_config_rss_eth(struct bxe_softc *sc,
12027                   uint8_t          config_hash)
12028{
12029    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
12030}
12031
12032static int
12033bxe_init_rss_pf(struct bxe_softc *sc)
12034{
12035    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
12036    int i;
12037
12038    /*
12039     * Prepare the initial contents of the indirection table if
12040     * RSS is enabled
12041     */
12042    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
12043        sc->rss_conf_obj.ind_table[i] =
12044            (sc->fp->cl_id + (i % num_eth_queues));
12045    }
12046
12047    if (sc->udp_rss) {
12048        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
12049    }
12050
12051    /*
12052     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
12053     * per-port, so if explicit configuration is needed, do it only
12054     * for a PMF.
12055     *
12056     * For 57712 and newer it's a per-function configuration.
12057     */
12058    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
12059}
12060
12061static int
12062bxe_set_mac_one(struct bxe_softc          *sc,
12063                uint8_t                   *mac,
12064                struct ecore_vlan_mac_obj *obj,
12065                uint8_t                   set,
12066                int                       mac_type,
12067                unsigned long             *ramrod_flags)
12068{
12069    struct ecore_vlan_mac_ramrod_params ramrod_param;
12070    int rc;
12071
12072    memset(&ramrod_param, 0, sizeof(ramrod_param));
12073
12074    /* fill in general parameters */
12075    ramrod_param.vlan_mac_obj = obj;
12076    ramrod_param.ramrod_flags = *ramrod_flags;
12077
12078    /* fill a user request section if needed */
12079    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
12080        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
12081
12082        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
12083
12084        /* Set the command: ADD or DEL */
12085        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
12086                                            ECORE_VLAN_MAC_DEL;
12087    }
12088
12089    rc = ecore_config_vlan_mac(sc, &ramrod_param);
12090
12091    if (rc == ECORE_EXISTS) {
12092        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12093        /* do not treat adding same MAC as error */
12094        rc = 0;
12095    } else if (rc < 0) {
12096        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
12097    }
12098
12099    return (rc);
12100}
12101
12102static int
12103bxe_set_eth_mac(struct bxe_softc *sc,
12104                uint8_t          set)
12105{
12106    unsigned long ramrod_flags = 0;
12107
12108    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
12109
12110    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12111
12112    /* Eth MAC is set on RSS leading client (fp[0]) */
12113    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
12114                            &sc->sp_objs->mac_obj,
12115                            set, ECORE_ETH_MAC, &ramrod_flags));
12116}
12117
12118#if 0
12119static void
12120bxe_update_max_mf_config(struct bxe_softc *sc,
12121                         uint32_t         value)
12122{
12123    /* load old values */
12124    uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)];
12125
12126    if (value != bxe_extract_max_cfg(sc, mf_cfg)) {
12127        /* leave all but MAX value */
12128        mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
12129
12130        /* set new MAX value */
12131        mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) &
12132                   FUNC_MF_CFG_MAX_BW_MASK);
12133
12134        bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
12135    }
12136}
12137#endif
12138
12139static int
12140bxe_get_cur_phy_idx(struct bxe_softc *sc)
12141{
12142    uint32_t sel_phy_idx = 0;
12143
12144    if (sc->link_params.num_phys <= 1) {
12145        return (ELINK_INT_PHY);
12146    }
12147
12148    if (sc->link_vars.link_up) {
12149        sel_phy_idx = ELINK_EXT_PHY1;
12150        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
12151        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
12152            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
12153             ELINK_SUPPORTED_FIBRE))
12154            sel_phy_idx = ELINK_EXT_PHY2;
12155    } else {
12156        switch (elink_phy_selection(&sc->link_params)) {
12157        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
12158        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
12159        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
12160               sel_phy_idx = ELINK_EXT_PHY1;
12161               break;
12162        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
12163        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
12164               sel_phy_idx = ELINK_EXT_PHY2;
12165               break;
12166        }
12167    }
12168
12169    return (sel_phy_idx);
12170}
12171
12172static int
12173bxe_get_link_cfg_idx(struct bxe_softc *sc)
12174{
12175    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
12176
12177    /*
12178     * The selected activated PHY is always after swapping (in case PHY
12179     * swapping is enabled). So when swapping is enabled, we need to reverse
12180     * the configuration
12181     */
12182
12183    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
12184        if (sel_phy_idx == ELINK_EXT_PHY1)
12185            sel_phy_idx = ELINK_EXT_PHY2;
12186        else if (sel_phy_idx == ELINK_EXT_PHY2)
12187            sel_phy_idx = ELINK_EXT_PHY1;
12188    }
12189
12190    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
12191}
12192
12193static void
12194bxe_set_requested_fc(struct bxe_softc *sc)
12195{
12196    /*
12197     * Initialize link parameters structure variables
12198     * It is recommended to turn off RX FC for jumbo frames
12199     * for better performance
12200     */
12201    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
12202        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
12203    } else {
12204        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
12205    }
12206}
12207
12208static void
12209bxe_calc_fc_adv(struct bxe_softc *sc)
12210{
12211    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
12212    switch (sc->link_vars.ieee_fc &
12213            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
12214    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
12215    default:
12216        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
12217                                           ADVERTISED_Pause);
12218        break;
12219
12220    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
12221        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
12222                                          ADVERTISED_Pause);
12223        break;
12224
12225    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
12226        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
12227        break;
12228    }
12229}
12230
12231static uint16_t
12232bxe_get_mf_speed(struct bxe_softc *sc)
12233{
12234    uint16_t line_speed = sc->link_vars.line_speed;
12235    if (IS_MF(sc)) {
12236        uint16_t maxCfg =
12237            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
12238
12239        /* calculate the current MAX line speed limit for the MF devices */
12240        if (IS_MF_SI(sc)) {
12241            line_speed = (line_speed * maxCfg) / 100;
12242        } else { /* SD mode */
12243            uint16_t vn_max_rate = maxCfg * 100;
12244
12245            if (vn_max_rate < line_speed) {
12246                line_speed = vn_max_rate;
12247            }
12248        }
12249    }
12250
12251    return (line_speed);
12252}
12253
12254static void
12255bxe_fill_report_data(struct bxe_softc            *sc,
12256                     struct bxe_link_report_data *data)
12257{
12258    uint16_t line_speed = bxe_get_mf_speed(sc);
12259
12260    memset(data, 0, sizeof(*data));
12261
12262    /* fill the report data with the effective line speed */
12263    data->line_speed = line_speed;
12264
12265    /* Link is down */
12266    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
12267        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
12268    }
12269
12270    /* Full DUPLEX */
12271    if (sc->link_vars.duplex == DUPLEX_FULL) {
12272        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
12273    }
12274
12275    /* Rx Flow Control is ON */
12276    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
12277        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
12278    }
12279
12280    /* Tx Flow Control is ON */
12281    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
12282        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
12283    }
12284}
12285
12286/* report link status to OS, should be called under phy_lock */
12287static void
12288bxe_link_report_locked(struct bxe_softc *sc)
12289{
12290    struct bxe_link_report_data cur_data;
12291
12292    /* reread mf_cfg */
12293    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
12294        bxe_read_mf_cfg(sc);
12295    }
12296
12297    /* Read the current link report info */
12298    bxe_fill_report_data(sc, &cur_data);
12299
12300    /* Don't report link down or exactly the same link status twice */
12301    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
12302        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12303                      &sc->last_reported_link.link_report_flags) &&
12304         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12305                      &cur_data.link_report_flags))) {
12306        return;
12307    }
12308
12309    sc->link_cnt++;
12310
12311    /* report new link params and remember the state for the next time */
12312    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
12313
12314    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12315                     &cur_data.link_report_flags)) {
12316        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12317        BLOGI(sc, "NIC Link is Down\n");
12318    } else {
12319        const char *duplex;
12320        const char *flow;
12321
12322        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
12323                                   &cur_data.link_report_flags)) {
12324            duplex = "full";
12325        } else {
12326            duplex = "half";
12327        }
12328
12329        /*
12330         * Handle the FC at the end so that only these flags would be
12331         * possibly set. This way we may easily check if there is no FC
12332         * enabled.
12333         */
12334        if (cur_data.link_report_flags) {
12335            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12336                             &cur_data.link_report_flags) &&
12337                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12338                             &cur_data.link_report_flags)) {
12339                flow = "ON - receive & transmit";
12340            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12341                                    &cur_data.link_report_flags) &&
12342                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12343                                     &cur_data.link_report_flags)) {
12344                flow = "ON - receive";
12345            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12346                                     &cur_data.link_report_flags) &&
12347                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12348                                    &cur_data.link_report_flags)) {
12349                flow = "ON - transmit";
12350            } else {
12351                flow = "none"; /* possible? */
12352            }
12353        } else {
12354            flow = "none";
12355        }
12356
12357        if_link_state_change(sc->ifp, LINK_STATE_UP);
12358        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
12359              cur_data.line_speed, duplex, flow);
12360    }
12361}
12362
12363static void
12364bxe_link_report(struct bxe_softc *sc)
12365{
12366    BXE_PHY_LOCK(sc);
12367    bxe_link_report_locked(sc);
12368    BXE_PHY_UNLOCK(sc);
12369}
12370
12371static void
12372bxe_link_status_update(struct bxe_softc *sc)
12373{
12374    if (sc->state != BXE_STATE_OPEN) {
12375        return;
12376    }
12377
12378#if 0
12379    /* read updated dcb configuration */
12380    if (IS_PF(sc))
12381        bxe_dcbx_pmf_update(sc);
12382#endif
12383
12384    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
12385        elink_link_status_update(&sc->link_params, &sc->link_vars);
12386    } else {
12387        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
12388                                  ELINK_SUPPORTED_10baseT_Full |
12389                                  ELINK_SUPPORTED_100baseT_Half |
12390                                  ELINK_SUPPORTED_100baseT_Full |
12391                                  ELINK_SUPPORTED_1000baseT_Full |
12392                                  ELINK_SUPPORTED_2500baseX_Full |
12393                                  ELINK_SUPPORTED_10000baseT_Full |
12394                                  ELINK_SUPPORTED_TP |
12395                                  ELINK_SUPPORTED_FIBRE |
12396                                  ELINK_SUPPORTED_Autoneg |
12397                                  ELINK_SUPPORTED_Pause |
12398                                  ELINK_SUPPORTED_Asym_Pause);
12399        sc->port.advertising[0] = sc->port.supported[0];
12400
12401        sc->link_params.sc                = sc;
12402        sc->link_params.port              = SC_PORT(sc);
12403        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
12404        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
12405        sc->link_params.req_line_speed[0] = SPEED_10000;
12406        sc->link_params.speed_cap_mask[0] = 0x7f0000;
12407        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
12408
12409        if (CHIP_REV_IS_FPGA(sc)) {
12410            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
12411            sc->link_vars.line_speed  = ELINK_SPEED_1000;
12412            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
12413                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
12414        } else {
12415            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
12416            sc->link_vars.line_speed  = ELINK_SPEED_10000;
12417            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
12418                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
12419        }
12420
12421        sc->link_vars.link_up = 1;
12422
12423        sc->link_vars.duplex    = DUPLEX_FULL;
12424        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
12425
12426        if (IS_PF(sc)) {
12427            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
12428            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12429            bxe_link_report(sc);
12430        }
12431    }
12432
12433    if (IS_PF(sc)) {
12434        if (sc->link_vars.link_up) {
12435            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12436        } else {
12437            bxe_stats_handle(sc, STATS_EVENT_STOP);
12438        }
12439        bxe_link_report(sc);
12440    } else {
12441        bxe_link_report(sc);
12442        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12443    }
12444}
12445
12446static int
12447bxe_initial_phy_init(struct bxe_softc *sc,
12448                     int              load_mode)
12449{
12450    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
12451    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
12452    struct elink_params *lp = &sc->link_params;
12453
12454    bxe_set_requested_fc(sc);
12455
12456    if (CHIP_REV_IS_SLOW(sc)) {
12457        uint32_t bond = CHIP_BOND_ID(sc);
12458        uint32_t feat = 0;
12459
12460        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
12461            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12462        } else if (bond & 0x4) {
12463            if (CHIP_IS_E3(sc)) {
12464                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
12465            } else {
12466                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12467            }
12468        } else if (bond & 0x8) {
12469            if (CHIP_IS_E3(sc)) {
12470                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
12471            } else {
12472                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12473            }
12474        }
12475
12476        /* disable EMAC for E3 and above */
12477        if (bond & 0x2) {
12478            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12479        }
12480
12481        sc->link_params.feature_config_flags |= feat;
12482    }
12483
12484    BXE_PHY_LOCK(sc);
12485
12486    if (load_mode == LOAD_DIAG) {
12487        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12488        /* Prefer doing PHY loopback at 10G speed, if possible */
12489        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12490            if (lp->speed_cap_mask[cfg_idx] &
12491                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12492                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12493            } else {
12494                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12495            }
12496        }
12497    }
12498
12499    if (load_mode == LOAD_LOOPBACK_EXT) {
12500        lp->loopback_mode = ELINK_LOOPBACK_EXT;
12501    }
12502
12503    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12504
12505    BXE_PHY_UNLOCK(sc);
12506
12507    bxe_calc_fc_adv(sc);
12508
12509    if (sc->link_vars.link_up) {
12510        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12511        bxe_link_report(sc);
12512    }
12513
12514    if (!CHIP_REV_IS_SLOW(sc)) {
12515        bxe_periodic_start(sc);
12516    }
12517
12518    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12519    return (rc);
12520}
12521
12522/* must be called under IF_ADDR_LOCK */
12523static int
12524bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12525                         struct ecore_mcast_ramrod_params *p)
12526{
12527    if_t ifp = sc->ifp;
12528    int mc_count = 0;
12529    int mcnt, i;
12530    struct ecore_mcast_list_elem *mc_mac;
12531    unsigned char *mta;
12532
12533    mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */
12534                                           /* should we enforce one? */
12535    ECORE_LIST_INIT(&p->mcast_list);
12536    p->mcast_list_len = 0;
12537
12538    if (!mc_count) {
12539        return (0);
12540    }
12541
12542    mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
12543            mc_count, M_DEVBUF, M_NOWAIT);
12544
12545    if(mta == NULL) {
12546        BLOGE(sc, "Failed to allocate temp mcast list\n");
12547        return (-1);
12548    }
12549
12550    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12551                    (M_NOWAIT | M_ZERO));
12552    if (!mc_mac) {
12553        free(mta, M_DEVBUF);
12554        BLOGE(sc, "Failed to allocate temp mcast list\n");
12555        return (-1);
12556    }
12557
12558    if_multiaddr_array(ifp, mta, &mcnt, mc_count); /* mta and mcnt not expected
12559                                                      to be  different */
12560    for(i=0; i< mcnt; i++) {
12561
12562        bcopy((mta + (i * ETHER_ADDR_LEN)), mc_mac->mac, ETHER_ADDR_LEN);
12563        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12564
12565        BLOGD(sc, DBG_LOAD,
12566              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12567              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12568              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12569
12570        mc_mac++;
12571    }
12572
12573    p->mcast_list_len = mc_count;
12574    free(mta, M_DEVBUF);
12575
12576    return (0);
12577}
12578
12579static void
12580bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12581{
12582    struct ecore_mcast_list_elem *mc_mac =
12583        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12584                               struct ecore_mcast_list_elem,
12585                               link);
12586
12587    if (mc_mac) {
12588        /* only a single free as all mc_macs are in the same heap array */
12589        free(mc_mac, M_DEVBUF);
12590    }
12591}
12592
12593static int
12594bxe_set_mc_list(struct bxe_softc *sc)
12595{
12596    struct ecore_mcast_ramrod_params rparam = { NULL };
12597    int rc = 0;
12598
12599    rparam.mcast_obj = &sc->mcast_obj;
12600
12601    BXE_MCAST_LOCK(sc);
12602
12603    /* first, clear all configured multicast MACs */
12604    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12605    if (rc < 0) {
12606        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12607        return (rc);
12608    }
12609
12610    /* configure a new MACs list */
12611    rc = bxe_init_mcast_macs_list(sc, &rparam);
12612    if (rc) {
12613        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12614        BXE_MCAST_UNLOCK(sc);
12615        return (rc);
12616    }
12617
12618    /* Now add the new MACs */
12619    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12620    if (rc < 0) {
12621        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12622    }
12623
12624    bxe_free_mcast_macs_list(&rparam);
12625
12626    BXE_MCAST_UNLOCK(sc);
12627
12628    return (rc);
12629}
12630
12631static int
12632bxe_set_uc_list(struct bxe_softc *sc)
12633{
12634    if_t ifp = sc->ifp;
12635    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12636    struct ifaddr *ifa;
12637    unsigned long ramrod_flags = 0;
12638    int rc;
12639
12640#if __FreeBSD_version < 800000
12641    IF_ADDR_LOCK(ifp);
12642#else
12643    if_addr_rlock(ifp);
12644#endif
12645
12646    /* first schedule a cleanup up of old configuration */
12647    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12648    if (rc < 0) {
12649        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12650#if __FreeBSD_version < 800000
12651        IF_ADDR_UNLOCK(ifp);
12652#else
12653        if_addr_runlock(ifp);
12654#endif
12655        return (rc);
12656    }
12657
12658    ifa = if_getifaddr(ifp); /* XXX Is this structure */
12659    while (ifa) {
12660        if (ifa->ifa_addr->sa_family != AF_LINK) {
12661            ifa = TAILQ_NEXT(ifa, ifa_link);
12662            continue;
12663        }
12664
12665        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12666                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12667        if (rc == -EEXIST) {
12668            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12669            /* do not treat adding same MAC as an error */
12670            rc = 0;
12671        } else if (rc < 0) {
12672            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12673#if __FreeBSD_version < 800000
12674            IF_ADDR_UNLOCK(ifp);
12675#else
12676            if_addr_runlock(ifp);
12677#endif
12678            return (rc);
12679        }
12680
12681        ifa = TAILQ_NEXT(ifa, ifa_link);
12682    }
12683
12684#if __FreeBSD_version < 800000
12685    IF_ADDR_UNLOCK(ifp);
12686#else
12687    if_addr_runlock(ifp);
12688#endif
12689
12690    /* Execute the pending commands */
12691    bit_set(&ramrod_flags, RAMROD_CONT);
12692    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12693                            ECORE_UC_LIST_MAC, &ramrod_flags));
12694}
12695
12696static void
12697bxe_handle_rx_mode_tq(void *context,
12698                      int  pending)
12699{
12700    struct bxe_softc *sc = (struct bxe_softc *)context;
12701    if_t ifp = sc->ifp;
12702    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12703
12704    BXE_CORE_LOCK(sc);
12705
12706    if (sc->state != BXE_STATE_OPEN) {
12707        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12708        BXE_CORE_UNLOCK(sc);
12709        return;
12710    }
12711
12712    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12713
12714    if (if_getflags(ifp) & IFF_PROMISC) {
12715        rx_mode = BXE_RX_MODE_PROMISC;
12716    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12717               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12718                CHIP_IS_E1(sc))) {
12719        rx_mode = BXE_RX_MODE_ALLMULTI;
12720    } else {
12721        if (IS_PF(sc)) {
12722            /* some multicasts */
12723            if (bxe_set_mc_list(sc) < 0) {
12724                rx_mode = BXE_RX_MODE_ALLMULTI;
12725            }
12726            if (bxe_set_uc_list(sc) < 0) {
12727                rx_mode = BXE_RX_MODE_PROMISC;
12728            }
12729        }
12730#if 0
12731        else {
12732            /*
12733             * Configuring mcast to a VF involves sleeping (when we
12734             * wait for the PF's response). Since this function is
12735             * called from a non sleepable context we must schedule
12736             * a work item for this purpose
12737             */
12738            bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state);
12739            schedule_delayed_work(&sc->sp_rtnl_task, 0);
12740        }
12741#endif
12742    }
12743
12744    sc->rx_mode = rx_mode;
12745
12746    /* schedule the rx_mode command */
12747    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12748        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12749        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12750        BXE_CORE_UNLOCK(sc);
12751        return;
12752    }
12753
12754    if (IS_PF(sc)) {
12755        bxe_set_storm_rx_mode(sc);
12756    }
12757#if 0
12758    else {
12759        /*
12760         * Configuring mcast to a VF involves sleeping (when we
12761         * wait for the PF's response). Since this function is
12762         * called from a non sleepable context we must schedule
12763         * a work item for this purpose
12764         */
12765        bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state);
12766        schedule_delayed_work(&sc->sp_rtnl_task, 0);
12767    }
12768#endif
12769
12770    BXE_CORE_UNLOCK(sc);
12771}
12772
12773static void
12774bxe_set_rx_mode(struct bxe_softc *sc)
12775{
12776    taskqueue_enqueue(sc->rx_mode_tq, &sc->rx_mode_tq_task);
12777}
12778
12779/* update flags in shmem */
12780static void
12781bxe_update_drv_flags(struct bxe_softc *sc,
12782                     uint32_t         flags,
12783                     uint32_t         set)
12784{
12785    uint32_t drv_flags;
12786
12787    if (SHMEM2_HAS(sc, drv_flags)) {
12788        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12789        drv_flags = SHMEM2_RD(sc, drv_flags);
12790
12791        if (set) {
12792            SET_FLAGS(drv_flags, flags);
12793        } else {
12794            RESET_FLAGS(drv_flags, flags);
12795        }
12796
12797        SHMEM2_WR(sc, drv_flags, drv_flags);
12798        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12799
12800        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12801    }
12802}
12803
12804/* periodic timer callout routine, only runs when the interface is up */
12805
12806static void
12807bxe_periodic_callout_func(void *xsc)
12808{
12809    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12810    int i;
12811
12812    if (!BXE_CORE_TRYLOCK(sc)) {
12813        /* just bail and try again next time */
12814
12815        if ((sc->state == BXE_STATE_OPEN) &&
12816            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12817            /* schedule the next periodic callout */
12818            callout_reset(&sc->periodic_callout, hz,
12819                          bxe_periodic_callout_func, sc);
12820        }
12821
12822        return;
12823    }
12824
12825    if ((sc->state != BXE_STATE_OPEN) ||
12826        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12827        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12828        BXE_CORE_UNLOCK(sc);
12829        return;
12830    }
12831
12832    /* Check for TX timeouts on any fastpath. */
12833    FOR_EACH_QUEUE(sc, i) {
12834        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12835            /* Ruh-Roh, chip was reset! */
12836            break;
12837        }
12838    }
12839
12840    if (!CHIP_REV_IS_SLOW(sc)) {
12841        /*
12842         * This barrier is needed to ensure the ordering between the writing
12843         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12844         * the reading here.
12845         */
12846        mb();
12847        if (sc->port.pmf) {
12848            BXE_PHY_LOCK(sc);
12849            elink_period_func(&sc->link_params, &sc->link_vars);
12850            BXE_PHY_UNLOCK(sc);
12851        }
12852    }
12853
12854    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12855        int mb_idx = SC_FW_MB_IDX(sc);
12856        uint32_t drv_pulse;
12857        uint32_t mcp_pulse;
12858
12859        ++sc->fw_drv_pulse_wr_seq;
12860        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12861
12862        drv_pulse = sc->fw_drv_pulse_wr_seq;
12863        bxe_drv_pulse(sc);
12864
12865        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12866                     MCP_PULSE_SEQ_MASK);
12867
12868        /*
12869         * The delta between driver pulse and mcp response should
12870         * be 1 (before mcp response) or 0 (after mcp response).
12871         */
12872        if ((drv_pulse != mcp_pulse) &&
12873            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12874            /* someone lost a heartbeat... */
12875            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12876                  drv_pulse, mcp_pulse);
12877        }
12878    }
12879
12880    /* state is BXE_STATE_OPEN */
12881    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12882
12883#if 0
12884    /* sample VF bulletin board for new posts from PF */
12885    if (IS_VF(sc)) {
12886        bxe_sample_bulletin(sc);
12887    }
12888#endif
12889
12890    BXE_CORE_UNLOCK(sc);
12891
12892    if ((sc->state == BXE_STATE_OPEN) &&
12893        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12894        /* schedule the next periodic callout */
12895        callout_reset(&sc->periodic_callout, hz,
12896                      bxe_periodic_callout_func, sc);
12897    }
12898}
12899
12900static void
12901bxe_periodic_start(struct bxe_softc *sc)
12902{
12903    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12904    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12905}
12906
12907static void
12908bxe_periodic_stop(struct bxe_softc *sc)
12909{
12910    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12911    callout_drain(&sc->periodic_callout);
12912}
12913
12914/* start the controller */
12915static __noinline int
12916bxe_nic_load(struct bxe_softc *sc,
12917             int              load_mode)
12918{
12919    uint32_t val;
12920    int load_code = 0;
12921    int i, rc = 0;
12922
12923    BXE_CORE_LOCK_ASSERT(sc);
12924
12925    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12926
12927    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12928
12929    if (IS_PF(sc)) {
12930        /* must be called before memory allocation and HW init */
12931        bxe_ilt_set_info(sc);
12932    }
12933
12934    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12935
12936    bxe_set_fp_rx_buf_size(sc);
12937
12938    if (bxe_alloc_fp_buffers(sc) != 0) {
12939        BLOGE(sc, "Failed to allocate fastpath memory\n");
12940        sc->state = BXE_STATE_CLOSED;
12941        rc = ENOMEM;
12942        goto bxe_nic_load_error0;
12943    }
12944
12945    if (bxe_alloc_mem(sc) != 0) {
12946        sc->state = BXE_STATE_CLOSED;
12947        rc = ENOMEM;
12948        goto bxe_nic_load_error0;
12949    }
12950
12951    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12952        sc->state = BXE_STATE_CLOSED;
12953        rc = ENOMEM;
12954        goto bxe_nic_load_error0;
12955    }
12956
12957    if (IS_PF(sc)) {
12958        /* set pf load just before approaching the MCP */
12959        bxe_set_pf_load(sc);
12960
12961        /* if MCP exists send load request and analyze response */
12962        if (!BXE_NOMCP(sc)) {
12963            /* attempt to load pf */
12964            if (bxe_nic_load_request(sc, &load_code) != 0) {
12965                sc->state = BXE_STATE_CLOSED;
12966                rc = ENXIO;
12967                goto bxe_nic_load_error1;
12968            }
12969
12970            /* what did the MCP say? */
12971            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12972                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12973                sc->state = BXE_STATE_CLOSED;
12974                rc = ENXIO;
12975                goto bxe_nic_load_error2;
12976            }
12977        } else {
12978            BLOGI(sc, "Device has no MCP!\n");
12979            load_code = bxe_nic_load_no_mcp(sc);
12980        }
12981
12982        /* mark PMF if applicable */
12983        bxe_nic_load_pmf(sc, load_code);
12984
12985        /* Init Function state controlling object */
12986        bxe_init_func_obj(sc);
12987
12988        /* Initialize HW */
12989        if (bxe_init_hw(sc, load_code) != 0) {
12990            BLOGE(sc, "HW init failed\n");
12991            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12992            sc->state = BXE_STATE_CLOSED;
12993            rc = ENXIO;
12994            goto bxe_nic_load_error2;
12995        }
12996    }
12997
12998    /* attach interrupts */
12999    if (bxe_interrupt_attach(sc) != 0) {
13000        sc->state = BXE_STATE_CLOSED;
13001        rc = ENXIO;
13002        goto bxe_nic_load_error2;
13003    }
13004
13005    bxe_nic_init(sc, load_code);
13006
13007    /* Init per-function objects */
13008    if (IS_PF(sc)) {
13009        bxe_init_objs(sc);
13010        // XXX bxe_iov_nic_init(sc);
13011
13012        /* set AFEX default VLAN tag to an invalid value */
13013        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
13014        // XXX bxe_nic_load_afex_dcc(sc, load_code);
13015
13016        sc->state = BXE_STATE_OPENING_WAITING_PORT;
13017        rc = bxe_func_start(sc);
13018        if (rc) {
13019            BLOGE(sc, "Function start failed!\n");
13020            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
13021            sc->state = BXE_STATE_ERROR;
13022            goto bxe_nic_load_error3;
13023        }
13024
13025        /* send LOAD_DONE command to MCP */
13026        if (!BXE_NOMCP(sc)) {
13027            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
13028            if (!load_code) {
13029                BLOGE(sc, "MCP response failure, aborting\n");
13030                sc->state = BXE_STATE_ERROR;
13031                rc = ENXIO;
13032                goto bxe_nic_load_error3;
13033            }
13034        }
13035
13036        rc = bxe_setup_leading(sc);
13037        if (rc) {
13038            BLOGE(sc, "Setup leading failed!\n");
13039            sc->state = BXE_STATE_ERROR;
13040            goto bxe_nic_load_error3;
13041        }
13042
13043        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
13044            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
13045            if (rc) {
13046                BLOGE(sc, "Queue(%d) setup failed\n", i);
13047                sc->state = BXE_STATE_ERROR;
13048                goto bxe_nic_load_error3;
13049            }
13050        }
13051
13052        rc = bxe_init_rss_pf(sc);
13053        if (rc) {
13054            BLOGE(sc, "PF RSS init failed\n");
13055            sc->state = BXE_STATE_ERROR;
13056            goto bxe_nic_load_error3;
13057        }
13058    }
13059    /* XXX VF */
13060#if 0
13061    else { /* VF */
13062        FOR_EACH_ETH_QUEUE(sc, i) {
13063            rc = bxe_vfpf_setup_q(sc, i);
13064            if (rc) {
13065                BLOGE(sc, "Queue(%d) setup failed\n", i);
13066                sc->state = BXE_STATE_ERROR;
13067                goto bxe_nic_load_error3;
13068            }
13069        }
13070    }
13071#endif
13072
13073    /* now when Clients are configured we are ready to work */
13074    sc->state = BXE_STATE_OPEN;
13075
13076    /* Configure a ucast MAC */
13077    if (IS_PF(sc)) {
13078        rc = bxe_set_eth_mac(sc, TRUE);
13079    }
13080#if 0
13081    else { /* IS_VF(sc) */
13082        rc = bxe_vfpf_set_mac(sc);
13083    }
13084#endif
13085    if (rc) {
13086        BLOGE(sc, "Setting Ethernet MAC failed\n");
13087        sc->state = BXE_STATE_ERROR;
13088        goto bxe_nic_load_error3;
13089    }
13090
13091#if 0
13092    if (IS_PF(sc) && sc->pending_max) {
13093        /* for AFEX */
13094        bxe_update_max_mf_config(sc, sc->pending_max);
13095        sc->pending_max = 0;
13096    }
13097#endif
13098
13099    if (sc->port.pmf) {
13100        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
13101        if (rc) {
13102            sc->state = BXE_STATE_ERROR;
13103            goto bxe_nic_load_error3;
13104        }
13105    }
13106
13107    sc->link_params.feature_config_flags &=
13108        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
13109
13110    /* start fast path */
13111
13112    /* Initialize Rx filter */
13113    bxe_set_rx_mode(sc);
13114
13115    /* start the Tx */
13116    switch (/* XXX load_mode */LOAD_OPEN) {
13117    case LOAD_NORMAL:
13118    case LOAD_OPEN:
13119        break;
13120
13121    case LOAD_DIAG:
13122    case LOAD_LOOPBACK_EXT:
13123        sc->state = BXE_STATE_DIAG;
13124        break;
13125
13126    default:
13127        break;
13128    }
13129
13130    if (sc->port.pmf) {
13131        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
13132    } else {
13133        bxe_link_status_update(sc);
13134    }
13135
13136    /* start the periodic timer callout */
13137    bxe_periodic_start(sc);
13138
13139    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
13140        /* mark driver is loaded in shmem2 */
13141        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
13142        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
13143                  (val |
13144                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
13145                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
13146    }
13147
13148    /* wait for all pending SP commands to complete */
13149    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
13150        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
13151        bxe_periodic_stop(sc);
13152        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
13153        return (ENXIO);
13154    }
13155
13156#if 0
13157    /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
13158    if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) {
13159        bxe_dcbx_init(sc, FALSE);
13160    }
13161#endif
13162
13163    /* Tell the stack the driver is running! */
13164    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
13165
13166    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
13167
13168    return (0);
13169
13170bxe_nic_load_error3:
13171
13172    if (IS_PF(sc)) {
13173        bxe_int_disable_sync(sc, 1);
13174
13175        /* clean out queued objects */
13176        bxe_squeeze_objects(sc);
13177    }
13178
13179    bxe_interrupt_detach(sc);
13180
13181bxe_nic_load_error2:
13182
13183    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
13184        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
13185        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
13186    }
13187
13188    sc->port.pmf = 0;
13189
13190bxe_nic_load_error1:
13191
13192    /* clear pf_load status, as it was already set */
13193    if (IS_PF(sc)) {
13194        bxe_clear_pf_load(sc);
13195    }
13196
13197bxe_nic_load_error0:
13198
13199    bxe_free_fw_stats_mem(sc);
13200    bxe_free_fp_buffers(sc);
13201    bxe_free_mem(sc);
13202
13203    return (rc);
13204}
13205
13206static int
13207bxe_init_locked(struct bxe_softc *sc)
13208{
13209    int other_engine = SC_PATH(sc) ? 0 : 1;
13210    uint8_t other_load_status, load_status;
13211    uint8_t global = FALSE;
13212    int rc;
13213
13214    BXE_CORE_LOCK_ASSERT(sc);
13215
13216    /* check if the driver is already running */
13217    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
13218        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
13219        return (0);
13220    }
13221
13222    bxe_set_power_state(sc, PCI_PM_D0);
13223
13224    /*
13225     * If parity occurred during the unload, then attentions and/or
13226     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
13227     * loaded on the current engine to complete the recovery. Parity recovery
13228     * is only relevant for PF driver.
13229     */
13230    if (IS_PF(sc)) {
13231        other_load_status = bxe_get_load_status(sc, other_engine);
13232        load_status = bxe_get_load_status(sc, SC_PATH(sc));
13233
13234        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
13235            bxe_chk_parity_attn(sc, &global, TRUE)) {
13236            do {
13237                /*
13238                 * If there are attentions and they are in global blocks, set
13239                 * the GLOBAL_RESET bit regardless whether it will be this
13240                 * function that will complete the recovery or not.
13241                 */
13242                if (global) {
13243                    bxe_set_reset_global(sc);
13244                }
13245
13246                /*
13247                 * Only the first function on the current engine should try
13248                 * to recover in open. In case of attentions in global blocks
13249                 * only the first in the chip should try to recover.
13250                 */
13251                if ((!load_status && (!global || !other_load_status)) &&
13252                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
13253                    BLOGI(sc, "Recovered during init\n");
13254                    break;
13255                }
13256
13257                /* recovery has failed... */
13258                bxe_set_power_state(sc, PCI_PM_D3hot);
13259                sc->recovery_state = BXE_RECOVERY_FAILED;
13260
13261                BLOGE(sc, "Recovery flow hasn't properly "
13262                          "completed yet, try again later. "
13263                          "If you still see this message after a "
13264                          "few retries then power cycle is required.\n");
13265
13266                rc = ENXIO;
13267                goto bxe_init_locked_done;
13268            } while (0);
13269        }
13270    }
13271
13272    sc->recovery_state = BXE_RECOVERY_DONE;
13273
13274    rc = bxe_nic_load(sc, LOAD_OPEN);
13275
13276bxe_init_locked_done:
13277
13278    if (rc) {
13279        /* Tell the stack the driver is NOT running! */
13280        BLOGE(sc, "Initialization failed, "
13281                  "stack notified driver is NOT running!\n");
13282	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
13283    }
13284
13285    return (rc);
13286}
13287
13288static int
13289bxe_stop_locked(struct bxe_softc *sc)
13290{
13291    BXE_CORE_LOCK_ASSERT(sc);
13292    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
13293}
13294
13295/*
13296 * Handles controller initialization when called from an unlocked routine.
13297 * ifconfig calls this function.
13298 *
13299 * Returns:
13300 *   void
13301 */
13302static void
13303bxe_init(void *xsc)
13304{
13305    struct bxe_softc *sc = (struct bxe_softc *)xsc;
13306
13307    BXE_CORE_LOCK(sc);
13308    bxe_init_locked(sc);
13309    BXE_CORE_UNLOCK(sc);
13310}
13311
13312static int
13313bxe_init_ifnet(struct bxe_softc *sc)
13314{
13315    if_t ifp;
13316    int capabilities;
13317
13318    /* ifconfig entrypoint for media type/status reporting */
13319    ifmedia_init(&sc->ifmedia, IFM_IMASK,
13320                 bxe_ifmedia_update,
13321                 bxe_ifmedia_status);
13322
13323    /* set the default interface values */
13324    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
13325    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
13326    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13327
13328    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13329
13330    /* allocate the ifnet structure */
13331    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
13332        BLOGE(sc, "Interface allocation failed!\n");
13333        return (ENXIO);
13334    }
13335
13336    if_setsoftc(ifp, sc);
13337    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13338    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
13339    if_setioctlfn(ifp, bxe_ioctl);
13340    if_setstartfn(ifp, bxe_tx_start);
13341    if_setgetcounterfn(ifp, bxe_get_counter);
13342#if __FreeBSD_version >= 800000
13343    if_settransmitfn(ifp, bxe_tx_mq_start);
13344    if_setqflushfn(ifp, bxe_mq_flush);
13345#endif
13346#ifdef FreeBSD8_0
13347    if_settimer(ifp, 0);
13348#endif
13349    if_setinitfn(ifp, bxe_init);
13350    if_setmtu(ifp, sc->mtu);
13351    if_sethwassist(ifp, (CSUM_IP      |
13352                        CSUM_TCP      |
13353                        CSUM_UDP      |
13354                        CSUM_TSO      |
13355                        CSUM_TCP_IPV6 |
13356                        CSUM_UDP_IPV6));
13357
13358    capabilities =
13359#if __FreeBSD_version < 700000
13360        (IFCAP_VLAN_MTU       |
13361         IFCAP_VLAN_HWTAGGING |
13362         IFCAP_HWCSUM         |
13363         IFCAP_JUMBO_MTU      |
13364         IFCAP_LRO);
13365#else
13366        (IFCAP_VLAN_MTU       |
13367         IFCAP_VLAN_HWTAGGING |
13368         IFCAP_VLAN_HWTSO     |
13369         IFCAP_VLAN_HWFILTER  |
13370         IFCAP_VLAN_HWCSUM    |
13371         IFCAP_HWCSUM         |
13372         IFCAP_JUMBO_MTU      |
13373         IFCAP_LRO            |
13374         IFCAP_TSO4           |
13375         IFCAP_TSO6           |
13376         IFCAP_WOL_MAGIC);
13377#endif
13378    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13379    if_setbaudrate(ifp, IF_Gbps(10));
13380/* XXX */
13381    if_setsendqlen(ifp, sc->tx_ring_size);
13382    if_setsendqready(ifp);
13383/* XXX */
13384
13385    sc->ifp = ifp;
13386
13387    /* attach to the Ethernet interface list */
13388    ether_ifattach(ifp, sc->link_params.mac_addr);
13389
13390    return (0);
13391}
13392
13393static void
13394bxe_deallocate_bars(struct bxe_softc *sc)
13395{
13396    int i;
13397
13398    for (i = 0; i < MAX_BARS; i++) {
13399        if (sc->bar[i].resource != NULL) {
13400            bus_release_resource(sc->dev,
13401                                 SYS_RES_MEMORY,
13402                                 sc->bar[i].rid,
13403                                 sc->bar[i].resource);
13404            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13405                  i, PCIR_BAR(i));
13406        }
13407    }
13408}
13409
13410static int
13411bxe_allocate_bars(struct bxe_softc *sc)
13412{
13413    u_int flags;
13414    int i;
13415
13416    memset(sc->bar, 0, sizeof(sc->bar));
13417
13418    for (i = 0; i < MAX_BARS; i++) {
13419
13420        /* memory resources reside at BARs 0, 2, 4 */
13421        /* Run `pciconf -lb` to see mappings */
13422        if ((i != 0) && (i != 2) && (i != 4)) {
13423            continue;
13424        }
13425
13426        sc->bar[i].rid = PCIR_BAR(i);
13427
13428        flags = RF_ACTIVE;
13429        if (i == 0) {
13430            flags |= RF_SHAREABLE;
13431        }
13432
13433        if ((sc->bar[i].resource =
13434             bus_alloc_resource_any(sc->dev,
13435                                    SYS_RES_MEMORY,
13436                                    &sc->bar[i].rid,
13437                                    flags)) == NULL) {
13438#if 0
13439            /* BAR4 doesn't exist for E1 */
13440            BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n",
13441                  i, PCIR_BAR(i));
13442#endif
13443            return (0);
13444        }
13445
13446        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
13447        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13448        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13449
13450        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
13451              i, PCIR_BAR(i),
13452              (void *)rman_get_start(sc->bar[i].resource),
13453              (void *)rman_get_end(sc->bar[i].resource),
13454              rman_get_size(sc->bar[i].resource),
13455              (void *)sc->bar[i].kva);
13456    }
13457
13458    return (0);
13459}
13460
13461static void
13462bxe_get_function_num(struct bxe_softc *sc)
13463{
13464    uint32_t val = 0;
13465
13466    /*
13467     * Read the ME register to get the function number. The ME register
13468     * holds the relative-function number and absolute-function number. The
13469     * absolute-function number appears only in E2 and above. Before that
13470     * these bits always contained zero, therefore we cannot blindly use them.
13471     */
13472
13473    val = REG_RD(sc, BAR_ME_REGISTER);
13474
13475    sc->pfunc_rel =
13476        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13477    sc->path_id =
13478        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13479
13480    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13481        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13482    } else {
13483        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13484    }
13485
13486    BLOGD(sc, DBG_LOAD,
13487          "Relative function %d, Absolute function %d, Path %d\n",
13488          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13489}
13490
13491static uint32_t
13492bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13493{
13494    uint32_t shmem2_size;
13495    uint32_t offset;
13496    uint32_t mf_cfg_offset_value;
13497
13498    /* Non 57712 */
13499    offset = (SHMEM_RD(sc, func_mb) +
13500              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13501
13502    /* 57712 plus */
13503    if (sc->devinfo.shmem2_base != 0) {
13504        shmem2_size = SHMEM2_RD(sc, size);
13505        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13506            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13507            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13508                offset = mf_cfg_offset_value;
13509            }
13510        }
13511    }
13512
13513    return (offset);
13514}
13515
13516static uint32_t
13517bxe_pcie_capability_read(struct bxe_softc *sc,
13518                         int    reg,
13519                         int    width)
13520{
13521    int pcie_reg;
13522
13523    /* ensure PCIe capability is enabled */
13524    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13525        if (pcie_reg != 0) {
13526            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13527            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13528        }
13529    }
13530
13531    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13532
13533    return (0);
13534}
13535
13536static uint8_t
13537bxe_is_pcie_pending(struct bxe_softc *sc)
13538{
13539    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
13540            PCIM_EXP_STA_TRANSACTION_PND);
13541}
13542
13543/*
13544 * Walk the PCI capabiites list for the device to find what features are
13545 * supported. These capabilites may be enabled/disabled by firmware so it's
13546 * best to walk the list rather than make assumptions.
13547 */
13548static void
13549bxe_probe_pci_caps(struct bxe_softc *sc)
13550{
13551    uint16_t link_status;
13552    int reg;
13553
13554    /* check if PCI Power Management is enabled */
13555    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13556        if (reg != 0) {
13557            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13558
13559            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13560            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13561        }
13562    }
13563
13564    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
13565
13566    /* handle PCIe 2.0 workarounds for 57710 */
13567    if (CHIP_IS_E1(sc)) {
13568        /* workaround for 57710 errata E4_57710_27462 */
13569        sc->devinfo.pcie_link_speed =
13570            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13571
13572        /* workaround for 57710 errata E4_57710_27488 */
13573        sc->devinfo.pcie_link_width =
13574            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13575        if (sc->devinfo.pcie_link_speed > 1) {
13576            sc->devinfo.pcie_link_width =
13577                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
13578        }
13579    } else {
13580        sc->devinfo.pcie_link_speed =
13581            (link_status & PCIM_LINK_STA_SPEED);
13582        sc->devinfo.pcie_link_width =
13583            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13584    }
13585
13586    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13587          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13588
13589    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13590    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13591
13592    /* check if MSI capability is enabled */
13593    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13594        if (reg != 0) {
13595            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13596
13597            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13598            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13599        }
13600    }
13601
13602    /* check if MSI-X capability is enabled */
13603    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13604        if (reg != 0) {
13605            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13606
13607            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13608            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13609        }
13610    }
13611}
13612
13613static int
13614bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13615{
13616    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13617    uint32_t val;
13618
13619    /* get the outer vlan if we're in switch-dependent mode */
13620
13621    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13622    mf_info->ext_id = (uint16_t)val;
13623
13624    mf_info->multi_vnics_mode = 1;
13625
13626    if (!VALID_OVLAN(mf_info->ext_id)) {
13627        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13628        return (1);
13629    }
13630
13631    /* get the capabilities */
13632    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13633        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13634        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13635    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13636               FUNC_MF_CFG_PROTOCOL_FCOE) {
13637        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13638    } else {
13639        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13640    }
13641
13642    mf_info->vnics_per_port =
13643        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13644
13645    return (0);
13646}
13647
13648static uint32_t
13649bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13650{
13651    uint32_t retval = 0;
13652    uint32_t val;
13653
13654    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13655
13656    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13657        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13658            retval |= MF_PROTO_SUPPORT_ETHERNET;
13659        }
13660        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13661            retval |= MF_PROTO_SUPPORT_ISCSI;
13662        }
13663        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13664            retval |= MF_PROTO_SUPPORT_FCOE;
13665        }
13666    }
13667
13668    return (retval);
13669}
13670
13671static int
13672bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13673{
13674    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13675    uint32_t val;
13676
13677    /*
13678     * There is no outer vlan if we're in switch-independent mode.
13679     * If the mac is valid then assume multi-function.
13680     */
13681
13682    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13683
13684    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13685
13686    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13687
13688    mf_info->vnics_per_port =
13689        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13690
13691    return (0);
13692}
13693
13694static int
13695bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13696{
13697    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13698    uint32_t e1hov_tag;
13699    uint32_t func_config;
13700    uint32_t niv_config;
13701
13702    mf_info->multi_vnics_mode = 1;
13703
13704    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13705    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13706    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13707
13708    mf_info->ext_id =
13709        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13710                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13711
13712    mf_info->default_vlan =
13713        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13714                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13715
13716    mf_info->niv_allowed_priorities =
13717        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13718                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13719
13720    mf_info->niv_default_cos =
13721        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13722                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13723
13724    mf_info->afex_vlan_mode =
13725        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13726         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13727
13728    mf_info->niv_mba_enabled =
13729        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13730         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13731
13732    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13733
13734    mf_info->vnics_per_port =
13735        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13736
13737    return (0);
13738}
13739
13740static int
13741bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13742{
13743    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13744    uint32_t mf_cfg1;
13745    uint32_t mf_cfg2;
13746    uint32_t ovlan1;
13747    uint32_t ovlan2;
13748    uint8_t i, j;
13749
13750    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13751          SC_PORT(sc));
13752    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13753          mf_info->mf_config[SC_VN(sc)]);
13754    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13755          mf_info->multi_vnics_mode);
13756    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13757          mf_info->vnics_per_port);
13758    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13759          mf_info->ext_id);
13760    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13761          mf_info->min_bw[0], mf_info->min_bw[1],
13762          mf_info->min_bw[2], mf_info->min_bw[3]);
13763    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13764          mf_info->max_bw[0], mf_info->max_bw[1],
13765          mf_info->max_bw[2], mf_info->max_bw[3]);
13766    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13767          sc->mac_addr_str);
13768
13769    /* various MF mode sanity checks... */
13770
13771    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13772        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13773              SC_PORT(sc));
13774        return (1);
13775    }
13776
13777    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13778        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13779              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13780        return (1);
13781    }
13782
13783    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13784        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13785        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13786            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13787                  SC_VN(sc), OVLAN(sc));
13788            return (1);
13789        }
13790
13791        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13792            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13793                  mf_info->multi_vnics_mode, OVLAN(sc));
13794            return (1);
13795        }
13796
13797        /*
13798         * Verify all functions are either MF or SF mode. If MF, make sure
13799         * sure that all non-hidden functions have a valid ovlan. If SF,
13800         * make sure that all non-hidden functions have an invalid ovlan.
13801         */
13802        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13803            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13804            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13805            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13806                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13807                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13808                BLOGE(sc, "mf_mode=SD function %d MF config "
13809                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13810                      i, mf_info->multi_vnics_mode, ovlan1);
13811                return (1);
13812            }
13813        }
13814
13815        /* Verify all funcs on the same port each have a different ovlan. */
13816        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13817            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13818            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13819            /* iterate from the next function on the port to the max func */
13820            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13821                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13822                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13823                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13824                    VALID_OVLAN(ovlan1) &&
13825                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13826                    VALID_OVLAN(ovlan2) &&
13827                    (ovlan1 == ovlan2)) {
13828                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13829                              "have the same ovlan (%d)\n",
13830                          i, j, ovlan1);
13831                    return (1);
13832                }
13833            }
13834        }
13835    } /* MULTI_FUNCTION_SD */
13836
13837    return (0);
13838}
13839
13840static int
13841bxe_get_mf_cfg_info(struct bxe_softc *sc)
13842{
13843    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13844    uint32_t val, mac_upper;
13845    uint8_t i, vnic;
13846
13847    /* initialize mf_info defaults */
13848    mf_info->vnics_per_port   = 1;
13849    mf_info->multi_vnics_mode = FALSE;
13850    mf_info->path_has_ovlan   = FALSE;
13851    mf_info->mf_mode          = SINGLE_FUNCTION;
13852
13853    if (!CHIP_IS_MF_CAP(sc)) {
13854        return (0);
13855    }
13856
13857    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13858        BLOGE(sc, "Invalid mf_cfg_base!\n");
13859        return (1);
13860    }
13861
13862    /* get the MF mode (switch dependent / independent / single-function) */
13863
13864    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13865
13866    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13867    {
13868    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13869
13870        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13871
13872        /* check for legal upper mac bytes */
13873        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13874            mf_info->mf_mode = MULTI_FUNCTION_SI;
13875        } else {
13876            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13877        }
13878
13879        break;
13880
13881    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13882    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13883
13884        /* get outer vlan configuration */
13885        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13886
13887        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13888            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13889            mf_info->mf_mode = MULTI_FUNCTION_SD;
13890        } else {
13891            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13892        }
13893
13894        break;
13895
13896    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13897
13898        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13899        return (0);
13900
13901    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13902
13903        /*
13904         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13905         * and the MAC address is valid.
13906         */
13907        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13908
13909        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13910            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13911            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13912        } else {
13913            BLOGE(sc, "Invalid config for AFEX mode\n");
13914        }
13915
13916        break;
13917
13918    default:
13919
13920        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13921              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13922
13923        return (1);
13924    }
13925
13926    /* set path mf_mode (which could be different than function mf_mode) */
13927    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13928        mf_info->path_has_ovlan = TRUE;
13929    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13930        /*
13931         * Decide on path multi vnics mode. If we're not in MF mode and in
13932         * 4-port mode, this is good enough to check vnic-0 of the other port
13933         * on the same path
13934         */
13935        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13936            uint8_t other_port = !(PORT_ID(sc) & 1);
13937            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13938
13939            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13940
13941            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13942        }
13943    }
13944
13945    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13946        /* invalid MF config */
13947        if (SC_VN(sc) >= 1) {
13948            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13949            return (1);
13950        }
13951
13952        return (0);
13953    }
13954
13955    /* get the MF configuration */
13956    mf_info->mf_config[SC_VN(sc)] =
13957        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13958
13959    switch(mf_info->mf_mode)
13960    {
13961    case MULTI_FUNCTION_SD:
13962
13963        bxe_get_shmem_mf_cfg_info_sd(sc);
13964        break;
13965
13966    case MULTI_FUNCTION_SI:
13967
13968        bxe_get_shmem_mf_cfg_info_si(sc);
13969        break;
13970
13971    case MULTI_FUNCTION_AFEX:
13972
13973        bxe_get_shmem_mf_cfg_info_niv(sc);
13974        break;
13975
13976    default:
13977
13978        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13979              mf_info->mf_mode);
13980        return (1);
13981    }
13982
13983    /* get the congestion management parameters */
13984
13985    vnic = 0;
13986    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13987        /* get min/max bw */
13988        val = MFCFG_RD(sc, func_mf_config[i].config);
13989        mf_info->min_bw[vnic] =
13990            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13991        mf_info->max_bw[vnic] =
13992            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13993        vnic++;
13994    }
13995
13996    return (bxe_check_valid_mf_cfg(sc));
13997}
13998
13999static int
14000bxe_get_shmem_info(struct bxe_softc *sc)
14001{
14002    int port;
14003    uint32_t mac_hi, mac_lo, val;
14004
14005    port = SC_PORT(sc);
14006    mac_hi = mac_lo = 0;
14007
14008    sc->link_params.sc   = sc;
14009    sc->link_params.port = port;
14010
14011    /* get the hardware config info */
14012    sc->devinfo.hw_config =
14013        SHMEM_RD(sc, dev_info.shared_hw_config.config);
14014    sc->devinfo.hw_config2 =
14015        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
14016
14017    sc->link_params.hw_led_mode =
14018        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
14019         SHARED_HW_CFG_LED_MODE_SHIFT);
14020
14021    /* get the port feature config */
14022    sc->port.config =
14023        SHMEM_RD(sc, dev_info.port_feature_config[port].config),
14024
14025    /* get the link params */
14026    sc->link_params.speed_cap_mask[0] =
14027        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
14028    sc->link_params.speed_cap_mask[1] =
14029        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
14030
14031    /* get the lane config */
14032    sc->link_params.lane_config =
14033        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
14034
14035    /* get the link config */
14036    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
14037    sc->port.link_config[ELINK_INT_PHY] = val;
14038    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
14039    sc->port.link_config[ELINK_EXT_PHY1] =
14040        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
14041
14042    /* get the override preemphasis flag and enable it or turn it off */
14043    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
14044    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
14045        sc->link_params.feature_config_flags |=
14046            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
14047    } else {
14048        sc->link_params.feature_config_flags &=
14049            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
14050    }
14051
14052    /* get the initial value of the link params */
14053    sc->link_params.multi_phy_config =
14054        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
14055
14056    /* get external phy info */
14057    sc->port.ext_phy_config =
14058        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
14059
14060    /* get the multifunction configuration */
14061    bxe_get_mf_cfg_info(sc);
14062
14063    /* get the mac address */
14064    if (IS_MF(sc)) {
14065        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
14066        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
14067    } else {
14068        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
14069        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
14070    }
14071
14072    if ((mac_lo == 0) && (mac_hi == 0)) {
14073        *sc->mac_addr_str = 0;
14074        BLOGE(sc, "No Ethernet address programmed!\n");
14075    } else {
14076        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
14077        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
14078        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
14079        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
14080        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
14081        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
14082        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
14083                 "%02x:%02x:%02x:%02x:%02x:%02x",
14084                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
14085                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
14086                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
14087        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
14088    }
14089
14090#if 0
14091    if (!IS_MF(sc) &&
14092        ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
14093         PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) {
14094        sc->flags |= BXE_NO_ISCSI;
14095    }
14096    if (!IS_MF(sc) &&
14097        ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
14098         PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) {
14099        sc->flags |= BXE_NO_FCOE_FLAG;
14100    }
14101#endif
14102
14103    return (0);
14104}
14105
14106static void
14107bxe_get_tunable_params(struct bxe_softc *sc)
14108{
14109    /* sanity checks */
14110
14111    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
14112        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
14113        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
14114        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
14115        bxe_interrupt_mode = INTR_MODE_MSIX;
14116    }
14117
14118    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
14119        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
14120        bxe_queue_count = 0;
14121    }
14122
14123    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
14124        if (bxe_max_rx_bufs == 0) {
14125            bxe_max_rx_bufs = RX_BD_USABLE;
14126        } else {
14127            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
14128            bxe_max_rx_bufs = 2048;
14129        }
14130    }
14131
14132    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
14133        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
14134        bxe_hc_rx_ticks = 25;
14135    }
14136
14137    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
14138        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
14139        bxe_hc_tx_ticks = 50;
14140    }
14141
14142    if (bxe_max_aggregation_size == 0) {
14143        bxe_max_aggregation_size = TPA_AGG_SIZE;
14144    }
14145
14146    if (bxe_max_aggregation_size > 0xffff) {
14147        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
14148              bxe_max_aggregation_size);
14149        bxe_max_aggregation_size = TPA_AGG_SIZE;
14150    }
14151
14152    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
14153        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
14154        bxe_mrrs = -1;
14155    }
14156
14157    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
14158        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
14159        bxe_autogreeen = 0;
14160    }
14161
14162    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
14163        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
14164        bxe_udp_rss = 0;
14165    }
14166
14167    /* pull in user settings */
14168
14169    sc->interrupt_mode       = bxe_interrupt_mode;
14170    sc->max_rx_bufs          = bxe_max_rx_bufs;
14171    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
14172    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
14173    sc->max_aggregation_size = bxe_max_aggregation_size;
14174    sc->mrrs                 = bxe_mrrs;
14175    sc->autogreeen           = bxe_autogreeen;
14176    sc->udp_rss              = bxe_udp_rss;
14177
14178    if (bxe_interrupt_mode == INTR_MODE_INTX) {
14179        sc->num_queues = 1;
14180    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
14181        sc->num_queues =
14182            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
14183                MAX_RSS_CHAINS);
14184        if (sc->num_queues > mp_ncpus) {
14185            sc->num_queues = mp_ncpus;
14186        }
14187    }
14188
14189    BLOGD(sc, DBG_LOAD,
14190          "User Config: "
14191          "debug=0x%lx "
14192          "interrupt_mode=%d "
14193          "queue_count=%d "
14194          "hc_rx_ticks=%d "
14195          "hc_tx_ticks=%d "
14196          "rx_budget=%d "
14197          "max_aggregation_size=%d "
14198          "mrrs=%d "
14199          "autogreeen=%d "
14200          "udp_rss=%d\n",
14201          bxe_debug,
14202          sc->interrupt_mode,
14203          sc->num_queues,
14204          sc->hc_rx_ticks,
14205          sc->hc_tx_ticks,
14206          bxe_rx_budget,
14207          sc->max_aggregation_size,
14208          sc->mrrs,
14209          sc->autogreeen,
14210          sc->udp_rss);
14211}
14212
14213static void
14214bxe_media_detect(struct bxe_softc *sc)
14215{
14216    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
14217    switch (sc->link_params.phy[phy_idx].media_type) {
14218    case ELINK_ETH_PHY_SFPP_10G_FIBER:
14219    case ELINK_ETH_PHY_XFP_FIBER:
14220        BLOGI(sc, "Found 10Gb Fiber media.\n");
14221        sc->media = IFM_10G_SR;
14222        break;
14223    case ELINK_ETH_PHY_SFP_1G_FIBER:
14224        BLOGI(sc, "Found 1Gb Fiber media.\n");
14225        sc->media = IFM_1000_SX;
14226        break;
14227    case ELINK_ETH_PHY_KR:
14228    case ELINK_ETH_PHY_CX4:
14229        BLOGI(sc, "Found 10GBase-CX4 media.\n");
14230        sc->media = IFM_10G_CX4;
14231        break;
14232    case ELINK_ETH_PHY_DA_TWINAX:
14233        BLOGI(sc, "Found 10Gb Twinax media.\n");
14234        sc->media = IFM_10G_TWINAX;
14235        break;
14236    case ELINK_ETH_PHY_BASE_T:
14237        if (sc->link_params.speed_cap_mask[0] &
14238            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
14239            BLOGI(sc, "Found 10GBase-T media.\n");
14240            sc->media = IFM_10G_T;
14241        } else {
14242            BLOGI(sc, "Found 1000Base-T media.\n");
14243            sc->media = IFM_1000_T;
14244        }
14245        break;
14246    case ELINK_ETH_PHY_NOT_PRESENT:
14247        BLOGI(sc, "Media not present.\n");
14248        sc->media = 0;
14249        break;
14250    case ELINK_ETH_PHY_UNSPECIFIED:
14251    default:
14252        BLOGI(sc, "Unknown media!\n");
14253        sc->media = 0;
14254        break;
14255    }
14256}
14257
14258#define GET_FIELD(value, fname)                     \
14259    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
14260#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
14261#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
14262
14263static int
14264bxe_get_igu_cam_info(struct bxe_softc *sc)
14265{
14266    int pfid = SC_FUNC(sc);
14267    int igu_sb_id;
14268    uint32_t val;
14269    uint8_t fid, igu_sb_cnt = 0;
14270
14271    sc->igu_base_sb = 0xff;
14272
14273    if (CHIP_INT_MODE_IS_BC(sc)) {
14274        int vn = SC_VN(sc);
14275        igu_sb_cnt = sc->igu_sb_cnt;
14276        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
14277                           FP_SB_MAX_E1x);
14278        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
14279                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
14280        return (0);
14281    }
14282
14283    /* IGU in normal mode - read CAM */
14284    for (igu_sb_id = 0;
14285         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
14286         igu_sb_id++) {
14287        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
14288        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
14289            continue;
14290        }
14291        fid = IGU_FID(val);
14292        if ((fid & IGU_FID_ENCODE_IS_PF)) {
14293            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
14294                continue;
14295            }
14296            if (IGU_VEC(val) == 0) {
14297                /* default status block */
14298                sc->igu_dsb_id = igu_sb_id;
14299            } else {
14300                if (sc->igu_base_sb == 0xff) {
14301                    sc->igu_base_sb = igu_sb_id;
14302                }
14303                igu_sb_cnt++;
14304            }
14305        }
14306    }
14307
14308    /*
14309     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
14310     * that number of CAM entries will not be equal to the value advertised in
14311     * PCI. Driver should use the minimal value of both as the actual status
14312     * block count
14313     */
14314    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
14315
14316    if (igu_sb_cnt == 0) {
14317        BLOGE(sc, "CAM configuration error\n");
14318        return (-1);
14319    }
14320
14321    return (0);
14322}
14323
14324/*
14325 * Gather various information from the device config space, the device itself,
14326 * shmem, and the user input.
14327 */
14328static int
14329bxe_get_device_info(struct bxe_softc *sc)
14330{
14331    uint32_t val;
14332    int rc;
14333
14334    /* Get the data for the device */
14335    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
14336    sc->devinfo.device_id    = pci_get_device(sc->dev);
14337    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
14338    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
14339
14340    /* get the chip revision (chip metal comes from pci config space) */
14341    sc->devinfo.chip_id     =
14342    sc->link_params.chip_id =
14343        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
14344         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
14345         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
14346         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
14347
14348    /* force 57811 according to MISC register */
14349    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14350        if (CHIP_IS_57810(sc)) {
14351            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14352                                   (sc->devinfo.chip_id & 0x0000ffff));
14353        } else if (CHIP_IS_57810_MF(sc)) {
14354            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14355                                   (sc->devinfo.chip_id & 0x0000ffff));
14356        }
14357        sc->devinfo.chip_id |= 0x1;
14358    }
14359
14360    BLOGD(sc, DBG_LOAD,
14361          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14362          sc->devinfo.chip_id,
14363          ((sc->devinfo.chip_id >> 16) & 0xffff),
14364          ((sc->devinfo.chip_id >> 12) & 0xf),
14365          ((sc->devinfo.chip_id >>  4) & 0xff),
14366          ((sc->devinfo.chip_id >>  0) & 0xf));
14367
14368    val = (REG_RD(sc, 0x2874) & 0x55);
14369    if ((sc->devinfo.chip_id & 0x1) ||
14370        (CHIP_IS_E1(sc) && val) ||
14371        (CHIP_IS_E1H(sc) && (val == 0x55))) {
14372        sc->flags |= BXE_ONE_PORT_FLAG;
14373        BLOGD(sc, DBG_LOAD, "single port device\n");
14374    }
14375
14376    /* set the doorbell size */
14377    sc->doorbell_size = (1 << BXE_DB_SHIFT);
14378
14379    /* determine whether the device is in 2 port or 4 port mode */
14380    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14381    if (CHIP_IS_E2E3(sc)) {
14382        /*
14383         * Read port4mode_en_ovwr[0]:
14384         *   If 1, four port mode is in port4mode_en_ovwr[1].
14385         *   If 0, four port mode is in port4mode_en[0].
14386         */
14387        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14388        if (val & 1) {
14389            val = ((val >> 1) & 1);
14390        } else {
14391            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14392        }
14393
14394        sc->devinfo.chip_port_mode =
14395            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14396
14397        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14398    }
14399
14400    /* get the function and path info for the device */
14401    bxe_get_function_num(sc);
14402
14403    /* get the shared memory base address */
14404    sc->devinfo.shmem_base     =
14405    sc->link_params.shmem_base =
14406        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14407    sc->devinfo.shmem2_base =
14408        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14409                                  MISC_REG_GENERIC_CR_0));
14410
14411    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14412          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14413
14414    if (!sc->devinfo.shmem_base) {
14415        /* this should ONLY prevent upcoming shmem reads */
14416        BLOGI(sc, "MCP not active\n");
14417        sc->flags |= BXE_NO_MCP_FLAG;
14418        return (0);
14419    }
14420
14421    /* make sure the shared memory contents are valid */
14422    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14423    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14424        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14425        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14426        return (0);
14427    }
14428    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14429
14430    /* get the bootcode version */
14431    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14432    snprintf(sc->devinfo.bc_ver_str,
14433             sizeof(sc->devinfo.bc_ver_str),
14434             "%d.%d.%d",
14435             ((sc->devinfo.bc_ver >> 24) & 0xff),
14436             ((sc->devinfo.bc_ver >> 16) & 0xff),
14437             ((sc->devinfo.bc_ver >>  8) & 0xff));
14438    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14439
14440    /* get the bootcode shmem address */
14441    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14442    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
14443
14444    /* clean indirect addresses as they're not used */
14445    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14446    if (IS_PF(sc)) {
14447        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14448        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14449        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14450        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14451        if (CHIP_IS_E1x(sc)) {
14452            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14453            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14454            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14455            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14456        }
14457
14458        /*
14459         * Enable internal target-read (in case we are probed after PF
14460         * FLR). Must be done prior to any BAR read access. Only for
14461         * 57712 and up
14462         */
14463        if (!CHIP_IS_E1x(sc)) {
14464            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14465        }
14466    }
14467
14468    /* get the nvram size */
14469    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14470    sc->devinfo.flash_size =
14471        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14472    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14473
14474    /* get PCI capabilites */
14475    bxe_probe_pci_caps(sc);
14476
14477    bxe_set_power_state(sc, PCI_PM_D0);
14478
14479    /* get various configuration parameters from shmem */
14480    bxe_get_shmem_info(sc);
14481
14482    if (sc->devinfo.pcie_msix_cap_reg != 0) {
14483        val = pci_read_config(sc->dev,
14484                              (sc->devinfo.pcie_msix_cap_reg +
14485                               PCIR_MSIX_CTRL),
14486                              2);
14487        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14488    } else {
14489        sc->igu_sb_cnt = 1;
14490    }
14491
14492    sc->igu_base_addr = BAR_IGU_INTMEM;
14493
14494    /* initialize IGU parameters */
14495    if (CHIP_IS_E1x(sc)) {
14496        sc->devinfo.int_block = INT_BLOCK_HC;
14497        sc->igu_dsb_id = DEF_SB_IGU_ID;
14498        sc->igu_base_sb = 0;
14499    } else {
14500        sc->devinfo.int_block = INT_BLOCK_IGU;
14501
14502        /* do not allow device reset during IGU info preocessing */
14503        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14504
14505        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14506
14507        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14508            int tout = 5000;
14509
14510            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14511
14512            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14513            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14514            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14515
14516            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14517                tout--;
14518                DELAY(1000);
14519            }
14520
14521            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14522                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14523                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14524                return (-1);
14525            }
14526        }
14527
14528        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14529            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14530            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14531        } else {
14532            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14533        }
14534
14535        rc = bxe_get_igu_cam_info(sc);
14536
14537        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14538
14539        if (rc) {
14540            return (rc);
14541        }
14542    }
14543
14544    /*
14545     * Get base FW non-default (fast path) status block ID. This value is
14546     * used to initialize the fw_sb_id saved on the fp/queue structure to
14547     * determine the id used by the FW.
14548     */
14549    if (CHIP_IS_E1x(sc)) {
14550        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14551    } else {
14552        /*
14553         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14554         * the same queue are indicated on the same IGU SB). So we prefer
14555         * FW and IGU SBs to be the same value.
14556         */
14557        sc->base_fw_ndsb = sc->igu_base_sb;
14558    }
14559
14560    BLOGD(sc, DBG_LOAD,
14561          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14562          sc->igu_dsb_id, sc->igu_base_sb,
14563          sc->igu_sb_cnt, sc->base_fw_ndsb);
14564
14565    elink_phy_probe(&sc->link_params);
14566
14567    return (0);
14568}
14569
14570static void
14571bxe_link_settings_supported(struct bxe_softc *sc,
14572                            uint32_t         switch_cfg)
14573{
14574    uint32_t cfg_size = 0;
14575    uint32_t idx;
14576    uint8_t port = SC_PORT(sc);
14577
14578    /* aggregation of supported attributes of all external phys */
14579    sc->port.supported[0] = 0;
14580    sc->port.supported[1] = 0;
14581
14582    switch (sc->link_params.num_phys) {
14583    case 1:
14584        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14585        cfg_size = 1;
14586        break;
14587    case 2:
14588        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14589        cfg_size = 1;
14590        break;
14591    case 3:
14592        if (sc->link_params.multi_phy_config &
14593            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14594            sc->port.supported[1] =
14595                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14596            sc->port.supported[0] =
14597                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14598        } else {
14599            sc->port.supported[0] =
14600                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14601            sc->port.supported[1] =
14602                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14603        }
14604        cfg_size = 2;
14605        break;
14606    }
14607
14608    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14609        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14610              SHMEM_RD(sc,
14611                       dev_info.port_hw_config[port].external_phy_config),
14612              SHMEM_RD(sc,
14613                       dev_info.port_hw_config[port].external_phy_config2));
14614        return;
14615    }
14616
14617    if (CHIP_IS_E3(sc))
14618        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14619    else {
14620        switch (switch_cfg) {
14621        case ELINK_SWITCH_CFG_1G:
14622            sc->port.phy_addr =
14623                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14624            break;
14625        case ELINK_SWITCH_CFG_10G:
14626            sc->port.phy_addr =
14627                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14628            break;
14629        default:
14630            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14631                  sc->port.link_config[0]);
14632            return;
14633        }
14634    }
14635
14636    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14637
14638    /* mask what we support according to speed_cap_mask per configuration */
14639    for (idx = 0; idx < cfg_size; idx++) {
14640        if (!(sc->link_params.speed_cap_mask[idx] &
14641              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14642            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14643        }
14644
14645        if (!(sc->link_params.speed_cap_mask[idx] &
14646              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14647            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14648        }
14649
14650        if (!(sc->link_params.speed_cap_mask[idx] &
14651              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14652            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14653        }
14654
14655        if (!(sc->link_params.speed_cap_mask[idx] &
14656              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14657            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14658        }
14659
14660        if (!(sc->link_params.speed_cap_mask[idx] &
14661              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14662            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14663        }
14664
14665        if (!(sc->link_params.speed_cap_mask[idx] &
14666              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14667            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14668        }
14669
14670        if (!(sc->link_params.speed_cap_mask[idx] &
14671              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14672            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14673        }
14674
14675        if (!(sc->link_params.speed_cap_mask[idx] &
14676              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14677            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14678        }
14679    }
14680
14681    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14682          sc->port.supported[0], sc->port.supported[1]);
14683}
14684
14685static void
14686bxe_link_settings_requested(struct bxe_softc *sc)
14687{
14688    uint32_t link_config;
14689    uint32_t idx;
14690    uint32_t cfg_size = 0;
14691
14692    sc->port.advertising[0] = 0;
14693    sc->port.advertising[1] = 0;
14694
14695    switch (sc->link_params.num_phys) {
14696    case 1:
14697    case 2:
14698        cfg_size = 1;
14699        break;
14700    case 3:
14701        cfg_size = 2;
14702        break;
14703    }
14704
14705    for (idx = 0; idx < cfg_size; idx++) {
14706        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14707        link_config = sc->port.link_config[idx];
14708
14709        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14710        case PORT_FEATURE_LINK_SPEED_AUTO:
14711            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14712                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14713                sc->port.advertising[idx] |= sc->port.supported[idx];
14714                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14715                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14716                    sc->port.advertising[idx] |=
14717                        (ELINK_SUPPORTED_100baseT_Half |
14718                         ELINK_SUPPORTED_100baseT_Full);
14719            } else {
14720                /* force 10G, no AN */
14721                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14722                sc->port.advertising[idx] |=
14723                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14724                continue;
14725            }
14726            break;
14727
14728        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14729            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14730                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14731                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14732                                              ADVERTISED_TP);
14733            } else {
14734                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14735                          "speed_cap_mask=0x%08x\n",
14736                      link_config, sc->link_params.speed_cap_mask[idx]);
14737                return;
14738            }
14739            break;
14740
14741        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14742            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14743                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14744                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14745                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14746                                              ADVERTISED_TP);
14747            } else {
14748                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14749                          "speed_cap_mask=0x%08x\n",
14750                      link_config, sc->link_params.speed_cap_mask[idx]);
14751                return;
14752            }
14753            break;
14754
14755        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14756            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14757                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14758                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14759                                              ADVERTISED_TP);
14760            } else {
14761                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14762                          "speed_cap_mask=0x%08x\n",
14763                      link_config, sc->link_params.speed_cap_mask[idx]);
14764                return;
14765            }
14766            break;
14767
14768        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14769            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14770                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14771                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14772                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14773                                              ADVERTISED_TP);
14774            } else {
14775                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14776                          "speed_cap_mask=0x%08x\n",
14777                      link_config, sc->link_params.speed_cap_mask[idx]);
14778                return;
14779            }
14780            break;
14781
14782        case PORT_FEATURE_LINK_SPEED_1G:
14783            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14784                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14785                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14786                                              ADVERTISED_TP);
14787            } else {
14788                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14789                          "speed_cap_mask=0x%08x\n",
14790                      link_config, sc->link_params.speed_cap_mask[idx]);
14791                return;
14792            }
14793            break;
14794
14795        case PORT_FEATURE_LINK_SPEED_2_5G:
14796            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14797                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14798                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14799                                              ADVERTISED_TP);
14800            } else {
14801                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14802                          "speed_cap_mask=0x%08x\n",
14803                      link_config, sc->link_params.speed_cap_mask[idx]);
14804                return;
14805            }
14806            break;
14807
14808        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14809            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14810                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14811                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14812                                              ADVERTISED_FIBRE);
14813            } else {
14814                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14815                          "speed_cap_mask=0x%08x\n",
14816                      link_config, sc->link_params.speed_cap_mask[idx]);
14817                return;
14818            }
14819            break;
14820
14821        case PORT_FEATURE_LINK_SPEED_20G:
14822            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14823            break;
14824
14825        default:
14826            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14827                      "speed_cap_mask=0x%08x\n",
14828                  link_config, sc->link_params.speed_cap_mask[idx]);
14829            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14830            sc->port.advertising[idx] = sc->port.supported[idx];
14831            break;
14832        }
14833
14834        sc->link_params.req_flow_ctrl[idx] =
14835            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14836
14837        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14838            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14839                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14840            } else {
14841                bxe_set_requested_fc(sc);
14842            }
14843        }
14844
14845        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14846                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14847              sc->link_params.req_line_speed[idx],
14848              sc->link_params.req_duplex[idx],
14849              sc->link_params.req_flow_ctrl[idx],
14850              sc->port.advertising[idx]);
14851    }
14852}
14853
14854static void
14855bxe_get_phy_info(struct bxe_softc *sc)
14856{
14857    uint8_t port = SC_PORT(sc);
14858    uint32_t config = sc->port.config;
14859    uint32_t eee_mode;
14860
14861    /* shmem data already read in bxe_get_shmem_info() */
14862
14863    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14864                        "link_config0=0x%08x\n",
14865               sc->link_params.lane_config,
14866               sc->link_params.speed_cap_mask[0],
14867               sc->port.link_config[0]);
14868
14869    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14870    bxe_link_settings_requested(sc);
14871
14872    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14873        sc->link_params.feature_config_flags |=
14874            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14875    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14876        sc->link_params.feature_config_flags &=
14877            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14878    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14879        sc->link_params.feature_config_flags |=
14880            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14881    }
14882
14883    /* configure link feature according to nvram value */
14884    eee_mode =
14885        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14886          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14887         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14888    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14889        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14890                                    ELINK_EEE_MODE_ENABLE_LPI |
14891                                    ELINK_EEE_MODE_OUTPUT_TIME);
14892    } else {
14893        sc->link_params.eee_mode = 0;
14894    }
14895
14896    /* get the media type */
14897    bxe_media_detect(sc);
14898}
14899
14900static void
14901bxe_get_params(struct bxe_softc *sc)
14902{
14903    /* get user tunable params */
14904    bxe_get_tunable_params(sc);
14905
14906    /* select the RX and TX ring sizes */
14907    sc->tx_ring_size = TX_BD_USABLE;
14908    sc->rx_ring_size = RX_BD_USABLE;
14909
14910    /* XXX disable WoL */
14911    sc->wol = 0;
14912}
14913
14914static void
14915bxe_set_modes_bitmap(struct bxe_softc *sc)
14916{
14917    uint32_t flags = 0;
14918
14919    if (CHIP_REV_IS_FPGA(sc)) {
14920        SET_FLAGS(flags, MODE_FPGA);
14921    } else if (CHIP_REV_IS_EMUL(sc)) {
14922        SET_FLAGS(flags, MODE_EMUL);
14923    } else {
14924        SET_FLAGS(flags, MODE_ASIC);
14925    }
14926
14927    if (CHIP_IS_MODE_4_PORT(sc)) {
14928        SET_FLAGS(flags, MODE_PORT4);
14929    } else {
14930        SET_FLAGS(flags, MODE_PORT2);
14931    }
14932
14933    if (CHIP_IS_E2(sc)) {
14934        SET_FLAGS(flags, MODE_E2);
14935    } else if (CHIP_IS_E3(sc)) {
14936        SET_FLAGS(flags, MODE_E3);
14937        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14938            SET_FLAGS(flags, MODE_E3_A0);
14939        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14940            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14941        }
14942    }
14943
14944    if (IS_MF(sc)) {
14945        SET_FLAGS(flags, MODE_MF);
14946        switch (sc->devinfo.mf_info.mf_mode) {
14947        case MULTI_FUNCTION_SD:
14948            SET_FLAGS(flags, MODE_MF_SD);
14949            break;
14950        case MULTI_FUNCTION_SI:
14951            SET_FLAGS(flags, MODE_MF_SI);
14952            break;
14953        case MULTI_FUNCTION_AFEX:
14954            SET_FLAGS(flags, MODE_MF_AFEX);
14955            break;
14956        }
14957    } else {
14958        SET_FLAGS(flags, MODE_SF);
14959    }
14960
14961#if defined(__LITTLE_ENDIAN)
14962    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14963#else /* __BIG_ENDIAN */
14964    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14965#endif
14966
14967    INIT_MODE_FLAGS(sc) = flags;
14968}
14969
14970static int
14971bxe_alloc_hsi_mem(struct bxe_softc *sc)
14972{
14973    struct bxe_fastpath *fp;
14974    bus_addr_t busaddr;
14975    int max_agg_queues;
14976    int max_segments;
14977    bus_size_t max_size;
14978    bus_size_t max_seg_size;
14979    char buf[32];
14980    int rc;
14981    int i, j;
14982
14983    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14984
14985    /* allocate the parent bus DMA tag */
14986    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14987                            1,                        /* alignment */
14988                            0,                        /* boundary limit */
14989                            BUS_SPACE_MAXADDR,        /* restricted low */
14990                            BUS_SPACE_MAXADDR,        /* restricted hi */
14991                            NULL,                     /* addr filter() */
14992                            NULL,                     /* addr filter() arg */
14993                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14994                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14995                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14996                            0,                        /* flags */
14997                            NULL,                     /* lock() */
14998                            NULL,                     /* lock() arg */
14999                            &sc->parent_dma_tag);     /* returned dma tag */
15000    if (rc != 0) {
15001        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
15002        return (1);
15003    }
15004
15005    /************************/
15006    /* DEFAULT STATUS BLOCK */
15007    /************************/
15008
15009    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
15010                      &sc->def_sb_dma, "default status block") != 0) {
15011        /* XXX */
15012        bus_dma_tag_destroy(sc->parent_dma_tag);
15013        return (1);
15014    }
15015
15016    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
15017
15018    /***************/
15019    /* EVENT QUEUE */
15020    /***************/
15021
15022    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
15023                      &sc->eq_dma, "event queue") != 0) {
15024        /* XXX */
15025        bxe_dma_free(sc, &sc->def_sb_dma);
15026        sc->def_sb = NULL;
15027        bus_dma_tag_destroy(sc->parent_dma_tag);
15028        return (1);
15029    }
15030
15031    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
15032
15033    /*************/
15034    /* SLOW PATH */
15035    /*************/
15036
15037    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
15038                      &sc->sp_dma, "slow path") != 0) {
15039        /* XXX */
15040        bxe_dma_free(sc, &sc->eq_dma);
15041        sc->eq = NULL;
15042        bxe_dma_free(sc, &sc->def_sb_dma);
15043        sc->def_sb = NULL;
15044        bus_dma_tag_destroy(sc->parent_dma_tag);
15045        return (1);
15046    }
15047
15048    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
15049
15050    /*******************/
15051    /* SLOW PATH QUEUE */
15052    /*******************/
15053
15054    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
15055                      &sc->spq_dma, "slow path queue") != 0) {
15056        /* XXX */
15057        bxe_dma_free(sc, &sc->sp_dma);
15058        sc->sp = NULL;
15059        bxe_dma_free(sc, &sc->eq_dma);
15060        sc->eq = NULL;
15061        bxe_dma_free(sc, &sc->def_sb_dma);
15062        sc->def_sb = NULL;
15063        bus_dma_tag_destroy(sc->parent_dma_tag);
15064        return (1);
15065    }
15066
15067    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
15068
15069    /***************************/
15070    /* FW DECOMPRESSION BUFFER */
15071    /***************************/
15072
15073    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
15074                      "fw decompression buffer") != 0) {
15075        /* XXX */
15076        bxe_dma_free(sc, &sc->spq_dma);
15077        sc->spq = NULL;
15078        bxe_dma_free(sc, &sc->sp_dma);
15079        sc->sp = NULL;
15080        bxe_dma_free(sc, &sc->eq_dma);
15081        sc->eq = NULL;
15082        bxe_dma_free(sc, &sc->def_sb_dma);
15083        sc->def_sb = NULL;
15084        bus_dma_tag_destroy(sc->parent_dma_tag);
15085        return (1);
15086    }
15087
15088    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
15089
15090    if ((sc->gz_strm =
15091         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
15092        /* XXX */
15093        bxe_dma_free(sc, &sc->gz_buf_dma);
15094        sc->gz_buf = NULL;
15095        bxe_dma_free(sc, &sc->spq_dma);
15096        sc->spq = NULL;
15097        bxe_dma_free(sc, &sc->sp_dma);
15098        sc->sp = NULL;
15099        bxe_dma_free(sc, &sc->eq_dma);
15100        sc->eq = NULL;
15101        bxe_dma_free(sc, &sc->def_sb_dma);
15102        sc->def_sb = NULL;
15103        bus_dma_tag_destroy(sc->parent_dma_tag);
15104        return (1);
15105    }
15106
15107    /*************/
15108    /* FASTPATHS */
15109    /*************/
15110
15111    /* allocate DMA memory for each fastpath structure */
15112    for (i = 0; i < sc->num_queues; i++) {
15113        fp = &sc->fp[i];
15114        fp->sc    = sc;
15115        fp->index = i;
15116
15117        /*******************/
15118        /* FP STATUS BLOCK */
15119        /*******************/
15120
15121        snprintf(buf, sizeof(buf), "fp %d status block", i);
15122        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
15123                          &fp->sb_dma, buf) != 0) {
15124            /* XXX unwind and free previous fastpath allocations */
15125            BLOGE(sc, "Failed to alloc %s\n", buf);
15126            return (1);
15127        } else {
15128            if (CHIP_IS_E2E3(sc)) {
15129                fp->status_block.e2_sb =
15130                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
15131            } else {
15132                fp->status_block.e1x_sb =
15133                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
15134            }
15135        }
15136
15137        /******************/
15138        /* FP TX BD CHAIN */
15139        /******************/
15140
15141        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
15142        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
15143                          &fp->tx_dma, buf) != 0) {
15144            /* XXX unwind and free previous fastpath allocations */
15145            BLOGE(sc, "Failed to alloc %s\n", buf);
15146            return (1);
15147        } else {
15148            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
15149        }
15150
15151        /* link together the tx bd chain pages */
15152        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
15153            /* index into the tx bd chain array to last entry per page */
15154            struct eth_tx_next_bd *tx_next_bd =
15155                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
15156            /* point to the next page and wrap from last page */
15157            busaddr = (fp->tx_dma.paddr +
15158                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
15159            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
15160            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
15161        }
15162
15163        /******************/
15164        /* FP RX BD CHAIN */
15165        /******************/
15166
15167        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
15168        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
15169                          &fp->rx_dma, buf) != 0) {
15170            /* XXX unwind and free previous fastpath allocations */
15171            BLOGE(sc, "Failed to alloc %s\n", buf);
15172            return (1);
15173        } else {
15174            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
15175        }
15176
15177        /* link together the rx bd chain pages */
15178        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
15179            /* index into the rx bd chain array to last entry per page */
15180            struct eth_rx_bd *rx_bd =
15181                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
15182            /* point to the next page and wrap from last page */
15183            busaddr = (fp->rx_dma.paddr +
15184                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
15185            rx_bd->addr_hi = htole32(U64_HI(busaddr));
15186            rx_bd->addr_lo = htole32(U64_LO(busaddr));
15187        }
15188
15189        /*******************/
15190        /* FP RX RCQ CHAIN */
15191        /*******************/
15192
15193        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
15194        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
15195                          &fp->rcq_dma, buf) != 0) {
15196            /* XXX unwind and free previous fastpath allocations */
15197            BLOGE(sc, "Failed to alloc %s\n", buf);
15198            return (1);
15199        } else {
15200            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
15201        }
15202
15203        /* link together the rcq chain pages */
15204        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
15205            /* index into the rcq chain array to last entry per page */
15206            struct eth_rx_cqe_next_page *rx_cqe_next =
15207                (struct eth_rx_cqe_next_page *)
15208                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
15209            /* point to the next page and wrap from last page */
15210            busaddr = (fp->rcq_dma.paddr +
15211                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
15212            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
15213            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
15214        }
15215
15216        /*******************/
15217        /* FP RX SGE CHAIN */
15218        /*******************/
15219
15220        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
15221        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
15222                          &fp->rx_sge_dma, buf) != 0) {
15223            /* XXX unwind and free previous fastpath allocations */
15224            BLOGE(sc, "Failed to alloc %s\n", buf);
15225            return (1);
15226        } else {
15227            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
15228        }
15229
15230        /* link together the sge chain pages */
15231        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
15232            /* index into the rcq chain array to last entry per page */
15233            struct eth_rx_sge *rx_sge =
15234                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
15235            /* point to the next page and wrap from last page */
15236            busaddr = (fp->rx_sge_dma.paddr +
15237                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
15238            rx_sge->addr_hi = htole32(U64_HI(busaddr));
15239            rx_sge->addr_lo = htole32(U64_LO(busaddr));
15240        }
15241
15242        /***********************/
15243        /* FP TX MBUF DMA MAPS */
15244        /***********************/
15245
15246        /* set required sizes before mapping to conserve resources */
15247        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
15248            max_size     = BXE_TSO_MAX_SIZE;
15249            max_segments = BXE_TSO_MAX_SEGMENTS;
15250            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
15251        } else {
15252            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
15253            max_segments = BXE_MAX_SEGMENTS;
15254            max_seg_size = MCLBYTES;
15255        }
15256
15257        /* create a dma tag for the tx mbufs */
15258        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15259                                1,                  /* alignment */
15260                                0,                  /* boundary limit */
15261                                BUS_SPACE_MAXADDR,  /* restricted low */
15262                                BUS_SPACE_MAXADDR,  /* restricted hi */
15263                                NULL,               /* addr filter() */
15264                                NULL,               /* addr filter() arg */
15265                                max_size,           /* max map size */
15266                                max_segments,       /* num discontinuous */
15267                                max_seg_size,       /* max seg size */
15268                                0,                  /* flags */
15269                                NULL,               /* lock() */
15270                                NULL,               /* lock() arg */
15271                                &fp->tx_mbuf_tag);  /* returned dma tag */
15272        if (rc != 0) {
15273            /* XXX unwind and free previous fastpath allocations */
15274            BLOGE(sc, "Failed to create dma tag for "
15275                      "'fp %d tx mbufs' (%d)\n",
15276                  i, rc);
15277            return (1);
15278        }
15279
15280        /* create dma maps for each of the tx mbuf clusters */
15281        for (j = 0; j < TX_BD_TOTAL; j++) {
15282            if (bus_dmamap_create(fp->tx_mbuf_tag,
15283                                  BUS_DMA_NOWAIT,
15284                                  &fp->tx_mbuf_chain[j].m_map)) {
15285                /* XXX unwind and free previous fastpath allocations */
15286                BLOGE(sc, "Failed to create dma map for "
15287                          "'fp %d tx mbuf %d' (%d)\n",
15288                      i, j, rc);
15289                return (1);
15290            }
15291        }
15292
15293        /***********************/
15294        /* FP RX MBUF DMA MAPS */
15295        /***********************/
15296
15297        /* create a dma tag for the rx mbufs */
15298        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15299                                1,                  /* alignment */
15300                                0,                  /* boundary limit */
15301                                BUS_SPACE_MAXADDR,  /* restricted low */
15302                                BUS_SPACE_MAXADDR,  /* restricted hi */
15303                                NULL,               /* addr filter() */
15304                                NULL,               /* addr filter() arg */
15305                                MJUM9BYTES,         /* max map size */
15306                                1,                  /* num discontinuous */
15307                                MJUM9BYTES,         /* max seg size */
15308                                0,                  /* flags */
15309                                NULL,               /* lock() */
15310                                NULL,               /* lock() arg */
15311                                &fp->rx_mbuf_tag);  /* returned dma tag */
15312        if (rc != 0) {
15313            /* XXX unwind and free previous fastpath allocations */
15314            BLOGE(sc, "Failed to create dma tag for "
15315                      "'fp %d rx mbufs' (%d)\n",
15316                  i, rc);
15317            return (1);
15318        }
15319
15320        /* create dma maps for each of the rx mbuf clusters */
15321        for (j = 0; j < RX_BD_TOTAL; j++) {
15322            if (bus_dmamap_create(fp->rx_mbuf_tag,
15323                                  BUS_DMA_NOWAIT,
15324                                  &fp->rx_mbuf_chain[j].m_map)) {
15325                /* XXX unwind and free previous fastpath allocations */
15326                BLOGE(sc, "Failed to create dma map for "
15327                          "'fp %d rx mbuf %d' (%d)\n",
15328                      i, j, rc);
15329                return (1);
15330            }
15331        }
15332
15333        /* create dma map for the spare rx mbuf cluster */
15334        if (bus_dmamap_create(fp->rx_mbuf_tag,
15335                              BUS_DMA_NOWAIT,
15336                              &fp->rx_mbuf_spare_map)) {
15337            /* XXX unwind and free previous fastpath allocations */
15338            BLOGE(sc, "Failed to create dma map for "
15339                      "'fp %d spare rx mbuf' (%d)\n",
15340                  i, rc);
15341            return (1);
15342        }
15343
15344        /***************************/
15345        /* FP RX SGE MBUF DMA MAPS */
15346        /***************************/
15347
15348        /* create a dma tag for the rx sge mbufs */
15349        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15350                                1,                  /* alignment */
15351                                0,                  /* boundary limit */
15352                                BUS_SPACE_MAXADDR,  /* restricted low */
15353                                BUS_SPACE_MAXADDR,  /* restricted hi */
15354                                NULL,               /* addr filter() */
15355                                NULL,               /* addr filter() arg */
15356                                BCM_PAGE_SIZE,      /* max map size */
15357                                1,                  /* num discontinuous */
15358                                BCM_PAGE_SIZE,      /* max seg size */
15359                                0,                  /* flags */
15360                                NULL,               /* lock() */
15361                                NULL,               /* lock() arg */
15362                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
15363        if (rc != 0) {
15364            /* XXX unwind and free previous fastpath allocations */
15365            BLOGE(sc, "Failed to create dma tag for "
15366                      "'fp %d rx sge mbufs' (%d)\n",
15367                  i, rc);
15368            return (1);
15369        }
15370
15371        /* create dma maps for the rx sge mbuf clusters */
15372        for (j = 0; j < RX_SGE_TOTAL; j++) {
15373            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15374                                  BUS_DMA_NOWAIT,
15375                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
15376                /* XXX unwind and free previous fastpath allocations */
15377                BLOGE(sc, "Failed to create dma map for "
15378                          "'fp %d rx sge mbuf %d' (%d)\n",
15379                      i, j, rc);
15380                return (1);
15381            }
15382        }
15383
15384        /* create dma map for the spare rx sge mbuf cluster */
15385        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15386                              BUS_DMA_NOWAIT,
15387                              &fp->rx_sge_mbuf_spare_map)) {
15388            /* XXX unwind and free previous fastpath allocations */
15389            BLOGE(sc, "Failed to create dma map for "
15390                      "'fp %d spare rx sge mbuf' (%d)\n",
15391                  i, rc);
15392            return (1);
15393        }
15394
15395        /***************************/
15396        /* FP RX TPA MBUF DMA MAPS */
15397        /***************************/
15398
15399        /* create dma maps for the rx tpa mbuf clusters */
15400        max_agg_queues = MAX_AGG_QS(sc);
15401
15402        for (j = 0; j < max_agg_queues; j++) {
15403            if (bus_dmamap_create(fp->rx_mbuf_tag,
15404                                  BUS_DMA_NOWAIT,
15405                                  &fp->rx_tpa_info[j].bd.m_map)) {
15406                /* XXX unwind and free previous fastpath allocations */
15407                BLOGE(sc, "Failed to create dma map for "
15408                          "'fp %d rx tpa mbuf %d' (%d)\n",
15409                      i, j, rc);
15410                return (1);
15411            }
15412        }
15413
15414        /* create dma map for the spare rx tpa mbuf cluster */
15415        if (bus_dmamap_create(fp->rx_mbuf_tag,
15416                              BUS_DMA_NOWAIT,
15417                              &fp->rx_tpa_info_mbuf_spare_map)) {
15418            /* XXX unwind and free previous fastpath allocations */
15419            BLOGE(sc, "Failed to create dma map for "
15420                      "'fp %d spare rx tpa mbuf' (%d)\n",
15421                  i, rc);
15422            return (1);
15423        }
15424
15425        bxe_init_sge_ring_bit_mask(fp);
15426    }
15427
15428    return (0);
15429}
15430
15431static void
15432bxe_free_hsi_mem(struct bxe_softc *sc)
15433{
15434    struct bxe_fastpath *fp;
15435    int max_agg_queues;
15436    int i, j;
15437
15438    if (sc->parent_dma_tag == NULL) {
15439        return; /* assume nothing was allocated */
15440    }
15441
15442    for (i = 0; i < sc->num_queues; i++) {
15443        fp = &sc->fp[i];
15444
15445        /*******************/
15446        /* FP STATUS BLOCK */
15447        /*******************/
15448
15449        bxe_dma_free(sc, &fp->sb_dma);
15450        memset(&fp->status_block, 0, sizeof(fp->status_block));
15451
15452        /******************/
15453        /* FP TX BD CHAIN */
15454        /******************/
15455
15456        bxe_dma_free(sc, &fp->tx_dma);
15457        fp->tx_chain = NULL;
15458
15459        /******************/
15460        /* FP RX BD CHAIN */
15461        /******************/
15462
15463        bxe_dma_free(sc, &fp->rx_dma);
15464        fp->rx_chain = NULL;
15465
15466        /*******************/
15467        /* FP RX RCQ CHAIN */
15468        /*******************/
15469
15470        bxe_dma_free(sc, &fp->rcq_dma);
15471        fp->rcq_chain = NULL;
15472
15473        /*******************/
15474        /* FP RX SGE CHAIN */
15475        /*******************/
15476
15477        bxe_dma_free(sc, &fp->rx_sge_dma);
15478        fp->rx_sge_chain = NULL;
15479
15480        /***********************/
15481        /* FP TX MBUF DMA MAPS */
15482        /***********************/
15483
15484        if (fp->tx_mbuf_tag != NULL) {
15485            for (j = 0; j < TX_BD_TOTAL; j++) {
15486                if (fp->tx_mbuf_chain[j].m_map != NULL) {
15487                    bus_dmamap_unload(fp->tx_mbuf_tag,
15488                                      fp->tx_mbuf_chain[j].m_map);
15489                    bus_dmamap_destroy(fp->tx_mbuf_tag,
15490                                       fp->tx_mbuf_chain[j].m_map);
15491                }
15492            }
15493
15494            bus_dma_tag_destroy(fp->tx_mbuf_tag);
15495            fp->tx_mbuf_tag = NULL;
15496        }
15497
15498        /***********************/
15499        /* FP RX MBUF DMA MAPS */
15500        /***********************/
15501
15502        if (fp->rx_mbuf_tag != NULL) {
15503            for (j = 0; j < RX_BD_TOTAL; j++) {
15504                if (fp->rx_mbuf_chain[j].m_map != NULL) {
15505                    bus_dmamap_unload(fp->rx_mbuf_tag,
15506                                      fp->rx_mbuf_chain[j].m_map);
15507                    bus_dmamap_destroy(fp->rx_mbuf_tag,
15508                                       fp->rx_mbuf_chain[j].m_map);
15509                }
15510            }
15511
15512            if (fp->rx_mbuf_spare_map != NULL) {
15513                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15514                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15515            }
15516
15517            /***************************/
15518            /* FP RX TPA MBUF DMA MAPS */
15519            /***************************/
15520
15521            max_agg_queues = MAX_AGG_QS(sc);
15522
15523            for (j = 0; j < max_agg_queues; j++) {
15524                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15525                    bus_dmamap_unload(fp->rx_mbuf_tag,
15526                                      fp->rx_tpa_info[j].bd.m_map);
15527                    bus_dmamap_destroy(fp->rx_mbuf_tag,
15528                                       fp->rx_tpa_info[j].bd.m_map);
15529                }
15530            }
15531
15532            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15533                bus_dmamap_unload(fp->rx_mbuf_tag,
15534                                  fp->rx_tpa_info_mbuf_spare_map);
15535                bus_dmamap_destroy(fp->rx_mbuf_tag,
15536                                   fp->rx_tpa_info_mbuf_spare_map);
15537            }
15538
15539            bus_dma_tag_destroy(fp->rx_mbuf_tag);
15540            fp->rx_mbuf_tag = NULL;
15541        }
15542
15543        /***************************/
15544        /* FP RX SGE MBUF DMA MAPS */
15545        /***************************/
15546
15547        if (fp->rx_sge_mbuf_tag != NULL) {
15548            for (j = 0; j < RX_SGE_TOTAL; j++) {
15549                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15550                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15551                                      fp->rx_sge_mbuf_chain[j].m_map);
15552                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15553                                       fp->rx_sge_mbuf_chain[j].m_map);
15554                }
15555            }
15556
15557            if (fp->rx_sge_mbuf_spare_map != NULL) {
15558                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15559                                  fp->rx_sge_mbuf_spare_map);
15560                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15561                                   fp->rx_sge_mbuf_spare_map);
15562            }
15563
15564            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15565            fp->rx_sge_mbuf_tag = NULL;
15566        }
15567    }
15568
15569    /***************************/
15570    /* FW DECOMPRESSION BUFFER */
15571    /***************************/
15572
15573    bxe_dma_free(sc, &sc->gz_buf_dma);
15574    sc->gz_buf = NULL;
15575    free(sc->gz_strm, M_DEVBUF);
15576    sc->gz_strm = NULL;
15577
15578    /*******************/
15579    /* SLOW PATH QUEUE */
15580    /*******************/
15581
15582    bxe_dma_free(sc, &sc->spq_dma);
15583    sc->spq = NULL;
15584
15585    /*************/
15586    /* SLOW PATH */
15587    /*************/
15588
15589    bxe_dma_free(sc, &sc->sp_dma);
15590    sc->sp = NULL;
15591
15592    /***************/
15593    /* EVENT QUEUE */
15594    /***************/
15595
15596    bxe_dma_free(sc, &sc->eq_dma);
15597    sc->eq = NULL;
15598
15599    /************************/
15600    /* DEFAULT STATUS BLOCK */
15601    /************************/
15602
15603    bxe_dma_free(sc, &sc->def_sb_dma);
15604    sc->def_sb = NULL;
15605
15606    bus_dma_tag_destroy(sc->parent_dma_tag);
15607    sc->parent_dma_tag = NULL;
15608}
15609
15610/*
15611 * Previous driver DMAE transaction may have occurred when pre-boot stage
15612 * ended and boot began. This would invalidate the addresses of the
15613 * transaction, resulting in was-error bit set in the PCI causing all
15614 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
15615 * the interrupt which detected this from the pglueb and the was-done bit
15616 */
15617static void
15618bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15619{
15620    uint32_t val;
15621
15622    if (!CHIP_IS_E1x(sc)) {
15623        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15624        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15625            BLOGD(sc, DBG_LOAD,
15626                  "Clearing 'was-error' bit that was set in pglueb");
15627            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15628        }
15629    }
15630}
15631
15632static int
15633bxe_prev_mcp_done(struct bxe_softc *sc)
15634{
15635    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15636                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15637    if (!rc) {
15638        BLOGE(sc, "MCP response failure, aborting\n");
15639        return (-1);
15640    }
15641
15642    return (0);
15643}
15644
15645static struct bxe_prev_list_node *
15646bxe_prev_path_get_entry(struct bxe_softc *sc)
15647{
15648    struct bxe_prev_list_node *tmp;
15649
15650    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15651        if ((sc->pcie_bus == tmp->bus) &&
15652            (sc->pcie_device == tmp->slot) &&
15653            (SC_PATH(sc) == tmp->path)) {
15654            return (tmp);
15655        }
15656    }
15657
15658    return (NULL);
15659}
15660
15661static uint8_t
15662bxe_prev_is_path_marked(struct bxe_softc *sc)
15663{
15664    struct bxe_prev_list_node *tmp;
15665    int rc = FALSE;
15666
15667    mtx_lock(&bxe_prev_mtx);
15668
15669    tmp = bxe_prev_path_get_entry(sc);
15670    if (tmp) {
15671        if (tmp->aer) {
15672            BLOGD(sc, DBG_LOAD,
15673                  "Path %d/%d/%d was marked by AER\n",
15674                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15675        } else {
15676            rc = TRUE;
15677            BLOGD(sc, DBG_LOAD,
15678                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15679                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15680        }
15681    }
15682
15683    mtx_unlock(&bxe_prev_mtx);
15684
15685    return (rc);
15686}
15687
15688static int
15689bxe_prev_mark_path(struct bxe_softc *sc,
15690                   uint8_t          after_undi)
15691{
15692    struct bxe_prev_list_node *tmp;
15693
15694    mtx_lock(&bxe_prev_mtx);
15695
15696    /* Check whether the entry for this path already exists */
15697    tmp = bxe_prev_path_get_entry(sc);
15698    if (tmp) {
15699        if (!tmp->aer) {
15700            BLOGD(sc, DBG_LOAD,
15701                  "Re-marking AER in path %d/%d/%d\n",
15702                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15703        } else {
15704            BLOGD(sc, DBG_LOAD,
15705                  "Removing AER indication from path %d/%d/%d\n",
15706                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15707            tmp->aer = 0;
15708        }
15709
15710        mtx_unlock(&bxe_prev_mtx);
15711        return (0);
15712    }
15713
15714    mtx_unlock(&bxe_prev_mtx);
15715
15716    /* Create an entry for this path and add it */
15717    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15718                 (M_NOWAIT | M_ZERO));
15719    if (!tmp) {
15720        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15721        return (-1);
15722    }
15723
15724    tmp->bus  = sc->pcie_bus;
15725    tmp->slot = sc->pcie_device;
15726    tmp->path = SC_PATH(sc);
15727    tmp->aer  = 0;
15728    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15729
15730    mtx_lock(&bxe_prev_mtx);
15731
15732    BLOGD(sc, DBG_LOAD,
15733          "Marked path %d/%d/%d - finished previous unload\n",
15734          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15735    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15736
15737    mtx_unlock(&bxe_prev_mtx);
15738
15739    return (0);
15740}
15741
15742static int
15743bxe_do_flr(struct bxe_softc *sc)
15744{
15745    int i;
15746
15747    /* only E2 and onwards support FLR */
15748    if (CHIP_IS_E1x(sc)) {
15749        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15750        return (-1);
15751    }
15752
15753    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15754    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15755        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15756              sc->devinfo.bc_ver);
15757        return (-1);
15758    }
15759
15760    /* Wait for Transaction Pending bit clean */
15761    for (i = 0; i < 4; i++) {
15762        if (i) {
15763            DELAY(((1 << (i - 1)) * 100) * 1000);
15764        }
15765
15766        if (!bxe_is_pcie_pending(sc)) {
15767            goto clear;
15768        }
15769    }
15770
15771    BLOGE(sc, "PCIE transaction is not cleared, "
15772              "proceeding with reset anyway\n");
15773
15774clear:
15775
15776    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15777    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15778
15779    return (0);
15780}
15781
15782struct bxe_mac_vals {
15783    uint32_t xmac_addr;
15784    uint32_t xmac_val;
15785    uint32_t emac_addr;
15786    uint32_t emac_val;
15787    uint32_t umac_addr;
15788    uint32_t umac_val;
15789    uint32_t bmac_addr;
15790    uint32_t bmac_val[2];
15791};
15792
15793static void
15794bxe_prev_unload_close_mac(struct bxe_softc *sc,
15795                          struct bxe_mac_vals *vals)
15796{
15797    uint32_t val, base_addr, offset, mask, reset_reg;
15798    uint8_t mac_stopped = FALSE;
15799    uint8_t port = SC_PORT(sc);
15800    uint32_t wb_data[2];
15801
15802    /* reset addresses as they also mark which values were changed */
15803    vals->bmac_addr = 0;
15804    vals->umac_addr = 0;
15805    vals->xmac_addr = 0;
15806    vals->emac_addr = 0;
15807
15808    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15809
15810    if (!CHIP_IS_E3(sc)) {
15811        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15812        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15813        if ((mask & reset_reg) && val) {
15814            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15815            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15816                                    : NIG_REG_INGRESS_BMAC0_MEM;
15817            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15818                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15819
15820            /*
15821             * use rd/wr since we cannot use dmae. This is safe
15822             * since MCP won't access the bus due to the request
15823             * to unload, and no function on the path can be
15824             * loaded at this time.
15825             */
15826            wb_data[0] = REG_RD(sc, base_addr + offset);
15827            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15828            vals->bmac_addr = base_addr + offset;
15829            vals->bmac_val[0] = wb_data[0];
15830            vals->bmac_val[1] = wb_data[1];
15831            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15832            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15833            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15834        }
15835
15836        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15837        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15838        vals->emac_val = REG_RD(sc, vals->emac_addr);
15839        REG_WR(sc, vals->emac_addr, 0);
15840        mac_stopped = TRUE;
15841    } else {
15842        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15843            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15844            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15845            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15846            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15847            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15848            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15849            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15850            REG_WR(sc, vals->xmac_addr, 0);
15851            mac_stopped = TRUE;
15852        }
15853
15854        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15855        if (mask & reset_reg) {
15856            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15857            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15858            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15859            vals->umac_val = REG_RD(sc, vals->umac_addr);
15860            REG_WR(sc, vals->umac_addr, 0);
15861            mac_stopped = TRUE;
15862        }
15863    }
15864
15865    if (mac_stopped) {
15866        DELAY(20000);
15867    }
15868}
15869
15870#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15871#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15872#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15873#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15874
15875static void
15876bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15877                         uint8_t          port,
15878                         uint8_t          inc)
15879{
15880    uint16_t rcq, bd;
15881    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15882
15883    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15884    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15885
15886    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15887    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15888
15889    BLOGD(sc, DBG_LOAD,
15890          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15891          port, bd, rcq);
15892}
15893
15894static int
15895bxe_prev_unload_common(struct bxe_softc *sc)
15896{
15897    uint32_t reset_reg, tmp_reg = 0, rc;
15898    uint8_t prev_undi = FALSE;
15899    struct bxe_mac_vals mac_vals;
15900    uint32_t timer_count = 1000;
15901    uint32_t prev_brb;
15902
15903    /*
15904     * It is possible a previous function received 'common' answer,
15905     * but hasn't loaded yet, therefore creating a scenario of
15906     * multiple functions receiving 'common' on the same path.
15907     */
15908    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15909
15910    memset(&mac_vals, 0, sizeof(mac_vals));
15911
15912    if (bxe_prev_is_path_marked(sc)) {
15913        return (bxe_prev_mcp_done(sc));
15914    }
15915
15916    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15917
15918    /* Reset should be performed after BRB is emptied */
15919    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15920        /* Close the MAC Rx to prevent BRB from filling up */
15921        bxe_prev_unload_close_mac(sc, &mac_vals);
15922
15923        /* close LLH filters towards the BRB */
15924        elink_set_rx_filter(&sc->link_params, 0);
15925
15926        /*
15927         * Check if the UNDI driver was previously loaded.
15928         * UNDI driver initializes CID offset for normal bell to 0x7
15929         */
15930        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15931            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15932            if (tmp_reg == 0x7) {
15933                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15934                prev_undi = TRUE;
15935                /* clear the UNDI indication */
15936                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15937                /* clear possible idle check errors */
15938                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15939            }
15940        }
15941
15942        /* wait until BRB is empty */
15943        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15944        while (timer_count) {
15945            prev_brb = tmp_reg;
15946
15947            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15948            if (!tmp_reg) {
15949                break;
15950            }
15951
15952            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15953
15954            /* reset timer as long as BRB actually gets emptied */
15955            if (prev_brb > tmp_reg) {
15956                timer_count = 1000;
15957            } else {
15958                timer_count--;
15959            }
15960
15961            /* If UNDI resides in memory, manually increment it */
15962            if (prev_undi) {
15963                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15964            }
15965
15966            DELAY(10);
15967        }
15968
15969        if (!timer_count) {
15970            BLOGE(sc, "Failed to empty BRB\n");
15971        }
15972    }
15973
15974    /* No packets are in the pipeline, path is ready for reset */
15975    bxe_reset_common(sc);
15976
15977    if (mac_vals.xmac_addr) {
15978        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15979    }
15980    if (mac_vals.umac_addr) {
15981        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15982    }
15983    if (mac_vals.emac_addr) {
15984        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15985    }
15986    if (mac_vals.bmac_addr) {
15987        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15988        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15989    }
15990
15991    rc = bxe_prev_mark_path(sc, prev_undi);
15992    if (rc) {
15993        bxe_prev_mcp_done(sc);
15994        return (rc);
15995    }
15996
15997    return (bxe_prev_mcp_done(sc));
15998}
15999
16000static int
16001bxe_prev_unload_uncommon(struct bxe_softc *sc)
16002{
16003    int rc;
16004
16005    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
16006
16007    /* Test if previous unload process was already finished for this path */
16008    if (bxe_prev_is_path_marked(sc)) {
16009        return (bxe_prev_mcp_done(sc));
16010    }
16011
16012    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
16013
16014    /*
16015     * If function has FLR capabilities, and existing FW version matches
16016     * the one required, then FLR will be sufficient to clean any residue
16017     * left by previous driver
16018     */
16019    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
16020    if (!rc) {
16021        /* fw version is good */
16022        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
16023        rc = bxe_do_flr(sc);
16024    }
16025
16026    if (!rc) {
16027        /* FLR was performed */
16028        BLOGD(sc, DBG_LOAD, "FLR successful\n");
16029        return (0);
16030    }
16031
16032    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
16033
16034    /* Close the MCP request, return failure*/
16035    rc = bxe_prev_mcp_done(sc);
16036    if (!rc) {
16037        rc = BXE_PREV_WAIT_NEEDED;
16038    }
16039
16040    return (rc);
16041}
16042
16043static int
16044bxe_prev_unload(struct bxe_softc *sc)
16045{
16046    int time_counter = 10;
16047    uint32_t fw, hw_lock_reg, hw_lock_val;
16048    uint32_t rc = 0;
16049
16050    /*
16051     * Clear HW from errors which may have resulted from an interrupted
16052     * DMAE transaction.
16053     */
16054    bxe_prev_interrupted_dmae(sc);
16055
16056    /* Release previously held locks */
16057    hw_lock_reg =
16058        (SC_FUNC(sc) <= 5) ?
16059            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
16060            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
16061
16062    hw_lock_val = (REG_RD(sc, hw_lock_reg));
16063    if (hw_lock_val) {
16064        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
16065            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
16066            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
16067                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
16068        }
16069        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
16070        REG_WR(sc, hw_lock_reg, 0xffffffff);
16071    } else {
16072        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
16073    }
16074
16075    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
16076        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
16077        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
16078    }
16079
16080    do {
16081        /* Lock MCP using an unload request */
16082        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
16083        if (!fw) {
16084            BLOGE(sc, "MCP response failure, aborting\n");
16085            rc = -1;
16086            break;
16087        }
16088
16089        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
16090            rc = bxe_prev_unload_common(sc);
16091            break;
16092        }
16093
16094        /* non-common reply from MCP night require looping */
16095        rc = bxe_prev_unload_uncommon(sc);
16096        if (rc != BXE_PREV_WAIT_NEEDED) {
16097            break;
16098        }
16099
16100        DELAY(20000);
16101    } while (--time_counter);
16102
16103    if (!time_counter || rc) {
16104        BLOGE(sc, "Failed to unload previous driver!\n");
16105        rc = -1;
16106    }
16107
16108    return (rc);
16109}
16110
16111void
16112bxe_dcbx_set_state(struct bxe_softc *sc,
16113                   uint8_t          dcb_on,
16114                   uint32_t         dcbx_enabled)
16115{
16116    if (!CHIP_IS_E1x(sc)) {
16117        sc->dcb_state = dcb_on;
16118        sc->dcbx_enabled = dcbx_enabled;
16119    } else {
16120        sc->dcb_state = FALSE;
16121        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
16122    }
16123    BLOGD(sc, DBG_LOAD,
16124          "DCB state [%s:%s]\n",
16125          dcb_on ? "ON" : "OFF",
16126          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
16127          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
16128          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
16129          "on-chip with negotiation" : "invalid");
16130}
16131
16132/* must be called after sriov-enable */
16133static int
16134bxe_set_qm_cid_count(struct bxe_softc *sc)
16135{
16136    int cid_count = BXE_L2_MAX_CID(sc);
16137
16138    if (IS_SRIOV(sc)) {
16139        cid_count += BXE_VF_CIDS;
16140    }
16141
16142    if (CNIC_SUPPORT(sc)) {
16143        cid_count += CNIC_CID_MAX;
16144    }
16145
16146    return (roundup(cid_count, QM_CID_ROUND));
16147}
16148
16149static void
16150bxe_init_multi_cos(struct bxe_softc *sc)
16151{
16152    int pri, cos;
16153
16154    uint32_t pri_map = 0; /* XXX change to user config */
16155
16156    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
16157        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
16158        if (cos < sc->max_cos) {
16159            sc->prio_to_cos[pri] = cos;
16160        } else {
16161            BLOGW(sc, "Invalid COS %d for priority %d "
16162                      "(max COS is %d), setting to 0\n",
16163                  cos, pri, (sc->max_cos - 1));
16164            sc->prio_to_cos[pri] = 0;
16165        }
16166    }
16167}
16168
16169static int
16170bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
16171{
16172    struct bxe_softc *sc;
16173    int error, result;
16174
16175    result = 0;
16176    error = sysctl_handle_int(oidp, &result, 0, req);
16177
16178    if (error || !req->newptr) {
16179        return (error);
16180    }
16181
16182    if (result == 1) {
16183        sc = (struct bxe_softc *)arg1;
16184        BLOGI(sc, "... dumping driver state ...\n");
16185        /* XXX */
16186    }
16187
16188    return (error);
16189}
16190
16191static int
16192bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
16193{
16194    struct bxe_softc *sc = (struct bxe_softc *)arg1;
16195    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
16196    uint32_t *offset;
16197    uint64_t value = 0;
16198    int index = (int)arg2;
16199
16200    if (index >= BXE_NUM_ETH_STATS) {
16201        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
16202        return (-1);
16203    }
16204
16205    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
16206
16207    switch (bxe_eth_stats_arr[index].size) {
16208    case 4:
16209        value = (uint64_t)*offset;
16210        break;
16211    case 8:
16212        value = HILO_U64(*offset, *(offset + 1));
16213        break;
16214    default:
16215        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
16216              index, bxe_eth_stats_arr[index].size);
16217        return (-1);
16218    }
16219
16220    return (sysctl_handle_64(oidp, &value, 0, req));
16221}
16222
16223static int
16224bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
16225{
16226    struct bxe_softc *sc = (struct bxe_softc *)arg1;
16227    uint32_t *eth_stats;
16228    uint32_t *offset;
16229    uint64_t value = 0;
16230    uint32_t q_stat = (uint32_t)arg2;
16231    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
16232    uint32_t index = (q_stat & 0xffff);
16233
16234    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
16235
16236    if (index >= BXE_NUM_ETH_Q_STATS) {
16237        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
16238        return (-1);
16239    }
16240
16241    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
16242
16243    switch (bxe_eth_q_stats_arr[index].size) {
16244    case 4:
16245        value = (uint64_t)*offset;
16246        break;
16247    case 8:
16248        value = HILO_U64(*offset, *(offset + 1));
16249        break;
16250    default:
16251        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
16252              index, bxe_eth_q_stats_arr[index].size);
16253        return (-1);
16254    }
16255
16256    return (sysctl_handle_64(oidp, &value, 0, req));
16257}
16258
16259static void
16260bxe_add_sysctls(struct bxe_softc *sc)
16261{
16262    struct sysctl_ctx_list *ctx;
16263    struct sysctl_oid_list *children;
16264    struct sysctl_oid *queue_top, *queue;
16265    struct sysctl_oid_list *queue_top_children, *queue_children;
16266    char queue_num_buf[32];
16267    uint32_t q_stat;
16268    int i, j;
16269
16270    ctx = device_get_sysctl_ctx(sc->dev);
16271    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16272
16273    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16274                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16275                      "version");
16276
16277    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16278                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16279                      "bootcode version");
16280
16281    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16282             BCM_5710_FW_MAJOR_VERSION,
16283             BCM_5710_FW_MINOR_VERSION,
16284             BCM_5710_FW_REVISION_VERSION,
16285             BCM_5710_FW_ENGINEERING_VERSION);
16286    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16287                      CTLFLAG_RD, sc->fw_ver_str, 0,
16288                      "firmware version");
16289
16290    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16291        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
16292         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
16293         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
16294         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16295                                                                "Unknown"));
16296    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16297                      CTLFLAG_RD, sc->mf_mode_str, 0,
16298                      "multifunction mode");
16299
16300    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16301                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16302                    "multifunction vnics per port");
16303
16304    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16305                      CTLFLAG_RD, sc->mac_addr_str, 0,
16306                      "mac address");
16307
16308    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16309        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16310         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16311         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16312                                              "???GT/s"),
16313        sc->devinfo.pcie_link_width);
16314    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16315                      CTLFLAG_RD, sc->pci_link_str, 0,
16316                      "pci link status");
16317
16318    sc->debug = bxe_debug;
16319    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16320                    CTLFLAG_RW, &sc->debug,
16321                    "debug logging mode");
16322
16323    sc->rx_budget = bxe_rx_budget;
16324    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16325                    CTLFLAG_RW, &sc->rx_budget, 0,
16326                    "rx processing budget");
16327
16328    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16329                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
16330                    bxe_sysctl_state, "IU", "dump driver state");
16331
16332    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16333        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16334                        bxe_eth_stats_arr[i].string,
16335                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
16336                        bxe_sysctl_eth_stat, "LU",
16337                        bxe_eth_stats_arr[i].string);
16338    }
16339
16340    /* add a new parent node for all queues "dev.bxe.#.queue" */
16341    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16342                                CTLFLAG_RD, NULL, "queue");
16343    queue_top_children = SYSCTL_CHILDREN(queue_top);
16344
16345    for (i = 0; i < sc->num_queues; i++) {
16346        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16347        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16348        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16349                                queue_num_buf, CTLFLAG_RD, NULL,
16350                                "single queue");
16351        queue_children = SYSCTL_CHILDREN(queue);
16352
16353        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16354            q_stat = ((i << 16) | j);
16355            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16356                            bxe_eth_q_stats_arr[j].string,
16357                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
16358                            bxe_sysctl_eth_q_stat, "LU",
16359                            bxe_eth_q_stats_arr[j].string);
16360        }
16361    }
16362}
16363
16364/*
16365 * Device attach function.
16366 *
16367 * Allocates device resources, performs secondary chip identification, and
16368 * initializes driver instance variables. This function is called from driver
16369 * load after a successful probe.
16370 *
16371 * Returns:
16372 *   0 = Success, >0 = Failure
16373 */
16374static int
16375bxe_attach(device_t dev)
16376{
16377    struct bxe_softc *sc;
16378
16379    sc = device_get_softc(dev);
16380
16381    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16382
16383    sc->state = BXE_STATE_CLOSED;
16384
16385    sc->dev  = dev;
16386    sc->unit = device_get_unit(dev);
16387
16388    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16389
16390    sc->pcie_bus    = pci_get_bus(dev);
16391    sc->pcie_device = pci_get_slot(dev);
16392    sc->pcie_func   = pci_get_function(dev);
16393
16394    /* enable bus master capability */
16395    pci_enable_busmaster(dev);
16396
16397    /* get the BARs */
16398    if (bxe_allocate_bars(sc) != 0) {
16399        return (ENXIO);
16400    }
16401
16402    /* initialize the mutexes */
16403    bxe_init_mutexes(sc);
16404
16405    /* prepare the periodic callout */
16406    callout_init(&sc->periodic_callout, 0);
16407
16408    /* prepare the chip taskqueue */
16409    sc->chip_tq_flags = CHIP_TQ_NONE;
16410    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16411             "bxe%d_chip_tq", sc->unit);
16412    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16413    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16414                                   taskqueue_thread_enqueue,
16415                                   &sc->chip_tq);
16416    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16417                            "%s", sc->chip_tq_name);
16418
16419    /* get device info and set params */
16420    if (bxe_get_device_info(sc) != 0) {
16421        BLOGE(sc, "getting device info\n");
16422        bxe_deallocate_bars(sc);
16423        pci_disable_busmaster(dev);
16424        return (ENXIO);
16425    }
16426
16427    /* get final misc params */
16428    bxe_get_params(sc);
16429
16430    /* set the default MTU (changed via ifconfig) */
16431    sc->mtu = ETHERMTU;
16432
16433    bxe_set_modes_bitmap(sc);
16434
16435    /* XXX
16436     * If in AFEX mode and the function is configured for FCoE
16437     * then bail... no L2 allowed.
16438     */
16439
16440    /* get phy settings from shmem and 'and' against admin settings */
16441    bxe_get_phy_info(sc);
16442
16443    /* initialize the FreeBSD ifnet interface */
16444    if (bxe_init_ifnet(sc) != 0) {
16445        bxe_release_mutexes(sc);
16446        bxe_deallocate_bars(sc);
16447        pci_disable_busmaster(dev);
16448        return (ENXIO);
16449    }
16450
16451    /* allocate device interrupts */
16452    if (bxe_interrupt_alloc(sc) != 0) {
16453        if (sc->ifp != NULL) {
16454            ether_ifdetach(sc->ifp);
16455        }
16456        ifmedia_removeall(&sc->ifmedia);
16457        bxe_release_mutexes(sc);
16458        bxe_deallocate_bars(sc);
16459        pci_disable_busmaster(dev);
16460        return (ENXIO);
16461    }
16462
16463    /* allocate ilt */
16464    if (bxe_alloc_ilt_mem(sc) != 0) {
16465        bxe_interrupt_free(sc);
16466        if (sc->ifp != NULL) {
16467            ether_ifdetach(sc->ifp);
16468        }
16469        ifmedia_removeall(&sc->ifmedia);
16470        bxe_release_mutexes(sc);
16471        bxe_deallocate_bars(sc);
16472        pci_disable_busmaster(dev);
16473        return (ENXIO);
16474    }
16475
16476    /* allocate the host hardware/software hsi structures */
16477    if (bxe_alloc_hsi_mem(sc) != 0) {
16478        bxe_free_ilt_mem(sc);
16479        bxe_interrupt_free(sc);
16480        if (sc->ifp != NULL) {
16481            ether_ifdetach(sc->ifp);
16482        }
16483        ifmedia_removeall(&sc->ifmedia);
16484        bxe_release_mutexes(sc);
16485        bxe_deallocate_bars(sc);
16486        pci_disable_busmaster(dev);
16487        return (ENXIO);
16488    }
16489
16490    /* need to reset chip if UNDI was active */
16491    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16492        /* init fw_seq */
16493        sc->fw_seq =
16494            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16495             DRV_MSG_SEQ_NUMBER_MASK);
16496        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16497        bxe_prev_unload(sc);
16498    }
16499
16500#if 1
16501    /* XXX */
16502    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16503#else
16504    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16505        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16506        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16507        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16508        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16509        bxe_dcbx_init_params(sc);
16510    } else {
16511        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16512    }
16513#endif
16514
16515    /* calculate qm_cid_count */
16516    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16517    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16518
16519    sc->max_cos = 1;
16520    bxe_init_multi_cos(sc);
16521
16522    bxe_add_sysctls(sc);
16523
16524    return (0);
16525}
16526
16527/*
16528 * Device detach function.
16529 *
16530 * Stops the controller, resets the controller, and releases resources.
16531 *
16532 * Returns:
16533 *   0 = Success, >0 = Failure
16534 */
16535static int
16536bxe_detach(device_t dev)
16537{
16538    struct bxe_softc *sc;
16539    if_t ifp;
16540
16541    sc = device_get_softc(dev);
16542
16543    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16544
16545    ifp = sc->ifp;
16546    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16547        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16548        return(EBUSY);
16549    }
16550
16551    /* stop the periodic callout */
16552    bxe_periodic_stop(sc);
16553
16554    /* stop the chip taskqueue */
16555    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16556    if (sc->chip_tq) {
16557        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16558        taskqueue_free(sc->chip_tq);
16559        sc->chip_tq = NULL;
16560    }
16561
16562    /* stop and reset the controller if it was open */
16563    if (sc->state != BXE_STATE_CLOSED) {
16564        BXE_CORE_LOCK(sc);
16565        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16566        BXE_CORE_UNLOCK(sc);
16567    }
16568
16569    /* release the network interface */
16570    if (ifp != NULL) {
16571        ether_ifdetach(ifp);
16572    }
16573    ifmedia_removeall(&sc->ifmedia);
16574
16575    /* XXX do the following based on driver state... */
16576
16577    /* free the host hardware/software hsi structures */
16578    bxe_free_hsi_mem(sc);
16579
16580    /* free ilt */
16581    bxe_free_ilt_mem(sc);
16582
16583    /* release the interrupts */
16584    bxe_interrupt_free(sc);
16585
16586    /* Release the mutexes*/
16587    bxe_release_mutexes(sc);
16588
16589    /* Release the PCIe BAR mapped memory */
16590    bxe_deallocate_bars(sc);
16591
16592    /* Release the FreeBSD interface. */
16593    if (sc->ifp != NULL) {
16594        if_free(sc->ifp);
16595    }
16596
16597    pci_disable_busmaster(dev);
16598
16599    return (0);
16600}
16601
16602/*
16603 * Device shutdown function.
16604 *
16605 * Stops and resets the controller.
16606 *
16607 * Returns:
16608 *   Nothing
16609 */
16610static int
16611bxe_shutdown(device_t dev)
16612{
16613    struct bxe_softc *sc;
16614
16615    sc = device_get_softc(dev);
16616
16617    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16618
16619    /* stop the periodic callout */
16620    bxe_periodic_stop(sc);
16621
16622    BXE_CORE_LOCK(sc);
16623    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16624    BXE_CORE_UNLOCK(sc);
16625
16626    return (0);
16627}
16628
16629void
16630bxe_igu_ack_sb(struct bxe_softc *sc,
16631               uint8_t          igu_sb_id,
16632               uint8_t          segment,
16633               uint16_t         index,
16634               uint8_t          op,
16635               uint8_t          update)
16636{
16637    uint32_t igu_addr = sc->igu_base_addr;
16638    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16639    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16640}
16641
16642static void
16643bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16644                     uint8_t          func,
16645                     uint8_t          idu_sb_id,
16646                     uint8_t          is_pf)
16647{
16648    uint32_t data, ctl, cnt = 100;
16649    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16650    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16651    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16652    uint32_t sb_bit =  1 << (idu_sb_id%32);
16653    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16654    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16655
16656    /* Not supported in BC mode */
16657    if (CHIP_INT_MODE_IS_BC(sc)) {
16658        return;
16659    }
16660
16661    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16662             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16663            IGU_REGULAR_CLEANUP_SET |
16664            IGU_REGULAR_BCLEANUP);
16665
16666    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16667           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16668           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16669
16670    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16671            data, igu_addr_data);
16672    REG_WR(sc, igu_addr_data, data);
16673
16674    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16675                      BUS_SPACE_BARRIER_WRITE);
16676    mb();
16677
16678    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16679            ctl, igu_addr_ctl);
16680    REG_WR(sc, igu_addr_ctl, ctl);
16681
16682    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16683                      BUS_SPACE_BARRIER_WRITE);
16684    mb();
16685
16686    /* wait for clean up to finish */
16687    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16688        DELAY(20000);
16689    }
16690
16691    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16692        BLOGD(sc, DBG_LOAD,
16693              "Unable to finish IGU cleanup: "
16694              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16695              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16696    }
16697}
16698
16699static void
16700bxe_igu_clear_sb(struct bxe_softc *sc,
16701                 uint8_t          idu_sb_id)
16702{
16703    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16704}
16705
16706
16707
16708
16709
16710
16711
16712/*******************/
16713/* ECORE CALLBACKS */
16714/*******************/
16715
16716static void
16717bxe_reset_common(struct bxe_softc *sc)
16718{
16719    uint32_t val = 0x1400;
16720
16721    /* reset_common */
16722    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16723
16724    if (CHIP_IS_E3(sc)) {
16725        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16726        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16727    }
16728
16729    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16730}
16731
16732static void
16733bxe_common_init_phy(struct bxe_softc *sc)
16734{
16735    uint32_t shmem_base[2];
16736    uint32_t shmem2_base[2];
16737
16738    /* Avoid common init in case MFW supports LFA */
16739    if (SHMEM2_RD(sc, size) >
16740        (uint32_t)offsetof(struct shmem2_region,
16741                           lfa_host_addr[SC_PORT(sc)])) {
16742        return;
16743    }
16744
16745    shmem_base[0]  = sc->devinfo.shmem_base;
16746    shmem2_base[0] = sc->devinfo.shmem2_base;
16747
16748    if (!CHIP_IS_E1x(sc)) {
16749        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16750        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16751    }
16752
16753    BXE_PHY_LOCK(sc);
16754    elink_common_init_phy(sc, shmem_base, shmem2_base,
16755                          sc->devinfo.chip_id, 0);
16756    BXE_PHY_UNLOCK(sc);
16757}
16758
16759static void
16760bxe_pf_disable(struct bxe_softc *sc)
16761{
16762    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16763
16764    val &= ~IGU_PF_CONF_FUNC_EN;
16765
16766    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16767    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16768    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16769}
16770
16771static void
16772bxe_init_pxp(struct bxe_softc *sc)
16773{
16774    uint16_t devctl;
16775    int r_order, w_order;
16776
16777    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16778
16779    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16780
16781    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16782
16783    if (sc->mrrs == -1) {
16784        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16785    } else {
16786        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16787        r_order = sc->mrrs;
16788    }
16789
16790    ecore_init_pxp_arb(sc, r_order, w_order);
16791}
16792
16793static uint32_t
16794bxe_get_pretend_reg(struct bxe_softc *sc)
16795{
16796    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16797    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16798    return (base + (SC_ABS_FUNC(sc)) * stride);
16799}
16800
16801/*
16802 * Called only on E1H or E2.
16803 * When pretending to be PF, the pretend value is the function number 0..7.
16804 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16805 * combination.
16806 */
16807static int
16808bxe_pretend_func(struct bxe_softc *sc,
16809                 uint16_t         pretend_func_val)
16810{
16811    uint32_t pretend_reg;
16812
16813    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16814        return (-1);
16815    }
16816
16817    /* get my own pretend register */
16818    pretend_reg = bxe_get_pretend_reg(sc);
16819    REG_WR(sc, pretend_reg, pretend_func_val);
16820    REG_RD(sc, pretend_reg);
16821    return (0);
16822}
16823
16824static void
16825bxe_iov_init_dmae(struct bxe_softc *sc)
16826{
16827    return;
16828#if 0
16829    BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF");
16830
16831    if (!IS_SRIOV(sc)) {
16832        return;
16833    }
16834
16835    REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0);
16836#endif
16837}
16838
16839#if 0
16840static int
16841bxe_iov_init_ilt(struct bxe_softc *sc,
16842                 uint16_t         line)
16843{
16844    return (line);
16845#if 0
16846    int i;
16847    struct ecore_ilt* ilt = sc->ilt;
16848
16849    if (!IS_SRIOV(sc)) {
16850        return (line);
16851    }
16852
16853    /* set vfs ilt lines */
16854    for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) {
16855        struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i);
16856        ilt->lines[line+i].page = hw_cxt->addr;
16857        ilt->lines[line+i].page_mapping = hw_cxt->mapping;
16858        ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
16859    }
16860    return (line+i);
16861#endif
16862}
16863#endif
16864
16865static void
16866bxe_iov_init_dq(struct bxe_softc *sc)
16867{
16868    return;
16869#if 0
16870    if (!IS_SRIOV(sc)) {
16871        return;
16872    }
16873
16874    /* Set the DQ such that the CID reflect the abs_vfid */
16875    REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0);
16876    REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
16877
16878    /*
16879     * Set VFs starting CID. If its > 0 the preceding CIDs are belong to
16880     * the PF L2 queues
16881     */
16882    REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
16883
16884    /* The VF window size is the log2 of the max number of CIDs per VF */
16885    REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
16886
16887    /*
16888     * The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
16889     * the Pf doorbell size although the 2 are independent.
16890     */
16891    REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST,
16892           BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
16893
16894    /*
16895     * No security checks for now -
16896     * configure single rule (out of 16) mask = 0x1, value = 0x0,
16897     * CID range 0 - 0x1ffff
16898     */
16899    REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1);
16900    REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0);
16901    REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
16902    REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
16903
16904    /* set the number of VF alllowed doorbells to the full DQ range */
16905    REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
16906
16907    /* set the VF doorbell threshold */
16908    REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
16909#endif
16910}
16911
16912/* send a NIG loopback debug packet */
16913static void
16914bxe_lb_pckt(struct bxe_softc *sc)
16915{
16916    uint32_t wb_write[3];
16917
16918    /* Ethernet source and destination addresses */
16919    wb_write[0] = 0x55555555;
16920    wb_write[1] = 0x55555555;
16921    wb_write[2] = 0x20;     /* SOP */
16922    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16923
16924    /* NON-IP protocol */
16925    wb_write[0] = 0x09000000;
16926    wb_write[1] = 0x55555555;
16927    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16928    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16929}
16930
16931/*
16932 * Some of the internal memories are not directly readable from the driver.
16933 * To test them we send debug packets.
16934 */
16935static int
16936bxe_int_mem_test(struct bxe_softc *sc)
16937{
16938    int factor;
16939    int count, i;
16940    uint32_t val = 0;
16941
16942    if (CHIP_REV_IS_FPGA(sc)) {
16943        factor = 120;
16944    } else if (CHIP_REV_IS_EMUL(sc)) {
16945        factor = 200;
16946    } else {
16947        factor = 1;
16948    }
16949
16950    /* disable inputs of parser neighbor blocks */
16951    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16952    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16953    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16954    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16955
16956    /*  write 0 to parser credits for CFC search request */
16957    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16958
16959    /* send Ethernet packet */
16960    bxe_lb_pckt(sc);
16961
16962    /* TODO do i reset NIG statistic? */
16963    /* Wait until NIG register shows 1 packet of size 0x10 */
16964    count = 1000 * factor;
16965    while (count) {
16966        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16967        val = *BXE_SP(sc, wb_data[0]);
16968        if (val == 0x10) {
16969            break;
16970        }
16971
16972        DELAY(10000);
16973        count--;
16974    }
16975
16976    if (val != 0x10) {
16977        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16978        return (-1);
16979    }
16980
16981    /* wait until PRS register shows 1 packet */
16982    count = (1000 * factor);
16983    while (count) {
16984        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16985        if (val == 1) {
16986            break;
16987        }
16988
16989        DELAY(10000);
16990        count--;
16991    }
16992
16993    if (val != 0x1) {
16994        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16995        return (-2);
16996    }
16997
16998    /* Reset and init BRB, PRS */
16999    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
17000    DELAY(50000);
17001    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
17002    DELAY(50000);
17003    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17004    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17005
17006    /* Disable inputs of parser neighbor blocks */
17007    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
17008    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
17009    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
17010    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
17011
17012    /* Write 0 to parser credits for CFC search request */
17013    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
17014
17015    /* send 10 Ethernet packets */
17016    for (i = 0; i < 10; i++) {
17017        bxe_lb_pckt(sc);
17018    }
17019
17020    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
17021    count = (1000 * factor);
17022    while (count) {
17023        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17024        val = *BXE_SP(sc, wb_data[0]);
17025        if (val == 0xb0) {
17026            break;
17027        }
17028
17029        DELAY(10000);
17030        count--;
17031    }
17032
17033    if (val != 0xb0) {
17034        BLOGE(sc, "NIG timeout val=0x%x\n", val);
17035        return (-3);
17036    }
17037
17038    /* Wait until PRS register shows 2 packets */
17039    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
17040    if (val != 2) {
17041        BLOGE(sc, "PRS timeout val=0x%x\n", val);
17042    }
17043
17044    /* Write 1 to parser credits for CFC search request */
17045    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
17046
17047    /* Wait until PRS register shows 3 packets */
17048    DELAY(10000 * factor);
17049
17050    /* Wait until NIG register shows 1 packet of size 0x10 */
17051    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
17052    if (val != 3) {
17053        BLOGE(sc, "PRS timeout val=0x%x\n", val);
17054    }
17055
17056    /* clear NIG EOP FIFO */
17057    for (i = 0; i < 11; i++) {
17058        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
17059    }
17060
17061    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
17062    if (val != 1) {
17063        BLOGE(sc, "clear of NIG failed\n");
17064        return (-4);
17065    }
17066
17067    /* Reset and init BRB, PRS, NIG */
17068    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
17069    DELAY(50000);
17070    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
17071    DELAY(50000);
17072    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17073    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17074    if (!CNIC_SUPPORT(sc)) {
17075        /* set NIC mode */
17076        REG_WR(sc, PRS_REG_NIC_MODE, 1);
17077    }
17078
17079    /* Enable inputs of parser neighbor blocks */
17080    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
17081    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
17082    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
17083    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
17084
17085    return (0);
17086}
17087
17088static void
17089bxe_setup_fan_failure_detection(struct bxe_softc *sc)
17090{
17091    int is_required;
17092    uint32_t val;
17093    int port;
17094
17095    is_required = 0;
17096    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
17097           SHARED_HW_CFG_FAN_FAILURE_MASK);
17098
17099    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
17100        is_required = 1;
17101    }
17102    /*
17103     * The fan failure mechanism is usually related to the PHY type since
17104     * the power consumption of the board is affected by the PHY. Currently,
17105     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
17106     */
17107    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
17108        for (port = PORT_0; port < PORT_MAX; port++) {
17109            is_required |= elink_fan_failure_det_req(sc,
17110                                                     sc->devinfo.shmem_base,
17111                                                     sc->devinfo.shmem2_base,
17112                                                     port);
17113        }
17114    }
17115
17116    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
17117
17118    if (is_required == 0) {
17119        return;
17120    }
17121
17122    /* Fan failure is indicated by SPIO 5 */
17123    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
17124
17125    /* set to active low mode */
17126    val = REG_RD(sc, MISC_REG_SPIO_INT);
17127    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
17128    REG_WR(sc, MISC_REG_SPIO_INT, val);
17129
17130    /* enable interrupt to signal the IGU */
17131    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17132    val |= MISC_SPIO_SPIO5;
17133    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
17134}
17135
17136static void
17137bxe_enable_blocks_attention(struct bxe_softc *sc)
17138{
17139    uint32_t val;
17140
17141    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17142    if (!CHIP_IS_E1x(sc)) {
17143        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
17144    } else {
17145        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
17146    }
17147    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17148    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17149    /*
17150     * mask read length error interrupts in brb for parser
17151     * (parsing unit and 'checksum and crc' unit)
17152     * these errors are legal (PU reads fixed length and CAC can cause
17153     * read length error on truncated packets)
17154     */
17155    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
17156    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
17157    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
17158    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
17159    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
17160    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
17161/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
17162/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
17163    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
17164    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
17165    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
17166/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
17167/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
17168    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
17169    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
17170    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
17171    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
17172/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
17173/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
17174
17175    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
17176           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
17177           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
17178    if (!CHIP_IS_E1x(sc)) {
17179        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
17180                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
17181    }
17182    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
17183
17184    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
17185    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
17186    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
17187/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
17188
17189    if (!CHIP_IS_E1x(sc)) {
17190        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
17191        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
17192    }
17193
17194    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
17195    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
17196/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
17197    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
17198}
17199
17200/**
17201 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17202 *
17203 * @sc:     driver handle
17204 */
17205static int
17206bxe_init_hw_common(struct bxe_softc *sc)
17207{
17208    uint8_t abs_func_id;
17209    uint32_t val;
17210
17211    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17212          SC_ABS_FUNC(sc));
17213
17214    /*
17215     * take the RESET lock to protect undi_unload flow from accessing
17216     * registers while we are resetting the chip
17217     */
17218    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17219
17220    bxe_reset_common(sc);
17221
17222    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17223
17224    val = 0xfffc;
17225    if (CHIP_IS_E3(sc)) {
17226        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17227        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17228    }
17229
17230    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17231
17232    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17233
17234    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17235    BLOGD(sc, DBG_LOAD, "after misc block init\n");
17236
17237    if (!CHIP_IS_E1x(sc)) {
17238        /*
17239         * 4-port mode or 2-port mode we need to turn off master-enable for
17240         * everyone. After that we turn it back on for self. So, we disregard
17241         * multi-function, and always disable all functions on the given path,
17242         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17243         */
17244        for (abs_func_id = SC_PATH(sc);
17245             abs_func_id < (E2_FUNC_MAX * 2);
17246             abs_func_id += 2) {
17247            if (abs_func_id == SC_ABS_FUNC(sc)) {
17248                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17249                continue;
17250            }
17251
17252            bxe_pretend_func(sc, abs_func_id);
17253
17254            /* clear pf enable */
17255            bxe_pf_disable(sc);
17256
17257            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17258        }
17259    }
17260
17261    BLOGD(sc, DBG_LOAD, "after pf disable\n");
17262
17263    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17264
17265    if (CHIP_IS_E1(sc)) {
17266        /*
17267         * enable HW interrupt from PXP on USDM overflow
17268         * bit 16 on INT_MASK_0
17269         */
17270        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17271    }
17272
17273    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17274    bxe_init_pxp(sc);
17275
17276#ifdef __BIG_ENDIAN
17277    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17278    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17279    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17280    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17281    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17282    /* make sure this value is 0 */
17283    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17284
17285    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17286    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17287    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17288    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17289    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17290#endif
17291
17292    ecore_ilt_init_page_size(sc, INITOP_SET);
17293
17294    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17295        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17296    }
17297
17298    /* let the HW do it's magic... */
17299    DELAY(100000);
17300
17301    /* finish PXP init */
17302    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17303    if (val != 1) {
17304        BLOGE(sc, "PXP2 CFG failed\n");
17305        return (-1);
17306    }
17307    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17308    if (val != 1) {
17309        BLOGE(sc, "PXP2 RD_INIT failed\n");
17310        return (-1);
17311    }
17312
17313    BLOGD(sc, DBG_LOAD, "after pxp init\n");
17314
17315    /*
17316     * Timer bug workaround for E2 only. We need to set the entire ILT to have
17317     * entries with value "0" and valid bit on. This needs to be done by the
17318     * first PF that is loaded in a path (i.e. common phase)
17319     */
17320    if (!CHIP_IS_E1x(sc)) {
17321/*
17322 * In E2 there is a bug in the timers block that can cause function 6 / 7
17323 * (i.e. vnic3) to start even if it is marked as "scan-off".
17324 * This occurs when a different function (func2,3) is being marked
17325 * as "scan-off". Real-life scenario for example: if a driver is being
17326 * load-unloaded while func6,7 are down. This will cause the timer to access
17327 * the ilt, translate to a logical address and send a request to read/write.
17328 * Since the ilt for the function that is down is not valid, this will cause
17329 * a translation error which is unrecoverable.
17330 * The Workaround is intended to make sure that when this happens nothing
17331 * fatal will occur. The workaround:
17332 *  1.  First PF driver which loads on a path will:
17333 *      a.  After taking the chip out of reset, by using pretend,
17334 *          it will write "0" to the following registers of
17335 *          the other vnics.
17336 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17337 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17338 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17339 *          And for itself it will write '1' to
17340 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17341 *          dmae-operations (writing to pram for example.)
17342 *          note: can be done for only function 6,7 but cleaner this
17343 *            way.
17344 *      b.  Write zero+valid to the entire ILT.
17345 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
17346 *          VNIC3 (of that port). The range allocated will be the
17347 *          entire ILT. This is needed to prevent  ILT range error.
17348 *  2.  Any PF driver load flow:
17349 *      a.  ILT update with the physical addresses of the allocated
17350 *          logical pages.
17351 *      b.  Wait 20msec. - note that this timeout is needed to make
17352 *          sure there are no requests in one of the PXP internal
17353 *          queues with "old" ILT addresses.
17354 *      c.  PF enable in the PGLC.
17355 *      d.  Clear the was_error of the PF in the PGLC. (could have
17356 *          occurred while driver was down)
17357 *      e.  PF enable in the CFC (WEAK + STRONG)
17358 *      f.  Timers scan enable
17359 *  3.  PF driver unload flow:
17360 *      a.  Clear the Timers scan_en.
17361 *      b.  Polling for scan_on=0 for that PF.
17362 *      c.  Clear the PF enable bit in the PXP.
17363 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
17364 *      e.  Write zero+valid to all ILT entries (The valid bit must
17365 *          stay set)
17366 *      f.  If this is VNIC 3 of a port then also init
17367 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
17368 *          to the last enrty in the ILT.
17369 *
17370 *      Notes:
17371 *      Currently the PF error in the PGLC is non recoverable.
17372 *      In the future the there will be a recovery routine for this error.
17373 *      Currently attention is masked.
17374 *      Having an MCP lock on the load/unload process does not guarantee that
17375 *      there is no Timer disable during Func6/7 enable. This is because the
17376 *      Timers scan is currently being cleared by the MCP on FLR.
17377 *      Step 2.d can be done only for PF6/7 and the driver can also check if
17378 *      there is error before clearing it. But the flow above is simpler and
17379 *      more general.
17380 *      All ILT entries are written by zero+valid and not just PF6/7
17381 *      ILT entries since in the future the ILT entries allocation for
17382 *      PF-s might be dynamic.
17383 */
17384        struct ilt_client_info ilt_cli;
17385        struct ecore_ilt ilt;
17386
17387        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17388        memset(&ilt, 0, sizeof(struct ecore_ilt));
17389
17390        /* initialize dummy TM client */
17391        ilt_cli.start      = 0;
17392        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
17393        ilt_cli.client_num = ILT_CLIENT_TM;
17394
17395        /*
17396         * Step 1: set zeroes to all ilt page entries with valid bit on
17397         * Step 2: set the timers first/last ilt entry to point
17398         * to the entire range to prevent ILT range error for 3rd/4th
17399         * vnic (this code assumes existence of the vnic)
17400         *
17401         * both steps performed by call to ecore_ilt_client_init_op()
17402         * with dummy TM client
17403         *
17404         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17405         * and his brother are split registers
17406         */
17407
17408        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17409        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17410        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17411
17412        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17413        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17414        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17415    }
17416
17417    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17418    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17419
17420    if (!CHIP_IS_E1x(sc)) {
17421        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17422                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17423
17424        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17425        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17426
17427        /* let the HW do it's magic... */
17428        do {
17429            DELAY(200000);
17430            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17431        } while (factor-- && (val != 1));
17432
17433        if (val != 1) {
17434            BLOGE(sc, "ATC_INIT failed\n");
17435            return (-1);
17436        }
17437    }
17438
17439    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17440
17441    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17442
17443    bxe_iov_init_dmae(sc);
17444
17445    /* clean the DMAE memory */
17446    sc->dmae_ready = 1;
17447    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17448
17449    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17450
17451    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17452
17453    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17454
17455    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17456
17457    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17458    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17459    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17460    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17461
17462    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17463
17464    /* QM queues pointers table */
17465    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17466
17467    /* soft reset pulse */
17468    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17469    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17470
17471    if (CNIC_SUPPORT(sc))
17472        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17473
17474    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17475    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17476    if (!CHIP_REV_IS_SLOW(sc)) {
17477        /* enable hw interrupt from doorbell Q */
17478        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17479    }
17480
17481    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17482
17483    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17484    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17485
17486    if (!CHIP_IS_E1(sc)) {
17487        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17488    }
17489
17490    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17491        if (IS_MF_AFEX(sc)) {
17492            /*
17493             * configure that AFEX and VLAN headers must be
17494             * received in AFEX mode
17495             */
17496            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17497            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17498            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17499            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17500            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17501        } else {
17502            /*
17503             * Bit-map indicating which L2 hdrs may appear
17504             * after the basic Ethernet header
17505             */
17506            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17507                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17508        }
17509    }
17510
17511    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17512    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17513    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17514    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17515
17516    if (!CHIP_IS_E1x(sc)) {
17517        /* reset VFC memories */
17518        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17519               VFC_MEMORIES_RST_REG_CAM_RST |
17520               VFC_MEMORIES_RST_REG_RAM_RST);
17521        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17522               VFC_MEMORIES_RST_REG_CAM_RST |
17523               VFC_MEMORIES_RST_REG_RAM_RST);
17524
17525        DELAY(20000);
17526    }
17527
17528    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17529    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17530    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17531    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17532
17533    /* sync semi rtc */
17534    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17535           0x80000000);
17536    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17537           0x80000000);
17538
17539    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17540    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17541    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17542
17543    if (!CHIP_IS_E1x(sc)) {
17544        if (IS_MF_AFEX(sc)) {
17545            /*
17546             * configure that AFEX and VLAN headers must be
17547             * sent in AFEX mode
17548             */
17549            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17550            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17551            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17552            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17553            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17554        } else {
17555            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17556                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17557        }
17558    }
17559
17560    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17561
17562    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17563
17564    if (CNIC_SUPPORT(sc)) {
17565        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17566        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17567        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17568        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17569        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17570        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17571        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17572        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17573        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17574        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17575    }
17576    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17577
17578    if (sizeof(union cdu_context) != 1024) {
17579        /* we currently assume that a context is 1024 bytes */
17580        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17581              (long)sizeof(union cdu_context));
17582    }
17583
17584    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17585    val = (4 << 24) + (0 << 12) + 1024;
17586    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17587
17588    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17589
17590    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17591    /* enable context validation interrupt from CFC */
17592    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17593
17594    /* set the thresholds to prevent CFC/CDU race */
17595    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17596    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17597
17598    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17599        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17600    }
17601
17602    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17603    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17604
17605    /* Reset PCIE errors for debug */
17606    REG_WR(sc, 0x2814, 0xffffffff);
17607    REG_WR(sc, 0x3820, 0xffffffff);
17608
17609    if (!CHIP_IS_E1x(sc)) {
17610        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17611               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17612                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17613        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17614               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17615                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17616                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17617        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17618               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17619                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17620                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17621    }
17622
17623    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17624
17625    if (!CHIP_IS_E1(sc)) {
17626        /* in E3 this done in per-port section */
17627        if (!CHIP_IS_E3(sc))
17628            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17629    }
17630
17631    if (CHIP_IS_E1H(sc)) {
17632        /* not applicable for E2 (and above ...) */
17633        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17634    }
17635
17636    if (CHIP_REV_IS_SLOW(sc)) {
17637        DELAY(200000);
17638    }
17639
17640    /* finish CFC init */
17641    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17642    if (val != 1) {
17643        BLOGE(sc, "CFC LL_INIT failed\n");
17644        return (-1);
17645    }
17646    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17647    if (val != 1) {
17648        BLOGE(sc, "CFC AC_INIT failed\n");
17649        return (-1);
17650    }
17651    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17652    if (val != 1) {
17653        BLOGE(sc, "CFC CAM_INIT failed\n");
17654        return (-1);
17655    }
17656    REG_WR(sc, CFC_REG_DEBUG0, 0);
17657
17658    if (CHIP_IS_E1(sc)) {
17659        /* read NIG statistic to see if this is our first up since powerup */
17660        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17661        val = *BXE_SP(sc, wb_data[0]);
17662
17663        /* do internal memory self test */
17664        if ((val == 0) && bxe_int_mem_test(sc)) {
17665            BLOGE(sc, "internal mem self test failed\n");
17666            return (-1);
17667        }
17668    }
17669
17670    bxe_setup_fan_failure_detection(sc);
17671
17672    /* clear PXP2 attentions */
17673    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17674
17675    bxe_enable_blocks_attention(sc);
17676
17677    if (!CHIP_REV_IS_SLOW(sc)) {
17678        ecore_enable_blocks_parity(sc);
17679    }
17680
17681    if (!BXE_NOMCP(sc)) {
17682        if (CHIP_IS_E1x(sc)) {
17683            bxe_common_init_phy(sc);
17684        }
17685    }
17686
17687    return (0);
17688}
17689
17690/**
17691 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17692 *
17693 * @sc:     driver handle
17694 */
17695static int
17696bxe_init_hw_common_chip(struct bxe_softc *sc)
17697{
17698    int rc = bxe_init_hw_common(sc);
17699
17700    if (rc) {
17701        return (rc);
17702    }
17703
17704    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17705    if (!BXE_NOMCP(sc)) {
17706        bxe_common_init_phy(sc);
17707    }
17708
17709    return (0);
17710}
17711
17712static int
17713bxe_init_hw_port(struct bxe_softc *sc)
17714{
17715    int port = SC_PORT(sc);
17716    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17717    uint32_t low, high;
17718    uint32_t val;
17719
17720    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17721
17722    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17723
17724    ecore_init_block(sc, BLOCK_MISC, init_phase);
17725    ecore_init_block(sc, BLOCK_PXP, init_phase);
17726    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17727
17728    /*
17729     * Timers bug workaround: disables the pf_master bit in pglue at
17730     * common phase, we need to enable it here before any dmae access are
17731     * attempted. Therefore we manually added the enable-master to the
17732     * port phase (it also happens in the function phase)
17733     */
17734    if (!CHIP_IS_E1x(sc)) {
17735        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17736    }
17737
17738    ecore_init_block(sc, BLOCK_ATC, init_phase);
17739    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17740    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17741    ecore_init_block(sc, BLOCK_QM, init_phase);
17742
17743    ecore_init_block(sc, BLOCK_TCM, init_phase);
17744    ecore_init_block(sc, BLOCK_UCM, init_phase);
17745    ecore_init_block(sc, BLOCK_CCM, init_phase);
17746    ecore_init_block(sc, BLOCK_XCM, init_phase);
17747
17748    /* QM cid (connection) count */
17749    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17750
17751    if (CNIC_SUPPORT(sc)) {
17752        ecore_init_block(sc, BLOCK_TM, init_phase);
17753        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17754        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17755    }
17756
17757    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17758
17759    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17760
17761    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17762        if (IS_MF(sc)) {
17763            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17764        } else if (sc->mtu > 4096) {
17765            if (BXE_ONE_PORT(sc)) {
17766                low = 160;
17767            } else {
17768                val = sc->mtu;
17769                /* (24*1024 + val*4)/256 */
17770                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17771            }
17772        } else {
17773            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17774        }
17775        high = (low + 56); /* 14*1024/256 */
17776        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17777        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17778    }
17779
17780    if (CHIP_IS_MODE_4_PORT(sc)) {
17781        REG_WR(sc, SC_PORT(sc) ?
17782               BRB1_REG_MAC_GUARANTIED_1 :
17783               BRB1_REG_MAC_GUARANTIED_0, 40);
17784    }
17785
17786    ecore_init_block(sc, BLOCK_PRS, init_phase);
17787    if (CHIP_IS_E3B0(sc)) {
17788        if (IS_MF_AFEX(sc)) {
17789            /* configure headers for AFEX mode */
17790            REG_WR(sc, SC_PORT(sc) ?
17791                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17792                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17793            REG_WR(sc, SC_PORT(sc) ?
17794                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17795                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17796            REG_WR(sc, SC_PORT(sc) ?
17797                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17798                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17799        } else {
17800            /* Ovlan exists only if we are in multi-function +
17801             * switch-dependent mode, in switch-independent there
17802             * is no ovlan headers
17803             */
17804            REG_WR(sc, SC_PORT(sc) ?
17805                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17806                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17807                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17808        }
17809    }
17810
17811    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17812    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17813    ecore_init_block(sc, BLOCK_USDM, init_phase);
17814    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17815
17816    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17817    ecore_init_block(sc, BLOCK_USEM, init_phase);
17818    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17819    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17820
17821    ecore_init_block(sc, BLOCK_UPB, init_phase);
17822    ecore_init_block(sc, BLOCK_XPB, init_phase);
17823
17824    ecore_init_block(sc, BLOCK_PBF, init_phase);
17825
17826    if (CHIP_IS_E1x(sc)) {
17827        /* configure PBF to work without PAUSE mtu 9000 */
17828        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17829
17830        /* update threshold */
17831        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17832        /* update init credit */
17833        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17834
17835        /* probe changes */
17836        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17837        DELAY(50);
17838        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17839    }
17840
17841    if (CNIC_SUPPORT(sc)) {
17842        ecore_init_block(sc, BLOCK_SRC, init_phase);
17843    }
17844
17845    ecore_init_block(sc, BLOCK_CDU, init_phase);
17846    ecore_init_block(sc, BLOCK_CFC, init_phase);
17847
17848    if (CHIP_IS_E1(sc)) {
17849        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17850        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17851    }
17852    ecore_init_block(sc, BLOCK_HC, init_phase);
17853
17854    ecore_init_block(sc, BLOCK_IGU, init_phase);
17855
17856    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17857    /* init aeu_mask_attn_func_0/1:
17858     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17859     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17860     *             bits 4-7 are used for "per vn group attention" */
17861    val = IS_MF(sc) ? 0xF7 : 0x7;
17862    /* Enable DCBX attention for all but E1 */
17863    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17864    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17865
17866    ecore_init_block(sc, BLOCK_NIG, init_phase);
17867
17868    if (!CHIP_IS_E1x(sc)) {
17869        /* Bit-map indicating which L2 hdrs may appear after the
17870         * basic Ethernet header
17871         */
17872        if (IS_MF_AFEX(sc)) {
17873            REG_WR(sc, SC_PORT(sc) ?
17874                   NIG_REG_P1_HDRS_AFTER_BASIC :
17875                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17876        } else {
17877            REG_WR(sc, SC_PORT(sc) ?
17878                   NIG_REG_P1_HDRS_AFTER_BASIC :
17879                   NIG_REG_P0_HDRS_AFTER_BASIC,
17880                   IS_MF_SD(sc) ? 7 : 6);
17881        }
17882
17883        if (CHIP_IS_E3(sc)) {
17884            REG_WR(sc, SC_PORT(sc) ?
17885                   NIG_REG_LLH1_MF_MODE :
17886                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17887        }
17888    }
17889    if (!CHIP_IS_E3(sc)) {
17890        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17891    }
17892
17893    if (!CHIP_IS_E1(sc)) {
17894        /* 0x2 disable mf_ov, 0x1 enable */
17895        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17896               (IS_MF_SD(sc) ? 0x1 : 0x2));
17897
17898        if (!CHIP_IS_E1x(sc)) {
17899            val = 0;
17900            switch (sc->devinfo.mf_info.mf_mode) {
17901            case MULTI_FUNCTION_SD:
17902                val = 1;
17903                break;
17904            case MULTI_FUNCTION_SI:
17905            case MULTI_FUNCTION_AFEX:
17906                val = 2;
17907                break;
17908            }
17909
17910            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17911                        NIG_REG_LLH0_CLS_TYPE), val);
17912        }
17913        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17914        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17915        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17916    }
17917
17918    /* If SPIO5 is set to generate interrupts, enable it for this port */
17919    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17920    if (val & MISC_SPIO_SPIO5) {
17921        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17922                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17923        val = REG_RD(sc, reg_addr);
17924        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17925        REG_WR(sc, reg_addr, val);
17926    }
17927
17928    return (0);
17929}
17930
17931static uint32_t
17932bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17933                       uint32_t         reg,
17934                       uint32_t         expected,
17935                       uint32_t         poll_count)
17936{
17937    uint32_t cur_cnt = poll_count;
17938    uint32_t val;
17939
17940    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17941        DELAY(FLR_WAIT_INTERVAL);
17942    }
17943
17944    return (val);
17945}
17946
17947static int
17948bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17949                              uint32_t         reg,
17950                              char             *msg,
17951                              uint32_t         poll_cnt)
17952{
17953    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17954
17955    if (val != 0) {
17956        BLOGE(sc, "%s usage count=%d\n", msg, val);
17957        return (1);
17958    }
17959
17960    return (0);
17961}
17962
17963/* Common routines with VF FLR cleanup */
17964static uint32_t
17965bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17966{
17967    /* adjust polling timeout */
17968    if (CHIP_REV_IS_EMUL(sc)) {
17969        return (FLR_POLL_CNT * 2000);
17970    }
17971
17972    if (CHIP_REV_IS_FPGA(sc)) {
17973        return (FLR_POLL_CNT * 120);
17974    }
17975
17976    return (FLR_POLL_CNT);
17977}
17978
17979static int
17980bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17981                           uint32_t         poll_cnt)
17982{
17983    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17984    if (bxe_flr_clnup_poll_hw_counter(sc,
17985                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17986                                      "CFC PF usage counter timed out",
17987                                      poll_cnt)) {
17988        return (1);
17989    }
17990
17991    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17992    if (bxe_flr_clnup_poll_hw_counter(sc,
17993                                      DORQ_REG_PF_USAGE_CNT,
17994                                      "DQ PF usage counter timed out",
17995                                      poll_cnt)) {
17996        return (1);
17997    }
17998
17999    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
18000    if (bxe_flr_clnup_poll_hw_counter(sc,
18001                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
18002                                      "QM PF usage counter timed out",
18003                                      poll_cnt)) {
18004        return (1);
18005    }
18006
18007    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
18008    if (bxe_flr_clnup_poll_hw_counter(sc,
18009                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
18010                                      "Timers VNIC usage counter timed out",
18011                                      poll_cnt)) {
18012        return (1);
18013    }
18014
18015    if (bxe_flr_clnup_poll_hw_counter(sc,
18016                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
18017                                      "Timers NUM_SCANS usage counter timed out",
18018                                      poll_cnt)) {
18019        return (1);
18020    }
18021
18022    /* Wait DMAE PF usage counter to zero */
18023    if (bxe_flr_clnup_poll_hw_counter(sc,
18024                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
18025                                      "DMAE dommand register timed out",
18026                                      poll_cnt)) {
18027        return (1);
18028    }
18029
18030    return (0);
18031}
18032
18033#define OP_GEN_PARAM(param)                                            \
18034    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
18035#define OP_GEN_TYPE(type)                                           \
18036    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
18037#define OP_GEN_AGG_VECT(index)                                             \
18038    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
18039
18040static int
18041bxe_send_final_clnup(struct bxe_softc *sc,
18042                     uint8_t          clnup_func,
18043                     uint32_t         poll_cnt)
18044{
18045    uint32_t op_gen_command = 0;
18046    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
18047                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
18048    int ret = 0;
18049
18050    if (REG_RD(sc, comp_addr)) {
18051        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
18052        return (1);
18053    }
18054
18055    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
18056    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
18057    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
18058    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
18059
18060    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
18061    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
18062
18063    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
18064        BLOGE(sc, "FW final cleanup did not succeed\n");
18065        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
18066              (REG_RD(sc, comp_addr)));
18067        bxe_panic(sc, ("FLR cleanup failed\n"));
18068        return (1);
18069    }
18070
18071    /* Zero completion for nxt FLR */
18072    REG_WR(sc, comp_addr, 0);
18073
18074    return (ret);
18075}
18076
18077static void
18078bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
18079                       struct pbf_pN_buf_regs *regs,
18080                       uint32_t               poll_count)
18081{
18082    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
18083    uint32_t cur_cnt = poll_count;
18084
18085    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
18086    crd = crd_start = REG_RD(sc, regs->crd);
18087    init_crd = REG_RD(sc, regs->init_crd);
18088
18089    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
18090    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
18091    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
18092
18093    while ((crd != init_crd) &&
18094           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
18095            (init_crd - crd_start))) {
18096        if (cur_cnt--) {
18097            DELAY(FLR_WAIT_INTERVAL);
18098            crd = REG_RD(sc, regs->crd);
18099            crd_freed = REG_RD(sc, regs->crd_freed);
18100        } else {
18101            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
18102            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
18103            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
18104            break;
18105        }
18106    }
18107
18108    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
18109          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18110}
18111
18112static void
18113bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
18114                       struct pbf_pN_cmd_regs *regs,
18115                       uint32_t               poll_count)
18116{
18117    uint32_t occup, to_free, freed, freed_start;
18118    uint32_t cur_cnt = poll_count;
18119
18120    occup = to_free = REG_RD(sc, regs->lines_occup);
18121    freed = freed_start = REG_RD(sc, regs->lines_freed);
18122
18123    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
18124    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
18125
18126    while (occup &&
18127           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
18128        if (cur_cnt--) {
18129            DELAY(FLR_WAIT_INTERVAL);
18130            occup = REG_RD(sc, regs->lines_occup);
18131            freed = REG_RD(sc, regs->lines_freed);
18132        } else {
18133            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
18134            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
18135            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
18136            break;
18137        }
18138    }
18139
18140    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
18141          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18142}
18143
18144static void
18145bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
18146{
18147    struct pbf_pN_cmd_regs cmd_regs[] = {
18148        {0, (CHIP_IS_E3B0(sc)) ?
18149            PBF_REG_TQ_OCCUPANCY_Q0 :
18150            PBF_REG_P0_TQ_OCCUPANCY,
18151            (CHIP_IS_E3B0(sc)) ?
18152            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
18153            PBF_REG_P0_TQ_LINES_FREED_CNT},
18154        {1, (CHIP_IS_E3B0(sc)) ?
18155            PBF_REG_TQ_OCCUPANCY_Q1 :
18156            PBF_REG_P1_TQ_OCCUPANCY,
18157            (CHIP_IS_E3B0(sc)) ?
18158            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
18159            PBF_REG_P1_TQ_LINES_FREED_CNT},
18160        {4, (CHIP_IS_E3B0(sc)) ?
18161            PBF_REG_TQ_OCCUPANCY_LB_Q :
18162            PBF_REG_P4_TQ_OCCUPANCY,
18163            (CHIP_IS_E3B0(sc)) ?
18164            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
18165            PBF_REG_P4_TQ_LINES_FREED_CNT}
18166    };
18167
18168    struct pbf_pN_buf_regs buf_regs[] = {
18169        {0, (CHIP_IS_E3B0(sc)) ?
18170            PBF_REG_INIT_CRD_Q0 :
18171            PBF_REG_P0_INIT_CRD ,
18172            (CHIP_IS_E3B0(sc)) ?
18173            PBF_REG_CREDIT_Q0 :
18174            PBF_REG_P0_CREDIT,
18175            (CHIP_IS_E3B0(sc)) ?
18176            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
18177            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
18178        {1, (CHIP_IS_E3B0(sc)) ?
18179            PBF_REG_INIT_CRD_Q1 :
18180            PBF_REG_P1_INIT_CRD,
18181            (CHIP_IS_E3B0(sc)) ?
18182            PBF_REG_CREDIT_Q1 :
18183            PBF_REG_P1_CREDIT,
18184            (CHIP_IS_E3B0(sc)) ?
18185            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
18186            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
18187        {4, (CHIP_IS_E3B0(sc)) ?
18188            PBF_REG_INIT_CRD_LB_Q :
18189            PBF_REG_P4_INIT_CRD,
18190            (CHIP_IS_E3B0(sc)) ?
18191            PBF_REG_CREDIT_LB_Q :
18192            PBF_REG_P4_CREDIT,
18193            (CHIP_IS_E3B0(sc)) ?
18194            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
18195            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
18196    };
18197
18198    int i;
18199
18200    /* Verify the command queues are flushed P0, P1, P4 */
18201    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18202        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18203    }
18204
18205    /* Verify the transmission buffers are flushed P0, P1, P4 */
18206    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18207        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18208    }
18209}
18210
18211static void
18212bxe_hw_enable_status(struct bxe_softc *sc)
18213{
18214    uint32_t val;
18215
18216    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18217    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18218
18219    val = REG_RD(sc, PBF_REG_DISABLE_PF);
18220    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18221
18222    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18223    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18224
18225    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18226    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18227
18228    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18229    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18230
18231    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18232    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18233
18234    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18235    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18236
18237    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18238    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18239}
18240
18241static int
18242bxe_pf_flr_clnup(struct bxe_softc *sc)
18243{
18244    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18245
18246    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18247
18248    /* Re-enable PF target read access */
18249    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18250
18251    /* Poll HW usage counters */
18252    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18253    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18254        return (-1);
18255    }
18256
18257    /* Zero the igu 'trailing edge' and 'leading edge' */
18258
18259    /* Send the FW cleanup command */
18260    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18261        return (-1);
18262    }
18263
18264    /* ATC cleanup */
18265
18266    /* Verify TX hw is flushed */
18267    bxe_tx_hw_flushed(sc, poll_cnt);
18268
18269    /* Wait 100ms (not adjusted according to platform) */
18270    DELAY(100000);
18271
18272    /* Verify no pending pci transactions */
18273    if (bxe_is_pcie_pending(sc)) {
18274        BLOGE(sc, "PCIE Transactions still pending\n");
18275    }
18276
18277    /* Debug */
18278    bxe_hw_enable_status(sc);
18279
18280    /*
18281     * Master enable - Due to WB DMAE writes performed before this
18282     * register is re-initialized as part of the regular function init
18283     */
18284    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18285
18286    return (0);
18287}
18288
18289#if 0
18290static void
18291bxe_init_searcher(struct bxe_softc *sc)
18292{
18293    int port = SC_PORT(sc);
18294    ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM);
18295    /* T1 hash bits value determines the T1 number of entries */
18296    REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
18297}
18298#endif
18299
18300static int
18301bxe_init_hw_func(struct bxe_softc *sc)
18302{
18303    int port = SC_PORT(sc);
18304    int func = SC_FUNC(sc);
18305    int init_phase = PHASE_PF0 + func;
18306    struct ecore_ilt *ilt = sc->ilt;
18307    uint16_t cdu_ilt_start;
18308    uint32_t addr, val;
18309    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18310    int i, main_mem_width, rc;
18311
18312    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18313
18314    /* FLR cleanup */
18315    if (!CHIP_IS_E1x(sc)) {
18316        rc = bxe_pf_flr_clnup(sc);
18317        if (rc) {
18318            BLOGE(sc, "FLR cleanup failed!\n");
18319            // XXX bxe_fw_dump(sc);
18320            // XXX bxe_idle_chk(sc);
18321            return (rc);
18322        }
18323    }
18324
18325    /* set MSI reconfigure capability */
18326    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18327        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18328        val = REG_RD(sc, addr);
18329        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18330        REG_WR(sc, addr, val);
18331    }
18332
18333    ecore_init_block(sc, BLOCK_PXP, init_phase);
18334    ecore_init_block(sc, BLOCK_PXP2, init_phase);
18335
18336    ilt = sc->ilt;
18337    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18338
18339#if 0
18340    if (IS_SRIOV(sc)) {
18341        cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS;
18342    }
18343    cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start);
18344
18345#if (BXE_FIRST_VF_CID > 0)
18346    /*
18347     * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precedes
18348     * those of the VFs, so start line should be reset
18349     */
18350    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18351#endif
18352#endif
18353
18354    for (i = 0; i < L2_ILT_LINES(sc); i++) {
18355        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18356        ilt->lines[cdu_ilt_start + i].page_mapping =
18357            sc->context[i].vcxt_dma.paddr;
18358        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18359    }
18360    ecore_ilt_init_op(sc, INITOP_SET);
18361
18362#if 0
18363    if (!CONFIGURE_NIC_MODE(sc)) {
18364        bxe_init_searcher(sc);
18365        REG_WR(sc, PRS_REG_NIC_MODE, 0);
18366        BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n");
18367    } else
18368#endif
18369    {
18370        /* Set NIC mode */
18371        REG_WR(sc, PRS_REG_NIC_MODE, 1);
18372        BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18373    }
18374
18375    if (!CHIP_IS_E1x(sc)) {
18376        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18377
18378        /* Turn on a single ISR mode in IGU if driver is going to use
18379         * INT#x or MSI
18380         */
18381        if (sc->interrupt_mode != INTR_MODE_MSIX) {
18382            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18383        }
18384
18385        /*
18386         * Timers workaround bug: function init part.
18387         * Need to wait 20msec after initializing ILT,
18388         * needed to make sure there are no requests in
18389         * one of the PXP internal queues with "old" ILT addresses
18390         */
18391        DELAY(20000);
18392
18393        /*
18394         * Master enable - Due to WB DMAE writes performed before this
18395         * register is re-initialized as part of the regular function
18396         * init
18397         */
18398        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18399        /* Enable the function in IGU */
18400        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18401    }
18402
18403    sc->dmae_ready = 1;
18404
18405    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18406
18407    if (!CHIP_IS_E1x(sc))
18408        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18409
18410    ecore_init_block(sc, BLOCK_ATC, init_phase);
18411    ecore_init_block(sc, BLOCK_DMAE, init_phase);
18412    ecore_init_block(sc, BLOCK_NIG, init_phase);
18413    ecore_init_block(sc, BLOCK_SRC, init_phase);
18414    ecore_init_block(sc, BLOCK_MISC, init_phase);
18415    ecore_init_block(sc, BLOCK_TCM, init_phase);
18416    ecore_init_block(sc, BLOCK_UCM, init_phase);
18417    ecore_init_block(sc, BLOCK_CCM, init_phase);
18418    ecore_init_block(sc, BLOCK_XCM, init_phase);
18419    ecore_init_block(sc, BLOCK_TSEM, init_phase);
18420    ecore_init_block(sc, BLOCK_USEM, init_phase);
18421    ecore_init_block(sc, BLOCK_CSEM, init_phase);
18422    ecore_init_block(sc, BLOCK_XSEM, init_phase);
18423
18424    if (!CHIP_IS_E1x(sc))
18425        REG_WR(sc, QM_REG_PF_EN, 1);
18426
18427    if (!CHIP_IS_E1x(sc)) {
18428        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18429        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18430        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18431        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18432    }
18433    ecore_init_block(sc, BLOCK_QM, init_phase);
18434
18435    ecore_init_block(sc, BLOCK_TM, init_phase);
18436    ecore_init_block(sc, BLOCK_DORQ, init_phase);
18437
18438    bxe_iov_init_dq(sc);
18439
18440    ecore_init_block(sc, BLOCK_BRB1, init_phase);
18441    ecore_init_block(sc, BLOCK_PRS, init_phase);
18442    ecore_init_block(sc, BLOCK_TSDM, init_phase);
18443    ecore_init_block(sc, BLOCK_CSDM, init_phase);
18444    ecore_init_block(sc, BLOCK_USDM, init_phase);
18445    ecore_init_block(sc, BLOCK_XSDM, init_phase);
18446    ecore_init_block(sc, BLOCK_UPB, init_phase);
18447    ecore_init_block(sc, BLOCK_XPB, init_phase);
18448    ecore_init_block(sc, BLOCK_PBF, init_phase);
18449    if (!CHIP_IS_E1x(sc))
18450        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18451
18452    ecore_init_block(sc, BLOCK_CDU, init_phase);
18453
18454    ecore_init_block(sc, BLOCK_CFC, init_phase);
18455
18456    if (!CHIP_IS_E1x(sc))
18457        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18458
18459    if (IS_MF(sc)) {
18460        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18461        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18462    }
18463
18464    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18465
18466    /* HC init per function */
18467    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18468        if (CHIP_IS_E1H(sc)) {
18469            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18470
18471            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18472            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18473        }
18474        ecore_init_block(sc, BLOCK_HC, init_phase);
18475
18476    } else {
18477        int num_segs, sb_idx, prod_offset;
18478
18479        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18480
18481        if (!CHIP_IS_E1x(sc)) {
18482            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18483            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18484        }
18485
18486        ecore_init_block(sc, BLOCK_IGU, init_phase);
18487
18488        if (!CHIP_IS_E1x(sc)) {
18489            int dsb_idx = 0;
18490            /**
18491             * Producer memory:
18492             * E2 mode: address 0-135 match to the mapping memory;
18493             * 136 - PF0 default prod; 137 - PF1 default prod;
18494             * 138 - PF2 default prod; 139 - PF3 default prod;
18495             * 140 - PF0 attn prod;    141 - PF1 attn prod;
18496             * 142 - PF2 attn prod;    143 - PF3 attn prod;
18497             * 144-147 reserved.
18498             *
18499             * E1.5 mode - In backward compatible mode;
18500             * for non default SB; each even line in the memory
18501             * holds the U producer and each odd line hold
18502             * the C producer. The first 128 producers are for
18503             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18504             * producers are for the DSB for each PF.
18505             * Each PF has five segments: (the order inside each
18506             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18507             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18508             * 144-147 attn prods;
18509             */
18510            /* non-default-status-blocks */
18511            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18512                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18513            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18514                prod_offset = (sc->igu_base_sb + sb_idx) *
18515                    num_segs;
18516
18517                for (i = 0; i < num_segs; i++) {
18518                    addr = IGU_REG_PROD_CONS_MEMORY +
18519                            (prod_offset + i) * 4;
18520                    REG_WR(sc, addr, 0);
18521                }
18522                /* send consumer update with value 0 */
18523                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18524                           USTORM_ID, 0, IGU_INT_NOP, 1);
18525                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18526            }
18527
18528            /* default-status-blocks */
18529            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18530                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18531
18532            if (CHIP_IS_MODE_4_PORT(sc))
18533                dsb_idx = SC_FUNC(sc);
18534            else
18535                dsb_idx = SC_VN(sc);
18536
18537            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18538                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18539                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18540
18541            /*
18542             * igu prods come in chunks of E1HVN_MAX (4) -
18543             * does not matters what is the current chip mode
18544             */
18545            for (i = 0; i < (num_segs * E1HVN_MAX);
18546                 i += E1HVN_MAX) {
18547                addr = IGU_REG_PROD_CONS_MEMORY +
18548                            (prod_offset + i)*4;
18549                REG_WR(sc, addr, 0);
18550            }
18551            /* send consumer update with 0 */
18552            if (CHIP_INT_MODE_IS_BC(sc)) {
18553                bxe_ack_sb(sc, sc->igu_dsb_id,
18554                           USTORM_ID, 0, IGU_INT_NOP, 1);
18555                bxe_ack_sb(sc, sc->igu_dsb_id,
18556                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18557                bxe_ack_sb(sc, sc->igu_dsb_id,
18558                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18559                bxe_ack_sb(sc, sc->igu_dsb_id,
18560                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18561                bxe_ack_sb(sc, sc->igu_dsb_id,
18562                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18563            } else {
18564                bxe_ack_sb(sc, sc->igu_dsb_id,
18565                           USTORM_ID, 0, IGU_INT_NOP, 1);
18566                bxe_ack_sb(sc, sc->igu_dsb_id,
18567                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18568            }
18569            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18570
18571            /* !!! these should become driver const once
18572               rf-tool supports split-68 const */
18573            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18574            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18575            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18576            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18577            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18578            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18579        }
18580    }
18581
18582    /* Reset PCIE errors for debug */
18583    REG_WR(sc, 0x2114, 0xffffffff);
18584    REG_WR(sc, 0x2120, 0xffffffff);
18585
18586    if (CHIP_IS_E1x(sc)) {
18587        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18588        main_mem_base = HC_REG_MAIN_MEMORY +
18589                SC_PORT(sc) * (main_mem_size * 4);
18590        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18591        main_mem_width = 8;
18592
18593        val = REG_RD(sc, main_mem_prty_clr);
18594        if (val) {
18595            BLOGD(sc, DBG_LOAD,
18596                  "Parity errors in HC block during function init (0x%x)!\n",
18597                  val);
18598        }
18599
18600        /* Clear "false" parity errors in MSI-X table */
18601        for (i = main_mem_base;
18602             i < main_mem_base + main_mem_size * 4;
18603             i += main_mem_width) {
18604            bxe_read_dmae(sc, i, main_mem_width / 4);
18605            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18606                           i, main_mem_width / 4);
18607        }
18608        /* Clear HC parity attention */
18609        REG_RD(sc, main_mem_prty_clr);
18610    }
18611
18612#if 1
18613    /* Enable STORMs SP logging */
18614    REG_WR8(sc, BAR_USTRORM_INTMEM +
18615           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18616    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18617           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18618    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18619           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18620    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18621           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18622#endif
18623
18624    elink_phy_probe(&sc->link_params);
18625
18626    return (0);
18627}
18628
18629static void
18630bxe_link_reset(struct bxe_softc *sc)
18631{
18632    if (!BXE_NOMCP(sc)) {
18633        BXE_PHY_LOCK(sc);
18634        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18635        BXE_PHY_UNLOCK(sc);
18636    } else {
18637        if (!CHIP_REV_IS_SLOW(sc)) {
18638            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18639        }
18640    }
18641}
18642
18643static void
18644bxe_reset_port(struct bxe_softc *sc)
18645{
18646    int port = SC_PORT(sc);
18647    uint32_t val;
18648
18649    /* reset physical Link */
18650    bxe_link_reset(sc);
18651
18652    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18653
18654    /* Do not rcv packets to BRB */
18655    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18656    /* Do not direct rcv packets that are not for MCP to the BRB */
18657    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18658               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18659
18660    /* Configure AEU */
18661    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18662
18663    DELAY(100000);
18664
18665    /* Check for BRB port occupancy */
18666    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18667    if (val) {
18668        BLOGD(sc, DBG_LOAD,
18669              "BRB1 is not empty, %d blocks are occupied\n", val);
18670    }
18671
18672    /* TODO: Close Doorbell port? */
18673}
18674
18675static void
18676bxe_ilt_wr(struct bxe_softc *sc,
18677           uint32_t         index,
18678           bus_addr_t       addr)
18679{
18680    int reg;
18681    uint32_t wb_write[2];
18682
18683    if (CHIP_IS_E1(sc)) {
18684        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18685    } else {
18686        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18687    }
18688
18689    wb_write[0] = ONCHIP_ADDR1(addr);
18690    wb_write[1] = ONCHIP_ADDR2(addr);
18691    REG_WR_DMAE(sc, reg, wb_write, 2);
18692}
18693
18694static void
18695bxe_clear_func_ilt(struct bxe_softc *sc,
18696                   uint32_t         func)
18697{
18698    uint32_t i, base = FUNC_ILT_BASE(func);
18699    for (i = base; i < base + ILT_PER_FUNC; i++) {
18700        bxe_ilt_wr(sc, i, 0);
18701    }
18702}
18703
18704static void
18705bxe_reset_func(struct bxe_softc *sc)
18706{
18707    struct bxe_fastpath *fp;
18708    int port = SC_PORT(sc);
18709    int func = SC_FUNC(sc);
18710    int i;
18711
18712    /* Disable the function in the FW */
18713    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18714    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18715    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18716    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18717
18718    /* FP SBs */
18719    FOR_EACH_ETH_QUEUE(sc, i) {
18720        fp = &sc->fp[i];
18721        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18722                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18723                SB_DISABLED);
18724    }
18725
18726#if 0
18727    if (CNIC_LOADED(sc)) {
18728        /* CNIC SB */
18729        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18730                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
18731                (bxe_cnic_fw_sb_id(sc)), SB_DISABLED);
18732    }
18733#endif
18734
18735    /* SP SB */
18736    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18737            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18738            SB_DISABLED);
18739
18740    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18741        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18742    }
18743
18744    /* Configure IGU */
18745    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18746        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18747        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18748    } else {
18749        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18750        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18751    }
18752
18753    if (CNIC_LOADED(sc)) {
18754        /* Disable Timer scan */
18755        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18756        /*
18757         * Wait for at least 10ms and up to 2 second for the timers
18758         * scan to complete
18759         */
18760        for (i = 0; i < 200; i++) {
18761            DELAY(10000);
18762            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18763                break;
18764        }
18765    }
18766
18767    /* Clear ILT */
18768    bxe_clear_func_ilt(sc, func);
18769
18770    /*
18771     * Timers workaround bug for E2: if this is vnic-3,
18772     * we need to set the entire ilt range for this timers.
18773     */
18774    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18775        struct ilt_client_info ilt_cli;
18776        /* use dummy TM client */
18777        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18778        ilt_cli.start = 0;
18779        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18780        ilt_cli.client_num = ILT_CLIENT_TM;
18781
18782        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18783    }
18784
18785    /* this assumes that reset_port() called before reset_func()*/
18786    if (!CHIP_IS_E1x(sc)) {
18787        bxe_pf_disable(sc);
18788    }
18789
18790    sc->dmae_ready = 0;
18791}
18792
18793static int
18794bxe_gunzip_init(struct bxe_softc *sc)
18795{
18796    return (0);
18797}
18798
18799static void
18800bxe_gunzip_end(struct bxe_softc *sc)
18801{
18802    return;
18803}
18804
18805static int
18806bxe_init_firmware(struct bxe_softc *sc)
18807{
18808    if (CHIP_IS_E1(sc)) {
18809        ecore_init_e1_firmware(sc);
18810        sc->iro_array = e1_iro_arr;
18811    } else if (CHIP_IS_E1H(sc)) {
18812        ecore_init_e1h_firmware(sc);
18813        sc->iro_array = e1h_iro_arr;
18814    } else if (!CHIP_IS_E1x(sc)) {
18815        ecore_init_e2_firmware(sc);
18816        sc->iro_array = e2_iro_arr;
18817    } else {
18818        BLOGE(sc, "Unsupported chip revision\n");
18819        return (-1);
18820    }
18821
18822    return (0);
18823}
18824
18825static void
18826bxe_release_firmware(struct bxe_softc *sc)
18827{
18828    /* Do nothing */
18829    return;
18830}
18831
18832static int
18833ecore_gunzip(struct bxe_softc *sc,
18834             const uint8_t    *zbuf,
18835             int              len)
18836{
18837    /* XXX : Implement... */
18838    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18839    return (FALSE);
18840}
18841
18842static void
18843ecore_reg_wr_ind(struct bxe_softc *sc,
18844                 uint32_t         addr,
18845                 uint32_t         val)
18846{
18847    bxe_reg_wr_ind(sc, addr, val);
18848}
18849
18850static void
18851ecore_write_dmae_phys_len(struct bxe_softc *sc,
18852                          bus_addr_t       phys_addr,
18853                          uint32_t         addr,
18854                          uint32_t         len)
18855{
18856    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18857}
18858
18859void
18860ecore_storm_memset_struct(struct bxe_softc *sc,
18861                          uint32_t         addr,
18862                          size_t           size,
18863                          uint32_t         *data)
18864{
18865    uint8_t i;
18866    for (i = 0; i < size/4; i++) {
18867        REG_WR(sc, addr + (i * 4), data[i]);
18868    }
18869}
18870
18871