if_bce.c (revision 213894) vs. if_bce.c (revision 215297)
1/*-
2 * Copyright (c) 2006-2010 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/bce/if_bce.c 213894 2010-10-15 15:00:30Z marius $");
32__FBSDID("$FreeBSD: head/sys/dev/bce/if_bce.c 215297 2010-11-14 13:26:10Z marius $");
33
34/*
35 * The following controllers are supported by this driver:
36 * BCM5706C A2, A3
37 * BCM5706S A2, A3
38 * BCM5708C B1, B2
39 * BCM5708S B1, B2
40 * BCM5709C A1, C0
41 * BCM5709S A1, C0
42 * BCM5716C C0
43 * BCM5716S C0
44 *
45 * The following controllers are not supported by this driver:
46 * BCM5706C A0, A1 (pre-production)
47 * BCM5706S A0, A1 (pre-production)
48 * BCM5708C A0, B0 (pre-production)
49 * BCM5708S A0, B0 (pre-production)
50 * BCM5709C A0, B0, B1, B2 (pre-production)
51 * BCM5709S A0, B0, B1, B2 (pre-production)
52 */
53
54#include "opt_bce.h"
55
56#include <dev/bce/if_bcereg.h>
57#include <dev/bce/if_bcefw.h>
58
59/****************************************************************************/
60/* BCE Debug Options */
61/****************************************************************************/
62#ifdef BCE_DEBUG
63 u32 bce_debug = BCE_WARN;
64
65 /* 0 = Never */
66 /* 1 = 1 in 2,147,483,648 */
67 /* 256 = 1 in 8,388,608 */
68 /* 2048 = 1 in 1,048,576 */
69 /* 65536 = 1 in 32,768 */
70 /* 1048576 = 1 in 2,048 */
71 /* 268435456 = 1 in 8 */
72 /* 536870912 = 1 in 4 */
73 /* 1073741824 = 1 in 2 */
74
75 /* Controls how often the l2_fhdr frame error check will fail. */
76 int l2fhdr_error_sim_control = 0;
77
78 /* Controls how often the unexpected attention check will fail. */
79 int unexpected_attention_sim_control = 0;
80
81 /* Controls how often to simulate an mbuf allocation failure. */
82 int mbuf_alloc_failed_sim_control = 0;
83
84 /* Controls how often to simulate a DMA mapping failure. */
85 int dma_map_addr_failed_sim_control = 0;
86
87 /* Controls how often to simulate a bootcode failure. */
88 int bootcode_running_failure_sim_control = 0;
89#endif
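/*
 * Illustrative sketch only: the table above documents the ratio between a
 * simulation control value and its failure rate (roughly control / 2^31).
 * The helper below is a hypothetical example of how such a value could gate
 * a randomized failure; it is not the driver's actual debug macro, and the
 * BCE_DEBUG_SIM_EXAMPLE guard and bce_sim_failure() name are invented for
 * illustration.
 */
#ifdef BCE_DEBUG_SIM_EXAMPLE
static __inline int
bce_sim_failure(u32 sim_control)
{
	/* Fires approximately sim_control times per 2,147,483,648 calls. */
	return ((arc4random() & 0x7fffffffU) < sim_control);
}
#endif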
90
91/****************************************************************************/
92/* BCE Build Time Options */
93/****************************************************************************/
94/* #define BCE_NVRAM_WRITE_SUPPORT 1 */
95
96
97/****************************************************************************/
98/* PCI Device ID Table */
99/* */
100/* Used by bce_probe() to identify the devices supported by this driver. */
101/****************************************************************************/
102#define BCE_DEVDESC_MAX 64
103
104static struct bce_type bce_devs[] = {
105 /* BCM5706C Controllers and OEM boards. */
106 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
107 "HP NC370T Multifunction Gigabit Server Adapter" },
108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
109 "HP NC370i Multifunction Gigabit Server Adapter" },
110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
111 "HP NC380T PCIe DP Multifunc Gig Server Adapter" },
112 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
113 "HP NC371i Multifunction Gigabit Server Adapter" },
114 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
115 "Broadcom NetXtreme II BCM5706 1000Base-T" },
116
117 /* BCM5706S controllers and OEM boards. */
118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
119 "HP NC370F Multifunction Gigabit Server Adapter" },
120 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
121 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
122
123 /* BCM5708C controllers and OEM boards. */
124 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
125 "HP NC373T PCIe Multifunction Gig Server Adapter" },
126 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
127 "HP NC373i Multifunction Gigabit Server Adapter" },
128 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
129 "HP NC374m PCIe Multifunction Adapter" },
130 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
131 "Broadcom NetXtreme II BCM5708 1000Base-T" },
132
133 /* BCM5708S controllers and OEM boards. */
134 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
135 "HP NC373m Multifunction Gigabit Server Adapter" },
136 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
137 "HP NC373i Multifunction Gigabit Server Adapter" },
138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
139 "HP NC373F PCIe Multifunc Giga Server Adapter" },
140 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
141 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
142
143 /* BCM5709C controllers and OEM boards. */
144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
145 "HP NC382i DP Multifunction Gigabit Server Adapter" },
146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
147 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
149 "Broadcom NetXtreme II BCM5709 1000Base-T" },
150
151 /* BCM5709S controllers and OEM boards. */
152 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
153 "HP NC382m DP 1GbE Multifunction BL-c Adapter" },
154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
155 "HP NC382i DP Multifunction Gigabit Server Adapter" },
156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
157 "Broadcom NetXtreme II BCM5709 1000Base-SX" },
158
159 /* BCM5716 controllers and OEM boards. */
160 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
161 "Broadcom NetXtreme II BCM5716 1000Base-T" },
162
163 { 0, 0, 0, 0, NULL }
164};
165
166
167/****************************************************************************/
168/* Supported Flash NVRAM device data. */
169/****************************************************************************/
170static struct flash_spec flash_table[] =
171{
172#define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
173#define NONBUFFERED_FLAGS (BCE_NV_WREN)
174
175 /* Slow EEPROM */
176 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
177 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
178 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
179 "EEPROM - slow"},
180 /* Expansion entry 0001 */
181 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
182 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
183 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
184 "Entry 0001"},
185 /* Saifun SA25F010 (non-buffered flash) */
186 /* strap, cfg1, & write1 need updates */
187 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
190 "Non-buffered flash (128kB)"},
191 /* Saifun SA25F020 (non-buffered flash) */
192 /* strap, cfg1, & write1 need updates */
193 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
194 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
196 "Non-buffered flash (256kB)"},
197 /* Expansion entry 0100 */
198 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
199 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 0100"},
202 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
203 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
204 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
205 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 206 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
207 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
208 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
209 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
210 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 211 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
212 /* Saifun SA25F005 (non-buffered flash) */
213 /* strap, cfg1, & write1 need updates */
214 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
215 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
217 "Non-buffered flash (64kB)"},
218 /* Fast EEPROM */
219 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
220 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
221 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
222 "EEPROM - fast"},
223 /* Expansion entry 1001 */
224 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
225 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
226 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
227 "Entry 1001"},
228 /* Expansion entry 1010 */
229 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
230 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
231 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
232 "Entry 1010"},
233 /* ATMEL AT45DB011B (buffered flash) */
234 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
235 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
236 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
237 "Buffered flash (128kB)"},
238 /* Expansion entry 1100 */
239 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
240 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
241 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
242 "Entry 1100"},
243 /* Expansion entry 1101 */
244 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
245 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
246 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
247 "Entry 1101"},
 248 /* Atmel Expansion entry 1110 */
249 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
250 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
251 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
252 "Entry 1110 (Atmel)"},
253 /* ATMEL AT45DB021B (buffered flash) */
254 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
255 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
256 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
257 "Buffered flash (256kB)"},
258};
259
260/*
261 * The BCM5709 controllers transparently handle the
262 * differences between Atmel 264 byte pages and all
263 * flash devices which use 256 byte pages, so no
264 * logical-to-physical mapping is required in the
265 * driver.
266 */
267static struct flash_spec flash_5709 = {
268 .flags = BCE_NV_BUFFERED,
269 .page_bits = BCM5709_FLASH_PAGE_BITS,
270 .page_size = BCM5709_FLASH_PAGE_SIZE,
271 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
272 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
273 .name = "5709/5716 buffered flash (256kB)",
274};
275
276
277/****************************************************************************/
278/* FreeBSD device entry points. */
279/****************************************************************************/
280static int bce_probe (device_t);
281static int bce_attach (device_t);
282static int bce_detach (device_t);
283static int bce_shutdown (device_t);
284
285
286/****************************************************************************/
287/* BCE Debug Data Structure Dump Routines */
288/****************************************************************************/
289#ifdef BCE_DEBUG
290static u32 bce_reg_rd (struct bce_softc *, u32);
291static void bce_reg_wr (struct bce_softc *, u32, u32);
292static void bce_reg_wr16 (struct bce_softc *, u32, u16);
293static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
294static void bce_dump_enet (struct bce_softc *, struct mbuf *);
295static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
296static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int);
297static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int);
298#ifdef BCE_JUMBO_HDRSPLIT
299static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int);
300#endif
301static void bce_dump_txbd (struct bce_softc *,
302 int, struct tx_bd *);
303static void bce_dump_rxbd (struct bce_softc *,
304 int, struct rx_bd *);
305#ifdef BCE_JUMBO_HDRSPLIT
306static void bce_dump_pgbd (struct bce_softc *,
307 int, struct rx_bd *);
308#endif
309static void bce_dump_l2fhdr (struct bce_softc *,
310 int, struct l2_fhdr *);
311static void bce_dump_ctx (struct bce_softc *, u16);
312static void bce_dump_ftqs (struct bce_softc *);
313static void bce_dump_tx_chain (struct bce_softc *, u16, int);
314static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int);
315#ifdef BCE_JUMBO_HDRSPLIT
316static void bce_dump_pg_chain (struct bce_softc *, u16, int);
317#endif
318static void bce_dump_status_block (struct bce_softc *);
319static void bce_dump_stats_block (struct bce_softc *);
320static void bce_dump_driver_state (struct bce_softc *);
321static void bce_dump_hw_state (struct bce_softc *);
322static void bce_dump_mq_regs (struct bce_softc *);
323static void bce_dump_bc_state (struct bce_softc *);
324static void bce_dump_txp_state (struct bce_softc *, int);
325static void bce_dump_rxp_state (struct bce_softc *, int);
326static void bce_dump_tpat_state (struct bce_softc *, int);
327static void bce_dump_cp_state (struct bce_softc *, int);
328static void bce_dump_com_state (struct bce_softc *, int);
329static void bce_dump_rv2p_state (struct bce_softc *);
330static void bce_breakpoint (struct bce_softc *);
331#endif
332
333
334/****************************************************************************/
335/* BCE Register/Memory Access Routines */
336/****************************************************************************/
337static u32 bce_reg_rd_ind (struct bce_softc *, u32);
338static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
339static void bce_shmem_wr (struct bce_softc *, u32, u32);
340static u32 bce_shmem_rd (struct bce_softc *, u32);
341static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
342static int bce_miibus_read_reg (device_t, int, int);
343static int bce_miibus_write_reg (device_t, int, int, int);
344static void bce_miibus_statchg (device_t);
345
346#ifdef BCE_DEBUG
347static int sysctl_nvram_dump(SYSCTL_HANDLER_ARGS);
348#ifdef BCE_NVRAM_WRITE_SUPPORT
349static int sysctl_nvram_write(SYSCTL_HANDLER_ARGS);
350#endif
351#endif
352
353/****************************************************************************/
354/* BCE NVRAM Access Routines */
355/****************************************************************************/
356static int bce_acquire_nvram_lock (struct bce_softc *);
357static int bce_release_nvram_lock (struct bce_softc *);
358static void bce_enable_nvram_access (struct bce_softc *);
359static void bce_disable_nvram_access (struct bce_softc *);
360static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
361static int bce_init_nvram (struct bce_softc *);
362static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
363static int bce_nvram_test (struct bce_softc *);
364#ifdef BCE_NVRAM_WRITE_SUPPORT
365static int bce_enable_nvram_write (struct bce_softc *);
366static void bce_disable_nvram_write (struct bce_softc *);
367static int bce_nvram_erase_page (struct bce_softc *, u32);
368static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
369static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
370#endif
371
372/****************************************************************************/
373/* */
374/****************************************************************************/
375static void bce_get_media (struct bce_softc *);
376static void bce_init_media (struct bce_softc *);
377static void bce_dma_map_addr (void *,
378 bus_dma_segment_t *, int, int);
379static int bce_dma_alloc (device_t);
380static void bce_dma_free (struct bce_softc *);
381static void bce_release_resources (struct bce_softc *);
382
383/****************************************************************************/
384/* BCE Firmware Synchronization and Load */
385/****************************************************************************/
386static int bce_fw_sync (struct bce_softc *, u32);
387static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
388static void bce_load_cpu_fw (struct bce_softc *,
389 struct cpu_reg *, struct fw_info *);
390static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
391static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *);
392static void bce_start_rxp_cpu (struct bce_softc *);
393static void bce_init_rxp_cpu (struct bce_softc *);
394static void bce_init_txp_cpu (struct bce_softc *);
395static void bce_init_tpat_cpu (struct bce_softc *);
396static void bce_init_cp_cpu (struct bce_softc *);
397static void bce_init_com_cpu (struct bce_softc *);
398static void bce_init_cpus (struct bce_softc *);
399
400static void bce_print_adapter_info (struct bce_softc *);
401static void bce_probe_pci_caps (device_t, struct bce_softc *);
402static void bce_stop (struct bce_softc *);
403static int bce_reset (struct bce_softc *, u32);
404static int bce_chipinit (struct bce_softc *);
405static int bce_blockinit (struct bce_softc *);
406
407static int bce_init_tx_chain (struct bce_softc *);
408static void bce_free_tx_chain (struct bce_softc *);
409
410static int bce_get_rx_buf (struct bce_softc *,
411 struct mbuf *, u16 *, u16 *, u32 *);
412static int bce_init_rx_chain (struct bce_softc *);
413static void bce_fill_rx_chain (struct bce_softc *);
414static void bce_free_rx_chain (struct bce_softc *);
415
416#ifdef BCE_JUMBO_HDRSPLIT
417static int bce_get_pg_buf (struct bce_softc *,
418 struct mbuf *, u16 *, u16 *);
419static int bce_init_pg_chain (struct bce_softc *);
420static void bce_fill_pg_chain (struct bce_softc *);
421static void bce_free_pg_chain (struct bce_softc *);
422#endif
423
424static struct mbuf *bce_tso_setup (struct bce_softc *,
425 struct mbuf **, u16 *);
426static int bce_tx_encap (struct bce_softc *, struct mbuf **);
427static void bce_start_locked (struct ifnet *);
428static void bce_start (struct ifnet *);
429static int bce_ioctl (struct ifnet *, u_long, caddr_t);
430static void bce_watchdog (struct bce_softc *);
431static int bce_ifmedia_upd (struct ifnet *);
432static int bce_ifmedia_upd_locked (struct ifnet *);
433static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
434static void bce_init_locked (struct bce_softc *);
435static void bce_init (void *);
436static void bce_mgmt_init_locked (struct bce_softc *sc);
437
438static int bce_init_ctx (struct bce_softc *);
439static void bce_get_mac_addr (struct bce_softc *);
440static void bce_set_mac_addr (struct bce_softc *);
441static void bce_phy_intr (struct bce_softc *);
442static inline u16 bce_get_hw_rx_cons (struct bce_softc *);
443static void bce_rx_intr (struct bce_softc *);
444static void bce_tx_intr (struct bce_softc *);
445static void bce_disable_intr (struct bce_softc *);
446static void bce_enable_intr (struct bce_softc *, int);
447
448static void bce_intr (void *);
449static void bce_set_rx_mode (struct bce_softc *);
450static void bce_stats_update (struct bce_softc *);
451static void bce_tick (void *);
452static void bce_pulse (void *);
453static void bce_add_sysctls (struct bce_softc *);
454
455
456/****************************************************************************/
457/* FreeBSD device dispatch table. */
458/****************************************************************************/
459static device_method_t bce_methods[] = {
460 /* Device interface (device_if.h) */
461 DEVMETHOD(device_probe, bce_probe),
462 DEVMETHOD(device_attach, bce_attach),
463 DEVMETHOD(device_detach, bce_detach),
464 DEVMETHOD(device_shutdown, bce_shutdown),
465/* Supported by device interface but not used here. */
466/* DEVMETHOD(device_identify, bce_identify), */
467/* DEVMETHOD(device_suspend, bce_suspend), */
468/* DEVMETHOD(device_resume, bce_resume), */
469/* DEVMETHOD(device_quiesce, bce_quiesce), */
470
471 /* Bus interface (bus_if.h) */
472 DEVMETHOD(bus_print_child, bus_generic_print_child),
473 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
474
475 /* MII interface (miibus_if.h) */
476 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
477 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
478 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
479/* Supported by MII interface but not used here. */
480/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */
481/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */
482
483 { 0, 0 }
484};
485
486static driver_t bce_driver = {
487 "bce",
488 bce_methods,
489 sizeof(struct bce_softc)
490};
491
492static devclass_t bce_devclass;
493
494MODULE_DEPEND(bce, pci, 1, 1, 1);
495MODULE_DEPEND(bce, ether, 1, 1, 1);
496MODULE_DEPEND(bce, miibus, 1, 1, 1);
497
498DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
499DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
500
501
502/****************************************************************************/
503/* Tunable device values */
504/****************************************************************************/
505SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
506
507/* Allowable values are TRUE or FALSE */
508static int bce_tso_enable = TRUE;
509TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
510SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
511"TSO Enable/Disable");
512
513/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
514/* ToDo: Add MSI-X support. */
515static int bce_msi_enable = 1;
516TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
517SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
518"MSI-X|MSI|INTx selector");
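/*
 * Usage sketch: both of the above are boot-time tunables (CTLFLAG_RDTUN),
 * so they would normally be set from /boot/loader.conf before the driver
 * loads, for example:
 *
 *	hw.bce.tso_enable="0"	# disable TSO
 *	hw.bce.msi_enable="0"	# force INTx (legacy) interrupts
 */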
519
520/* ToDo: Add tunable to enable/disable strict MTU handling. */
521/* Currently allows "loose" RX MTU checking (i.e. sets the */
522/* H/W RX MTU to the size of the largest receive buffer, or */
523/* 2048 bytes). This will cause a UNH failure but is more */
 524 * desirable from a functional perspective. */
525
526
527/****************************************************************************/
528/* Device probe function. */
529/* */
530/* Compares the device to the driver's list of supported devices and */
531/* reports back to the OS whether this is the right driver for the device. */
532/* */
533/* Returns: */
534/* BUS_PROBE_DEFAULT on success, positive value on failure. */
535/****************************************************************************/
536static int
537bce_probe(device_t dev)
538{
539 struct bce_type *t;
540 struct bce_softc *sc;
541 char *descbuf;
542 u16 vid = 0, did = 0, svid = 0, sdid = 0;
543
544 t = bce_devs;
545
546 sc = device_get_softc(dev);
547 bzero(sc, sizeof(struct bce_softc));
548 sc->bce_unit = device_get_unit(dev);
549 sc->bce_dev = dev;
550
551 /* Get the data for the device to be probed. */
552 vid = pci_get_vendor(dev);
553 did = pci_get_device(dev);
554 svid = pci_get_subvendor(dev);
555 sdid = pci_get_subdevice(dev);
556
557 DBPRINT(sc, BCE_EXTREME_LOAD,
558 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
559 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
560
561 /* Look through the list of known devices for a match. */
562 while(t->bce_name != NULL) {
563
564 if ((vid == t->bce_vid) && (did == t->bce_did) &&
565 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
566 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
567
568 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
569
570 if (descbuf == NULL)
571 return(ENOMEM);
572
573 /* Print out the device identity. */
574 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
575 t->bce_name, (((pci_read_config(dev,
576 PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
577 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
578
579 device_set_desc_copy(dev, descbuf);
580 free(descbuf, M_TEMP);
581 return(BUS_PROBE_DEFAULT);
582 }
583 t++;
584 }
585
586 return(ENXIO);
587}
588
589
590/****************************************************************************/
 591/* Prints useful adapter info to the console. */
 592/* */
 593/* Reports the ASIC revision, bus type and speed, firmware version, and */
 594/* enabled device features when booting verbosely. */
595/* */
596/* Returns: */
597/* None. */
598/****************************************************************************/
599static void
600bce_print_adapter_info(struct bce_softc *sc)
601{
602 int i = 0;
603
604 DBENTER(BCE_VERBOSE_LOAD);
605
606 if (bootverbose) {
607 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
608 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
609 12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
610
611
612 /* Bus info. */
613 if (sc->bce_flags & BCE_PCIE_FLAG) {
614 printf("Bus (PCIe x%d, ", sc->link_width);
615 switch (sc->link_speed) {
616 case 1: printf("2.5Gbps); "); break;
617 case 2: printf("5Gbps); "); break;
618 default: printf("Unknown link speed); ");
619 }
620 } else {
621 printf("Bus (PCI%s, %s, %dMHz); ",
622 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
623 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
624 "32-bit" : "64-bit"), sc->bus_speed_mhz);
625 }
626
627 /* Firmware version and device features. */
628 printf("B/C (%s); Flags (", sc->bce_bc_ver);
629
630 #ifdef BCE_JUMBO_HDRSPLIT
631 printf("SPLT");
632 i++;
633 #endif
634
635 if (sc->bce_flags & BCE_USING_MSI_FLAG) {
636 if (i > 0) printf("|");
637 printf("MSI"); i++;
638 }
639
640 if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
641 if (i > 0) printf("|");
642 printf("MSI-X"); i++;
643 }
644
645 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
646 if (i > 0) printf("|");
647 printf("2.5G"); i++;
648 }
649
650 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
651 if (i > 0) printf("|");
652 printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
653 } else {
654 printf(")\n");
655 }
656 }
657
658 DBEXIT(BCE_VERBOSE_LOAD);
659}
660
661
662/****************************************************************************/
663/* PCI Capabilities Probe Function. */
664/* */
 665/* Walks the PCI capabilities list for the device to find what features are */
666/* supported. */
667/* */
668/* Returns: */
669/* None. */
670/****************************************************************************/
671static void
672bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
673{
674 u32 reg;
675
676 DBENTER(BCE_VERBOSE_LOAD);
677
678 /* Check if PCI-X capability is enabled. */
679 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
680 if (reg != 0)
681 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
682 }
683
684 /* Check if PCIe capability is enabled. */
685 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
686 if (reg != 0) {
687 u16 link_status = pci_read_config(dev, reg + 0x12, 2);
688 DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = "
689 "0x%08X\n", link_status);
690 sc->link_speed = link_status & 0xf;
691 sc->link_width = (link_status >> 4) & 0x3f;
692 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
693 sc->bce_flags |= BCE_PCIE_FLAG;
694 }
695 }
696
697 /* Check if MSI capability is enabled. */
698 if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
699 if (reg != 0)
700 sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
701 }
702
703 /* Check if MSI-X capability is enabled. */
704 if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
705 if (reg != 0)
706 sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
707 }
708
709 DBEXIT(BCE_VERBOSE_LOAD);
710}
711
712
713/****************************************************************************/
714/* Device attach function. */
715/* */
716/* Allocates device resources, performs secondary chip identification, */
717/* resets and initializes the hardware, and initializes driver instance */
718/* variables. */
719/* */
720/* Returns: */
721/* 0 on success, positive value on failure. */
722/****************************************************************************/
723static int
724bce_attach(device_t dev)
725{
726 struct bce_softc *sc;
727 struct ifnet *ifp;
728 u32 val;
729 int error, rid, rc = 0;
730
731 sc = device_get_softc(dev);
732 sc->bce_dev = dev;
733
734 DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
735
736 sc->bce_unit = device_get_unit(dev);
737
738 /* Set initial device and PHY flags */
739 sc->bce_flags = 0;
740 sc->bce_phy_flags = 0;
741
742 pci_enable_busmaster(dev);
743
744 /* Allocate PCI memory resources. */
745 rid = PCIR_BAR(0);
746 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
747 &rid, RF_ACTIVE);
748
749 if (sc->bce_res_mem == NULL) {
750 BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
751 __FILE__, __LINE__);
752 rc = ENXIO;
753 goto bce_attach_fail;
754 }
755
756 /* Get various resource handles. */
757 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
758 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
759 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
760
761 bce_probe_pci_caps(dev, sc);
762
763 rid = 1;
764#if 0
765 /* Try allocating MSI-X interrupts. */
766 if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
767 (bce_msi_enable >= 2) &&
768 ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
769 &rid, RF_ACTIVE)) != NULL)) {
770
771 msi_needed = sc->bce_msi_count = 1;
772
773 if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
774 (sc->bce_msi_count != msi_needed)) {
775 BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
776 "Received = %d, error = %d\n", __FILE__, __LINE__,
777 msi_needed, sc->bce_msi_count, error);
778 sc->bce_msi_count = 0;
779 pci_release_msi(dev);
780 bus_release_resource(dev, SYS_RES_MEMORY, rid,
781 sc->bce_res_irq);
782 sc->bce_res_irq = NULL;
783 } else {
784 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
785 __FUNCTION__);
786 sc->bce_flags |= BCE_USING_MSIX_FLAG;
787 sc->bce_intr = bce_intr;
788 }
789 }
790#endif
791
792 /* Try allocating a MSI interrupt. */
793 if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
794 (bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
795 sc->bce_msi_count = 1;
796 if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
797 BCE_PRINTF("%s(%d): MSI allocation failed! "
798 "error = %d\n", __FILE__, __LINE__, error);
799 sc->bce_msi_count = 0;
800 pci_release_msi(dev);
801 } else {
802 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
803 "interrupt.\n", __FUNCTION__);
804 sc->bce_flags |= BCE_USING_MSI_FLAG;
805 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
806 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
807 sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
808 sc->bce_irq_rid = 1;
809 sc->bce_intr = bce_intr;
810 }
811 }
812
813 /* Try allocating a legacy interrupt. */
814 if (sc->bce_msi_count == 0) {
815 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
816 __FUNCTION__);
817 rid = 0;
818 sc->bce_intr = bce_intr;
819 }
820
821 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
822 &rid, RF_SHAREABLE | RF_ACTIVE);
823
824 sc->bce_irq_rid = rid;
825
826 /* Report any IRQ allocation errors. */
827 if (sc->bce_res_irq == NULL) {
828 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
829 __FILE__, __LINE__);
830 rc = ENXIO;
831 goto bce_attach_fail;
832 }
833
834 /* Initialize mutex for the current device instance. */
835 BCE_LOCK_INIT(sc, device_get_nameunit(dev));
836
837 /*
838 * Configure byte swap and enable indirect register access.
839 * Rely on CPU to do target byte swapping on big endian systems.
 840 * Access to registers outside of PCI configuration space is not
841 * valid until this is done.
842 */
843 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
844 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
845 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
846
 847 /* Save ASIC revision info. */
848 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
849
850 /* Weed out any non-production controller revisions. */
851 switch(BCE_CHIP_ID(sc)) {
852 case BCE_CHIP_ID_5706_A0:
853 case BCE_CHIP_ID_5706_A1:
854 case BCE_CHIP_ID_5708_A0:
855 case BCE_CHIP_ID_5708_B0:
856 case BCE_CHIP_ID_5709_A0:
857 case BCE_CHIP_ID_5709_B0:
858 case BCE_CHIP_ID_5709_B1:
859 case BCE_CHIP_ID_5709_B2:
860 BCE_PRINTF("%s(%d): Unsupported controller "
861 "revision (%c%d)!\n", __FILE__, __LINE__,
862 (((pci_read_config(dev, PCIR_REVID, 4) &
863 0xf0) >> 4) + 'A'), (pci_read_config(dev,
864 PCIR_REVID, 4) & 0xf));
865 rc = ENODEV;
866 goto bce_attach_fail;
867 }
868
869 /*
870 * The embedded PCIe to PCI-X bridge (EPB)
871 * in the 5708 cannot address memory above
872 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
873 */
874 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
875 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
876 else
877 sc->max_bus_addr = BUS_SPACE_MAXADDR;
878
879 /*
880 * Find the base address for shared memory access.
881 * Newer versions of bootcode use a signature and offset
882 * while older versions use a fixed address.
883 */
884 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
885 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
886 /* Multi-port devices use different offsets in shared memory. */
887 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
888 (pci_get_function(sc->bce_dev) << 2));
889 else
890 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
891
892 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
893 __FUNCTION__, sc->bce_shmem_base);
894
895 /* Fetch the bootcode revision. */
896 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
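	/*
	 * The three high-order bytes of the shared-memory word each hold one
	 * numeric field of the bootcode version; the loop below converts each
	 * byte to decimal (dropping leading zeros) and joins the fields with
	 * '.' to build the version string.
	 */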
897 for (int i = 0, j = 0; i < 3; i++) {
898 u8 num;
899
900 num = (u8) (val >> (24 - (i * 8)));
901 for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
902 if (num >= k || !skip0 || k == 1) {
903 sc->bce_bc_ver[j++] = (num / k) + '0';
904 skip0 = 0;
905 }
906 }
907
908 if (i != 2)
909 sc->bce_bc_ver[j++] = '.';
910 }
911
 912 /* Check if any management firmware is enabled. */
913 val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
914 if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
915 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
916
917 /* Allow time for firmware to enter the running state. */
918 for (int i = 0; i < 30; i++) {
919 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
920 if (val & BCE_CONDITION_MFW_RUN_MASK)
921 break;
922 DELAY(10000);
923 }
924
925 /* Check if management firmware is running. */
926 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
927 val &= BCE_CONDITION_MFW_RUN_MASK;
928 if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
929 (val != BCE_CONDITION_MFW_RUN_NONE)) {
930 u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
931 int i = 0;
932
933 /* Read the management firmware version string. */
934 for (int j = 0; j < 3; j++) {
935 val = bce_reg_rd_ind(sc, addr + j * 4);
936 val = bswap32(val);
937 memcpy(&sc->bce_mfw_ver[i], &val, 4);
938 i += 4;
939 }
940 } else {
941 /* May cause firmware synchronization timeouts. */
942 BCE_PRINTF("%s(%d): Management firmware enabled "
943 "but not running!\n", __FILE__, __LINE__);
944 strcpy(sc->bce_mfw_ver, "NOT RUNNING!");
945
946 /* ToDo: Any action the driver should take? */
947 }
948 }
949
950 /* Get PCI bus information (speed and type). */
951 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
952 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
953 u32 clkreg;
954
955 sc->bce_flags |= BCE_PCIX_FLAG;
956
957 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
958
959 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
960 switch (clkreg) {
961 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
962 sc->bus_speed_mhz = 133;
963 break;
964
965 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
966 sc->bus_speed_mhz = 100;
967 break;
968
969 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
970 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
971 sc->bus_speed_mhz = 66;
972 break;
973
974 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
975 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
976 sc->bus_speed_mhz = 50;
977 break;
978
979 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
980 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
981 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
982 sc->bus_speed_mhz = 33;
983 break;
984 }
985 } else {
986 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
987 sc->bus_speed_mhz = 66;
988 else
989 sc->bus_speed_mhz = 33;
990 }
991
992 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
993 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
994
995 /* Reset controller and announce to bootcode that driver is present. */
996 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
997 BCE_PRINTF("%s(%d): Controller reset failed!\n",
998 __FILE__, __LINE__);
999 rc = ENXIO;
1000 goto bce_attach_fail;
1001 }
1002
1003 /* Initialize the controller. */
1004 if (bce_chipinit(sc)) {
1005 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
1006 __FILE__, __LINE__);
1007 rc = ENXIO;
1008 goto bce_attach_fail;
1009 }
1010
1011 /* Perform NVRAM test. */
1012 if (bce_nvram_test(sc)) {
1013 BCE_PRINTF("%s(%d): NVRAM test failed!\n",
1014 __FILE__, __LINE__);
1015 rc = ENXIO;
1016 goto bce_attach_fail;
1017 }
1018
1019 /* Fetch the permanent Ethernet MAC address. */
1020 bce_get_mac_addr(sc);
1021
1022 /*
1023 * Trip points control how many BDs
1024 * should be ready before generating an
1025 * interrupt while ticks control how long
1026 * a BD can sit in the chain before
1027 * generating an interrupt. Set the default
1028 * values for the RX and TX chains.
1029 */
1030
1031#ifdef BCE_DEBUG
1032 /* Force more frequent interrupts. */
1033 sc->bce_tx_quick_cons_trip_int = 1;
1034 sc->bce_tx_quick_cons_trip = 1;
1035 sc->bce_tx_ticks_int = 0;
1036 sc->bce_tx_ticks = 0;
1037
1038 sc->bce_rx_quick_cons_trip_int = 1;
1039 sc->bce_rx_quick_cons_trip = 1;
1040 sc->bce_rx_ticks_int = 0;
1041 sc->bce_rx_ticks = 0;
1042#else
1043 /* Improve throughput at the expense of increased latency. */
1044 sc->bce_tx_quick_cons_trip_int = 20;
1045 sc->bce_tx_quick_cons_trip = 20;
1046 sc->bce_tx_ticks_int = 80;
1047 sc->bce_tx_ticks = 80;
1048
1049 sc->bce_rx_quick_cons_trip_int = 6;
1050 sc->bce_rx_quick_cons_trip = 6;
1051 sc->bce_rx_ticks_int = 18;
1052 sc->bce_rx_ticks = 18;
1053#endif
1054
1055 /* Not used for L2. */
1056 sc->bce_comp_prod_trip_int = 0;
1057 sc->bce_comp_prod_trip = 0;
1058 sc->bce_com_ticks_int = 0;
1059 sc->bce_com_ticks = 0;
1060 sc->bce_cmd_ticks_int = 0;
1061 sc->bce_cmd_ticks = 0;
1062
1063 /* Update statistics once every second. */
1064 sc->bce_stats_ticks = 1000000 & 0xffff00;
1065
1066 /* Find the media type for the adapter. */
1067 bce_get_media(sc);
1068
1069 /* Store data needed by PHY driver for backplane applications */
1070 sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1071 sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);
1072
1073 /* Allocate DMA memory resources. */
1074 if (bce_dma_alloc(dev)) {
1075 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
1076 __FILE__, __LINE__);
1077 rc = ENXIO;
1078 goto bce_attach_fail;
1079 }
1080
1081 /* Allocate an ifnet structure. */
1082 ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
1083 if (ifp == NULL) {
1084 BCE_PRINTF("%s(%d): Interface allocation failed!\n",
1085 __FILE__, __LINE__);
1086 rc = ENXIO;
1087 goto bce_attach_fail;
1088 }
1089
1090 /* Initialize the ifnet interface. */
1091 ifp->if_softc = sc;
1092 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1093 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1094 ifp->if_ioctl = bce_ioctl;
1095 ifp->if_start = bce_start;
1096 ifp->if_init = bce_init;
1097 ifp->if_mtu = ETHERMTU;
1098
1099 if (bce_tso_enable) {
1100 ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
1101 ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 |
1102 IFCAP_VLAN_HWTSO;
1103 } else {
1104 ifp->if_hwassist = BCE_IF_HWASSIST;
1105 ifp->if_capabilities = BCE_IF_CAPABILITIES;
1106 }
1107
1108 ifp->if_capenable = ifp->if_capabilities;
1109
1110 /*
1111 * Assume standard mbuf sizes for buffer allocation.
1112 * This may change later if the MTU size is set to
1113 * something other than 1500.
1114 */
1115#ifdef BCE_JUMBO_HDRSPLIT
1116 sc->rx_bd_mbuf_alloc_size = MHLEN;
1117 /* Make sure offset is 16 byte aligned for hardware. */
1118 sc->rx_bd_mbuf_align_pad =
1119 roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
1120 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
1121 sc->rx_bd_mbuf_align_pad;
1122 sc->pg_bd_mbuf_alloc_size = MCLBYTES;
1123#else
1124 sc->rx_bd_mbuf_alloc_size = MCLBYTES;
1125 sc->rx_bd_mbuf_align_pad =
1126 roundup2(MCLBYTES, 16) - MCLBYTES;
1127 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
1128 sc->rx_bd_mbuf_align_pad;
1129#endif
1130
1131 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
1132 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1133 IFQ_SET_READY(&ifp->if_snd);
1134
1135 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1136 ifp->if_baudrate = IF_Mbps(2500ULL);
1137 else
1138 ifp->if_baudrate = IF_Mbps(1000);
1139
1140 /* Handle any special PHY initialization for SerDes PHYs. */
1141 bce_init_media(sc);
1142
 1143 /* Create the MII child bus by attaching the PHY. */
1144 rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
1145 bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
33
34/*
35 * The following controllers are supported by this driver:
36 * BCM5706C A2, A3
37 * BCM5706S A2, A3
38 * BCM5708C B1, B2
39 * BCM5708S B1, B2
40 * BCM5709C A1, C0
41 * BCM5709S A1, C0
42 * BCM5716C C0
43 * BCM5716S C0
44 *
45 * The following controllers are not supported by this driver:
46 * BCM5706C A0, A1 (pre-production)
47 * BCM5706S A0, A1 (pre-production)
48 * BCM5708C A0, B0 (pre-production)
49 * BCM5708S A0, B0 (pre-production)
50 * BCM5709C A0 B0, B1, B2 (pre-production)
51 * BCM5709S A0, B0, B1, B2 (pre-production)
52 */
53
54#include "opt_bce.h"
55
56#include <dev/bce/if_bcereg.h>
57#include <dev/bce/if_bcefw.h>
58
59/****************************************************************************/
60/* BCE Debug Options */
61/****************************************************************************/
62#ifdef BCE_DEBUG
63 u32 bce_debug = BCE_WARN;
64
65 /* 0 = Never */
66 /* 1 = 1 in 2,147,483,648 */
67 /* 256 = 1 in 8,388,608 */
68 /* 2048 = 1 in 1,048,576 */
69 /* 65536 = 1 in 32,768 */
70 /* 1048576 = 1 in 2,048 */
71 /* 268435456 = 1 in 8 */
72 /* 536870912 = 1 in 4 */
73 /* 1073741824 = 1 in 2 */
74
75 /* Controls how often the l2_fhdr frame error check will fail. */
76 int l2fhdr_error_sim_control = 0;
77
78 /* Controls how often the unexpected attention check will fail. */
79 int unexpected_attention_sim_control = 0;
80
81 /* Controls how often to simulate an mbuf allocation failure. */
82 int mbuf_alloc_failed_sim_control = 0;
83
84 /* Controls how often to simulate a DMA mapping failure. */
85 int dma_map_addr_failed_sim_control = 0;
86
87 /* Controls how often to simulate a bootcode failure. */
88 int bootcode_running_failure_sim_control = 0;
89#endif
90
91/****************************************************************************/
92/* BCE Build Time Options */
93/****************************************************************************/
94/* #define BCE_NVRAM_WRITE_SUPPORT 1 */
95
96
97/****************************************************************************/
98/* PCI Device ID Table */
99/* */
100/* Used by bce_probe() to identify the devices supported by this driver. */
101/****************************************************************************/
102#define BCE_DEVDESC_MAX 64
103
104static struct bce_type bce_devs[] = {
105 /* BCM5706C Controllers and OEM boards. */
106 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
107 "HP NC370T Multifunction Gigabit Server Adapter" },
108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
109 "HP NC370i Multifunction Gigabit Server Adapter" },
110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
111 "HP NC380T PCIe DP Multifunc Gig Server Adapter" },
112 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
113 "HP NC371i Multifunction Gigabit Server Adapter" },
114 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
115 "Broadcom NetXtreme II BCM5706 1000Base-T" },
116
117 /* BCM5706S controllers and OEM boards. */
118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
119 "HP NC370F Multifunction Gigabit Server Adapter" },
120 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
121 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
122
123 /* BCM5708C controllers and OEM boards. */
124 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
125 "HP NC373T PCIe Multifunction Gig Server Adapter" },
126 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
127 "HP NC373i Multifunction Gigabit Server Adapter" },
128 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
129 "HP NC374m PCIe Multifunction Adapter" },
130 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
131 "Broadcom NetXtreme II BCM5708 1000Base-T" },
132
133 /* BCM5708S controllers and OEM boards. */
134 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
135 "HP NC373m Multifunction Gigabit Server Adapter" },
136 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
137 "HP NC373i Multifunction Gigabit Server Adapter" },
138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
139 "HP NC373F PCIe Multifunc Giga Server Adapter" },
140 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
141 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
142
143 /* BCM5709C controllers and OEM boards. */
144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
145 "HP NC382i DP Multifunction Gigabit Server Adapter" },
146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
147 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
149 "Broadcom NetXtreme II BCM5709 1000Base-T" },
150
151 /* BCM5709S controllers and OEM boards. */
152 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
153 "HP NC382m DP 1GbE Multifunction BL-c Adapter" },
154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
155 "HP NC382i DP Multifunction Gigabit Server Adapter" },
156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
157 "Broadcom NetXtreme II BCM5709 1000Base-SX" },
158
159 /* BCM5716 controllers and OEM boards. */
160 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
161 "Broadcom NetXtreme II BCM5716 1000Base-T" },
162
163 { 0, 0, 0, 0, NULL }
164};
165
166
167/****************************************************************************/
168/* Supported Flash NVRAM device data. */
169/****************************************************************************/
170static struct flash_spec flash_table[] =
171{
172#define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
173#define NONBUFFERED_FLAGS (BCE_NV_WREN)
174
175 /* Slow EEPROM */
176 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
177 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
178 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
179 "EEPROM - slow"},
180 /* Expansion entry 0001 */
181 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
182 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
183 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
184 "Entry 0001"},
185 /* Saifun SA25F010 (non-buffered flash) */
186 /* strap, cfg1, & write1 need updates */
187 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
190 "Non-buffered flash (128kB)"},
191 /* Saifun SA25F020 (non-buffered flash) */
192 /* strap, cfg1, & write1 need updates */
193 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
194 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
196 "Non-buffered flash (256kB)"},
197 /* Expansion entry 0100 */
198 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
199 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 0100"},
202 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
203 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
204 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
205 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
206 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
207 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
208 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
209 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
210 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
211 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
212 /* Saifun SA25F005 (non-buffered flash) */
213 /* strap, cfg1, & write1 need updates */
214 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
215 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
217 "Non-buffered flash (64kB)"},
218 /* Fast EEPROM */
219 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
220 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
221 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
222 "EEPROM - fast"},
223 /* Expansion entry 1001 */
224 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
225 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
226 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
227 "Entry 1001"},
228 /* Expansion entry 1010 */
229 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
230 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
231 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
232 "Entry 1010"},
233 /* ATMEL AT45DB011B (buffered flash) */
234 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
235 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
236 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
237 "Buffered flash (128kB)"},
238 /* Expansion entry 1100 */
239 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
240 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
241 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
242 "Entry 1100"},
243 /* Expansion entry 1101 */
244 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
245 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
246 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
247 "Entry 1101"},
248 /* Ateml Expansion entry 1110 */
249 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
250 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
251 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
252 "Entry 1110 (Atmel)"},
253 /* ATMEL AT45DB021B (buffered flash) */
254 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
255 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
256 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
257 "Buffered flash (256kB)"},
258};
259
260/*
261 * The BCM5709 controllers transparently handle the
262 * differences between Atmel 264 byte pages and all
263 * flash devices which use 256 byte pages, so no
264 * logical-to-physical mapping is required in the
265 * driver.
266 */
267static struct flash_spec flash_5709 = {
268 .flags = BCE_NV_BUFFERED,
269 .page_bits = BCM5709_FLASH_PAGE_BITS,
270 .page_size = BCM5709_FLASH_PAGE_SIZE,
271 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
272 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
273 .name = "5709/5716 buffered flash (256kB)",
274};
275
276
277/****************************************************************************/
278/* FreeBSD device entry points. */
279/****************************************************************************/
280static int bce_probe (device_t);
281static int bce_attach (device_t);
282static int bce_detach (device_t);
283static int bce_shutdown (device_t);
284
285
286/****************************************************************************/
287/* BCE Debug Data Structure Dump Routines */
288/****************************************************************************/
289#ifdef BCE_DEBUG
290static u32 bce_reg_rd (struct bce_softc *, u32);
291static void bce_reg_wr (struct bce_softc *, u32, u32);
292static void bce_reg_wr16 (struct bce_softc *, u32, u16);
293static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
294static void bce_dump_enet (struct bce_softc *, struct mbuf *);
295static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
296static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int);
297static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int);
298#ifdef BCE_JUMBO_HDRSPLIT
299static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int);
300#endif
301static void bce_dump_txbd (struct bce_softc *,
302 int, struct tx_bd *);
303static void bce_dump_rxbd (struct bce_softc *,
304 int, struct rx_bd *);
305#ifdef BCE_JUMBO_HDRSPLIT
306static void bce_dump_pgbd (struct bce_softc *,
307 int, struct rx_bd *);
308#endif
309static void bce_dump_l2fhdr (struct bce_softc *,
310 int, struct l2_fhdr *);
311static void bce_dump_ctx (struct bce_softc *, u16);
312static void bce_dump_ftqs (struct bce_softc *);
313static void bce_dump_tx_chain (struct bce_softc *, u16, int);
314static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int);
315#ifdef BCE_JUMBO_HDRSPLIT
316static void bce_dump_pg_chain (struct bce_softc *, u16, int);
317#endif
318static void bce_dump_status_block (struct bce_softc *);
319static void bce_dump_stats_block (struct bce_softc *);
320static void bce_dump_driver_state (struct bce_softc *);
321static void bce_dump_hw_state (struct bce_softc *);
322static void bce_dump_mq_regs (struct bce_softc *);
323static void bce_dump_bc_state (struct bce_softc *);
324static void bce_dump_txp_state (struct bce_softc *, int);
325static void bce_dump_rxp_state (struct bce_softc *, int);
326static void bce_dump_tpat_state (struct bce_softc *, int);
327static void bce_dump_cp_state (struct bce_softc *, int);
328static void bce_dump_com_state (struct bce_softc *, int);
329static void bce_dump_rv2p_state (struct bce_softc *);
330static void bce_breakpoint (struct bce_softc *);
331#endif
332
333
334/****************************************************************************/
335/* BCE Register/Memory Access Routines */
336/****************************************************************************/
337static u32 bce_reg_rd_ind (struct bce_softc *, u32);
338static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
339static void bce_shmem_wr (struct bce_softc *, u32, u32);
340static u32 bce_shmem_rd (struct bce_softc *, u32);
341static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
342static int bce_miibus_read_reg (device_t, int, int);
343static int bce_miibus_write_reg (device_t, int, int, int);
344static void bce_miibus_statchg (device_t);
345
346#ifdef BCE_DEBUG
347static int sysctl_nvram_dump(SYSCTL_HANDLER_ARGS);
348#ifdef BCE_NVRAM_WRITE_SUPPORT
349static int sysctl_nvram_write(SYSCTL_HANDLER_ARGS);
350#endif
351#endif
352
353/****************************************************************************/
354/* BCE NVRAM Access Routines */
355/****************************************************************************/
356static int bce_acquire_nvram_lock (struct bce_softc *);
357static int bce_release_nvram_lock (struct bce_softc *);
358static void bce_enable_nvram_access (struct bce_softc *);
359static void bce_disable_nvram_access (struct bce_softc *);
360static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
361static int bce_init_nvram (struct bce_softc *);
362static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
363static int bce_nvram_test (struct bce_softc *);
364#ifdef BCE_NVRAM_WRITE_SUPPORT
365static int bce_enable_nvram_write (struct bce_softc *);
366static void bce_disable_nvram_write (struct bce_softc *);
367static int bce_nvram_erase_page (struct bce_softc *, u32);
368static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
369static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
370#endif
371
372/****************************************************************************/
373/* BCE Media and DMA Allocation Routines */
374/****************************************************************************/
375static void bce_get_media (struct bce_softc *);
376static void bce_init_media (struct bce_softc *);
377static void bce_dma_map_addr (void *,
378 bus_dma_segment_t *, int, int);
379static int bce_dma_alloc (device_t);
380static void bce_dma_free (struct bce_softc *);
381static void bce_release_resources (struct bce_softc *);
382
383/****************************************************************************/
384/* BCE Firmware Synchronization and Load */
385/****************************************************************************/
386static int bce_fw_sync (struct bce_softc *, u32);
387static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
388static void bce_load_cpu_fw (struct bce_softc *,
389 struct cpu_reg *, struct fw_info *);
390static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
391static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *);
392static void bce_start_rxp_cpu (struct bce_softc *);
393static void bce_init_rxp_cpu (struct bce_softc *);
394static void bce_init_txp_cpu (struct bce_softc *);
395static void bce_init_tpat_cpu (struct bce_softc *);
396static void bce_init_cp_cpu (struct bce_softc *);
397static void bce_init_com_cpu (struct bce_softc *);
398static void bce_init_cpus (struct bce_softc *);
399
400static void bce_print_adapter_info (struct bce_softc *);
401static void bce_probe_pci_caps (device_t, struct bce_softc *);
402static void bce_stop (struct bce_softc *);
403static int bce_reset (struct bce_softc *, u32);
404static int bce_chipinit (struct bce_softc *);
405static int bce_blockinit (struct bce_softc *);
406
407static int bce_init_tx_chain (struct bce_softc *);
408static void bce_free_tx_chain (struct bce_softc *);
409
410static int bce_get_rx_buf (struct bce_softc *,
411 struct mbuf *, u16 *, u16 *, u32 *);
412static int bce_init_rx_chain (struct bce_softc *);
413static void bce_fill_rx_chain (struct bce_softc *);
414static void bce_free_rx_chain (struct bce_softc *);
415
416#ifdef BCE_JUMBO_HDRSPLIT
417static int bce_get_pg_buf (struct bce_softc *,
418 struct mbuf *, u16 *, u16 *);
419static int bce_init_pg_chain (struct bce_softc *);
420static void bce_fill_pg_chain (struct bce_softc *);
421static void bce_free_pg_chain (struct bce_softc *);
422#endif
423
424static struct mbuf *bce_tso_setup (struct bce_softc *,
425 struct mbuf **, u16 *);
426static int bce_tx_encap (struct bce_softc *, struct mbuf **);
427static void bce_start_locked (struct ifnet *);
428static void bce_start (struct ifnet *);
429static int bce_ioctl (struct ifnet *, u_long, caddr_t);
430static void bce_watchdog (struct bce_softc *);
431static int bce_ifmedia_upd (struct ifnet *);
432static int bce_ifmedia_upd_locked (struct ifnet *);
433static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
434static void bce_init_locked (struct bce_softc *);
435static void bce_init (void *);
436static void bce_mgmt_init_locked (struct bce_softc *sc);
437
438static int bce_init_ctx (struct bce_softc *);
439static void bce_get_mac_addr (struct bce_softc *);
440static void bce_set_mac_addr (struct bce_softc *);
441static void bce_phy_intr (struct bce_softc *);
442static inline u16 bce_get_hw_rx_cons (struct bce_softc *);
443static void bce_rx_intr (struct bce_softc *);
444static void bce_tx_intr (struct bce_softc *);
445static void bce_disable_intr (struct bce_softc *);
446static void bce_enable_intr (struct bce_softc *, int);
447
448static void bce_intr (void *);
449static void bce_set_rx_mode (struct bce_softc *);
450static void bce_stats_update (struct bce_softc *);
451static void bce_tick (void *);
452static void bce_pulse (void *);
453static void bce_add_sysctls (struct bce_softc *);
454
455
456/****************************************************************************/
457/* FreeBSD device dispatch table. */
458/****************************************************************************/
459static device_method_t bce_methods[] = {
460 /* Device interface (device_if.h) */
461 DEVMETHOD(device_probe, bce_probe),
462 DEVMETHOD(device_attach, bce_attach),
463 DEVMETHOD(device_detach, bce_detach),
464 DEVMETHOD(device_shutdown, bce_shutdown),
465/* Supported by device interface but not used here. */
466/* DEVMETHOD(device_identify, bce_identify), */
467/* DEVMETHOD(device_suspend, bce_suspend), */
468/* DEVMETHOD(device_resume, bce_resume), */
469/* DEVMETHOD(device_quiesce, bce_quiesce), */
470
471 /* Bus interface (bus_if.h) */
472 DEVMETHOD(bus_print_child, bus_generic_print_child),
473 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
474
475 /* MII interface (miibus_if.h) */
476 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
477 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
478 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
479/* Supported by MII interface but not used here. */
480/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */
481/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */
482
483 { 0, 0 }
484};
485
486static driver_t bce_driver = {
487 "bce",
488 bce_methods,
489 sizeof(struct bce_softc)
490};
491
492static devclass_t bce_devclass;
493
494MODULE_DEPEND(bce, pci, 1, 1, 1);
495MODULE_DEPEND(bce, ether, 1, 1, 1);
496MODULE_DEPEND(bce, miibus, 1, 1, 1);
497
498DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
499DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
500
501
502/****************************************************************************/
503/* Tunable device values */
504/****************************************************************************/
505SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
506
507/* Allowable values are TRUE or FALSE */
508static int bce_tso_enable = TRUE;
509TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
510SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
511"TSO Enable/Disable");
512
513/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
514/* ToDo: Add MSI-X support. */
515static int bce_msi_enable = 1;
516TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
517SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
518"MSI-X|MSI|INTx selector");
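/* Both tunables are boot-time settable (CTLFLAG_RDTUN), e.g. from */
/* /boot/loader.conf: */
/* hw.bce.tso_enable="1" */
/* hw.bce.msi_enable="1" */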
519
520/* ToDo: Add tunable to enable/disable strict MTU handling. */
521/* Currently allows "loose" RX MTU checking (i.e. sets the */
522/* H/W RX MTU to the size of the largest receive buffer, or */
523/* 2048 bytes). This will cause a UNH failure but is more */
524/* desirable from a functional perspective. */
525
526
527/****************************************************************************/
528/* Device probe function. */
529/* */
530/* Compares the device to the driver's list of supported devices and */
531/* reports back to the OS whether this is the right driver for the device. */
532/* */
533/* Returns: */
534/* BUS_PROBE_DEFAULT on success, positive value on failure. */
535/****************************************************************************/
536static int
537bce_probe(device_t dev)
538{
539 struct bce_type *t;
540 struct bce_softc *sc;
541 char *descbuf;
542 u16 vid = 0, did = 0, svid = 0, sdid = 0;
543
544 t = bce_devs;
545
546 sc = device_get_softc(dev);
547 bzero(sc, sizeof(struct bce_softc));
548 sc->bce_unit = device_get_unit(dev);
549 sc->bce_dev = dev;
550
551 /* Get the data for the device to be probed. */
552 vid = pci_get_vendor(dev);
553 did = pci_get_device(dev);
554 svid = pci_get_subvendor(dev);
555 sdid = pci_get_subdevice(dev);
556
557 DBPRINT(sc, BCE_EXTREME_LOAD,
558 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
559 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
560
561 /* Look through the list of known devices for a match. */
562 while(t->bce_name != NULL) {
563
564 if ((vid == t->bce_vid) && (did == t->bce_did) &&
565 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
566 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
567
568 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
569
570 if (descbuf == NULL)
571 return(ENOMEM);
572
573 /* Print out the device identity. */
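			/* The PCI revision ID encodes the stepping: the upper */
			/* nibble selects the letter ('A' + nibble) and the */
			/* lower nibble the number, e.g. 0x21 prints as "C1". */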
574 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
575 t->bce_name, (((pci_read_config(dev,
576 PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
577 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
578
579 device_set_desc_copy(dev, descbuf);
580 free(descbuf, M_TEMP);
581 return(BUS_PROBE_DEFAULT);
582 }
583 t++;
584 }
585
586 return(ENXIO);
587}
588
589
590/****************************************************************************/
591/* Adapter information print function. */
592/* */
593/* Prints the adapter's ASIC revision, bus type and speed, bootcode */
594/* version, and enabled feature flags when booting verbosely. */
595/* */
596/* Returns: */
597/* None. */
598/****************************************************************************/
599static void
600bce_print_adapter_info(struct bce_softc *sc)
601{
602 int i = 0;
603
604 DBENTER(BCE_VERBOSE_LOAD);
605
606 if (bootverbose) {
607 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
608 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
609 12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
610
611
612 /* Bus info. */
613 if (sc->bce_flags & BCE_PCIE_FLAG) {
614 printf("Bus (PCIe x%d, ", sc->link_width);
615 switch (sc->link_speed) {
616 case 1: printf("2.5Gbps); "); break;
617 case 2: printf("5Gbps); "); break;
618 default: printf("Unknown link speed); ");
619 }
620 } else {
621 printf("Bus (PCI%s, %s, %dMHz); ",
622 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
623 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
624 "32-bit" : "64-bit"), sc->bus_speed_mhz);
625 }
626
627 /* Firmware version and device features. */
628 printf("B/C (%s); Flags (", sc->bce_bc_ver);
629
630 #ifdef BCE_JUMBO_HDRSPLIT
631 printf("SPLT");
632 i++;
633 #endif
634
635 if (sc->bce_flags & BCE_USING_MSI_FLAG) {
636 if (i > 0) printf("|");
637 printf("MSI"); i++;
638 }
639
640 if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
641 if (i > 0) printf("|");
642 printf("MSI-X"); i++;
643 }
644
645 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
646 if (i > 0) printf("|");
647 printf("2.5G"); i++;
648 }
649
650 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
651 if (i > 0) printf("|");
652 printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
653 } else {
654 printf(")\n");
655 }
656 }
657
658 DBEXIT(BCE_VERBOSE_LOAD);
659}
660
661
662/****************************************************************************/
663/* PCI Capabilities Probe Function. */
664/* */
665/* Walks the PCI capabilities list for the device to find what features are */
666/* supported. */
667/* */
668/* Returns: */
669/* None. */
670/****************************************************************************/
671static void
672bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
673{
674 u32 reg;
675
676 DBENTER(BCE_VERBOSE_LOAD);
677
678 /* Check if PCI-X capability is enabled. */
679 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
680 if (reg != 0)
681 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
682 }
683
684 /* Check if PCIe capability is enabled. */
685 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
686 if (reg != 0) {
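			/* Offset 0x12 within the PCIe capability is the Link */
			/* Status register: bits 3:0 hold the negotiated link */
			/* speed and bits 9:4 the negotiated link width. */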
687 u16 link_status = pci_read_config(dev, reg + 0x12, 2);
688 DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = "
689 "0x%08X\n", link_status);
690 sc->link_speed = link_status & 0xf;
691 sc->link_width = (link_status >> 4) & 0x3f;
692 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
693 sc->bce_flags |= BCE_PCIE_FLAG;
694 }
695 }
696
697 /* Check if MSI capability is enabled. */
698 if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
699 if (reg != 0)
700 sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
701 }
702
703 /* Check if MSI-X capability is enabled. */
704 if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
705 if (reg != 0)
706 sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
707 }
708
709 DBEXIT(BCE_VERBOSE_LOAD);
710}
711
712
713/****************************************************************************/
714/* Device attach function. */
715/* */
716/* Allocates device resources, performs secondary chip identification, */
717/* resets and initializes the hardware, and initializes driver instance */
718/* variables. */
719/* */
720/* Returns: */
721/* 0 on success, positive value on failure. */
722/****************************************************************************/
723static int
724bce_attach(device_t dev)
725{
726 struct bce_softc *sc;
727 struct ifnet *ifp;
728 u32 val;
729 int error, rid, rc = 0;
730
731 sc = device_get_softc(dev);
732 sc->bce_dev = dev;
733
734 DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
735
736 sc->bce_unit = device_get_unit(dev);
737
738 /* Set initial device and PHY flags */
739 sc->bce_flags = 0;
740 sc->bce_phy_flags = 0;
741
742 pci_enable_busmaster(dev);
743
744 /* Allocate PCI memory resources. */
745 rid = PCIR_BAR(0);
746 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
747 &rid, RF_ACTIVE);
748
749 if (sc->bce_res_mem == NULL) {
750 BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
751 __FILE__, __LINE__);
752 rc = ENXIO;
753 goto bce_attach_fail;
754 }
755
756 /* Get various resource handles. */
757 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
758 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
759 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
760
761 bce_probe_pci_caps(dev, sc);
762
763 rid = 1;
764#if 0
765 /* Try allocating MSI-X interrupts. */
766 if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
767 (bce_msi_enable >= 2) &&
768 ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
769 &rid, RF_ACTIVE)) != NULL)) {
770
771 msi_needed = sc->bce_msi_count = 1;
772
773 if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
774 (sc->bce_msi_count != msi_needed)) {
775 BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
776 "Received = %d, error = %d\n", __FILE__, __LINE__,
777 msi_needed, sc->bce_msi_count, error);
778 sc->bce_msi_count = 0;
779 pci_release_msi(dev);
780 bus_release_resource(dev, SYS_RES_MEMORY, rid,
781 sc->bce_res_irq);
782 sc->bce_res_irq = NULL;
783 } else {
784 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
785 __FUNCTION__);
786 sc->bce_flags |= BCE_USING_MSIX_FLAG;
787 sc->bce_intr = bce_intr;
788 }
789 }
790#endif
791
792 /* Try allocating a MSI interrupt. */
793 if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
794 (bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
795 sc->bce_msi_count = 1;
796 if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
797 BCE_PRINTF("%s(%d): MSI allocation failed! "
798 "error = %d\n", __FILE__, __LINE__, error);
799 sc->bce_msi_count = 0;
800 pci_release_msi(dev);
801 } else {
802 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
803 "interrupt.\n", __FUNCTION__);
804 sc->bce_flags |= BCE_USING_MSI_FLAG;
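			/* The 5709/5716 support a one-shot MSI mode; note it */
			/* here so interrupt handling can take advantage of it. */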
805 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
806 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
807 sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
808 sc->bce_irq_rid = 1;
809 sc->bce_intr = bce_intr;
810 }
811 }
812
813 /* Try allocating a legacy interrupt. */
814 if (sc->bce_msi_count == 0) {
815 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
816 __FUNCTION__);
817 rid = 0;
818 sc->bce_intr = bce_intr;
819 }
820
821 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
822 &rid, RF_SHAREABLE | RF_ACTIVE);
823
824 sc->bce_irq_rid = rid;
825
826 /* Report any IRQ allocation errors. */
827 if (sc->bce_res_irq == NULL) {
828 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
829 __FILE__, __LINE__);
830 rc = ENXIO;
831 goto bce_attach_fail;
832 }
833
834 /* Initialize mutex for the current device instance. */
835 BCE_LOCK_INIT(sc, device_get_nameunit(dev));
836
837 /*
838 * Configure byte swap and enable indirect register access.
839 * Rely on CPU to do target byte swapping on big endian systems.
840	 * Access to registers outside of PCI configuration space is not
841 * valid until this is done.
842 */
843 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
844 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
845 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
846
847	/* Save ASIC revision info. */
848 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
849
850 /* Weed out any non-production controller revisions. */
851 switch(BCE_CHIP_ID(sc)) {
852 case BCE_CHIP_ID_5706_A0:
853 case BCE_CHIP_ID_5706_A1:
854 case BCE_CHIP_ID_5708_A0:
855 case BCE_CHIP_ID_5708_B0:
856 case BCE_CHIP_ID_5709_A0:
857 case BCE_CHIP_ID_5709_B0:
858 case BCE_CHIP_ID_5709_B1:
859 case BCE_CHIP_ID_5709_B2:
860 BCE_PRINTF("%s(%d): Unsupported controller "
861 "revision (%c%d)!\n", __FILE__, __LINE__,
862 (((pci_read_config(dev, PCIR_REVID, 4) &
863 0xf0) >> 4) + 'A'), (pci_read_config(dev,
864 PCIR_REVID, 4) & 0xf));
865 rc = ENODEV;
866 goto bce_attach_fail;
867 }
868
869 /*
870 * The embedded PCIe to PCI-X bridge (EPB)
871 * in the 5708 cannot address memory above
872 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
873 */
874 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
875 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
876 else
877 sc->max_bus_addr = BUS_SPACE_MAXADDR;
878
879 /*
880 * Find the base address for shared memory access.
881 * Newer versions of bootcode use a signature and offset
882 * while older versions use a fixed address.
883 */
884 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
885 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
886 /* Multi-port devices use different offsets in shared memory. */
887 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
888 (pci_get_function(sc->bce_dev) << 2));
889 else
890 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
891
892 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
893 __FUNCTION__, sc->bce_shmem_base);
894
895 /* Fetch the bootcode revision. */
896 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
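	/* Decode the three high-order bytes of the revision word into a */
	/* dotted decimal string, e.g. 0x050B01xx becomes "5.11.1". */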
897 for (int i = 0, j = 0; i < 3; i++) {
898 u8 num;
899
900 num = (u8) (val >> (24 - (i * 8)));
901 for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
902 if (num >= k || !skip0 || k == 1) {
903 sc->bce_bc_ver[j++] = (num / k) + '0';
904 skip0 = 0;
905 }
906 }
907
908 if (i != 2)
909 sc->bce_bc_ver[j++] = '.';
910 }
911
912	/* Check if any management firmware is enabled. */
913 val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
914 if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
915 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
916
917 /* Allow time for firmware to enter the running state. */
918 for (int i = 0; i < 30; i++) {
919 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
920 if (val & BCE_CONDITION_MFW_RUN_MASK)
921 break;
922 DELAY(10000);
923 }
924
925 /* Check if management firmware is running. */
926 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
927 val &= BCE_CONDITION_MFW_RUN_MASK;
928 if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
929 (val != BCE_CONDITION_MFW_RUN_NONE)) {
930 u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
931 int i = 0;
932
933 /* Read the management firmware version string. */
934 for (int j = 0; j < 3; j++) {
935 val = bce_reg_rd_ind(sc, addr + j * 4);
936 val = bswap32(val);
937 memcpy(&sc->bce_mfw_ver[i], &val, 4);
938 i += 4;
939 }
940 } else {
941 /* May cause firmware synchronization timeouts. */
942 BCE_PRINTF("%s(%d): Management firmware enabled "
943 "but not running!\n", __FILE__, __LINE__);
944 strcpy(sc->bce_mfw_ver, "NOT RUNNING!");
945
946 /* ToDo: Any action the driver should take? */
947 }
948 }
949
950 /* Get PCI bus information (speed and type). */
951 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
952 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
953 u32 clkreg;
954
955 sc->bce_flags |= BCE_PCIX_FLAG;
956
957 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
958
959 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
960 switch (clkreg) {
961 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
962 sc->bus_speed_mhz = 133;
963 break;
964
965 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
966 sc->bus_speed_mhz = 100;
967 break;
968
969 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
970 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
971 sc->bus_speed_mhz = 66;
972 break;
973
974 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
975 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
976 sc->bus_speed_mhz = 50;
977 break;
978
979 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
980 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
981 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
982 sc->bus_speed_mhz = 33;
983 break;
984 }
985 } else {
986 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
987 sc->bus_speed_mhz = 66;
988 else
989 sc->bus_speed_mhz = 33;
990 }
991
992 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
993 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
994
995 /* Reset controller and announce to bootcode that driver is present. */
996 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
997 BCE_PRINTF("%s(%d): Controller reset failed!\n",
998 __FILE__, __LINE__);
999 rc = ENXIO;
1000 goto bce_attach_fail;
1001 }
1002
1003 /* Initialize the controller. */
1004 if (bce_chipinit(sc)) {
1005 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
1006 __FILE__, __LINE__);
1007 rc = ENXIO;
1008 goto bce_attach_fail;
1009 }
1010
1011 /* Perform NVRAM test. */
1012 if (bce_nvram_test(sc)) {
1013 BCE_PRINTF("%s(%d): NVRAM test failed!\n",
1014 __FILE__, __LINE__);
1015 rc = ENXIO;
1016 goto bce_attach_fail;
1017 }
1018
1019 /* Fetch the permanent Ethernet MAC address. */
1020 bce_get_mac_addr(sc);
1021
1022 /*
1023 * Trip points control how many BDs
1024 * should be ready before generating an
1025	 * interrupt, while ticks control how long
1026 * a BD can sit in the chain before
1027 * generating an interrupt. Set the default
1028 * values for the RX and TX chains.
1029 */
1030
1031#ifdef BCE_DEBUG
1032 /* Force more frequent interrupts. */
1033 sc->bce_tx_quick_cons_trip_int = 1;
1034 sc->bce_tx_quick_cons_trip = 1;
1035 sc->bce_tx_ticks_int = 0;
1036 sc->bce_tx_ticks = 0;
1037
1038 sc->bce_rx_quick_cons_trip_int = 1;
1039 sc->bce_rx_quick_cons_trip = 1;
1040 sc->bce_rx_ticks_int = 0;
1041 sc->bce_rx_ticks = 0;
1042#else
1043 /* Improve throughput at the expense of increased latency. */
1044 sc->bce_tx_quick_cons_trip_int = 20;
1045 sc->bce_tx_quick_cons_trip = 20;
1046 sc->bce_tx_ticks_int = 80;
1047 sc->bce_tx_ticks = 80;
1048
1049 sc->bce_rx_quick_cons_trip_int = 6;
1050 sc->bce_rx_quick_cons_trip = 6;
1051 sc->bce_rx_ticks_int = 18;
1052 sc->bce_rx_ticks = 18;
1053#endif
1054
1055 /* Not used for L2. */
1056 sc->bce_comp_prod_trip_int = 0;
1057 sc->bce_comp_prod_trip = 0;
1058 sc->bce_com_ticks_int = 0;
1059 sc->bce_com_ticks = 0;
1060 sc->bce_cmd_ticks_int = 0;
1061 sc->bce_cmd_ticks = 0;
1062
1063 /* Update statistics once every second. */
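	/* The mask clears the low 8 bits of the interval (1,000,000 usec), */
	/* leaving 999,936 usec, which is still roughly one second. */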
1064 sc->bce_stats_ticks = 1000000 & 0xffff00;
1065
1066 /* Find the media type for the adapter. */
1067 bce_get_media(sc);
1068
1069 /* Store data needed by PHY driver for backplane applications */
1070 sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1071 sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);
1072
1073 /* Allocate DMA memory resources. */
1074 if (bce_dma_alloc(dev)) {
1075 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
1076 __FILE__, __LINE__);
1077 rc = ENXIO;
1078 goto bce_attach_fail;
1079 }
1080
1081 /* Allocate an ifnet structure. */
1082 ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
1083 if (ifp == NULL) {
1084 BCE_PRINTF("%s(%d): Interface allocation failed!\n",
1085 __FILE__, __LINE__);
1086 rc = ENXIO;
1087 goto bce_attach_fail;
1088 }
1089
1090 /* Initialize the ifnet interface. */
1091 ifp->if_softc = sc;
1092 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1093 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1094 ifp->if_ioctl = bce_ioctl;
1095 ifp->if_start = bce_start;
1096 ifp->if_init = bce_init;
1097 ifp->if_mtu = ETHERMTU;
1098
1099 if (bce_tso_enable) {
1100 ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
1101 ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 |
1102 IFCAP_VLAN_HWTSO;
1103 } else {
1104 ifp->if_hwassist = BCE_IF_HWASSIST;
1105 ifp->if_capabilities = BCE_IF_CAPABILITIES;
1106 }
1107
1108 ifp->if_capenable = ifp->if_capabilities;
1109
1110 /*
1111 * Assume standard mbuf sizes for buffer allocation.
1112 * This may change later if the MTU size is set to
1113 * something other than 1500.
1114 */
1115#ifdef BCE_JUMBO_HDRSPLIT
1116 sc->rx_bd_mbuf_alloc_size = MHLEN;
1117 /* Make sure offset is 16 byte aligned for hardware. */
1118 sc->rx_bd_mbuf_align_pad =
1119 roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
1120 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
1121 sc->rx_bd_mbuf_align_pad;
1122 sc->pg_bd_mbuf_alloc_size = MCLBYTES;
1123#else
1124 sc->rx_bd_mbuf_alloc_size = MCLBYTES;
1125 sc->rx_bd_mbuf_align_pad =
1126 roundup2(MCLBYTES, 16) - MCLBYTES;
1127 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
1128 sc->rx_bd_mbuf_align_pad;
1129#endif
1130
1131 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
1132 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1133 IFQ_SET_READY(&ifp->if_snd);
1134
1135 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1136 ifp->if_baudrate = IF_Mbps(2500ULL);
1137 else
1138 ifp->if_baudrate = IF_Mbps(1000);
1139
1140 /* Handle any special PHY initialization for SerDes PHYs. */
1141 bce_init_media(sc);
1142
1143	/* Attach the MII child bus and probe the PHY. */
1144 rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
1145 bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
1146 MII_OFFSET_ANY, 0);
1146 MII_OFFSET_ANY, MIIF_DOPAUSE);
1147 if (rc != 0) {
1148 BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
1149 __LINE__);
1150 goto bce_attach_fail;
1151 }
1152
1153 /* Attach to the Ethernet interface list. */
1154 ether_ifattach(ifp, sc->eaddr);
1155
1156#if __FreeBSD_version < 500000
1157 callout_init(&sc->bce_tick_callout);
1158 callout_init(&sc->bce_pulse_callout);
1159#else
1160 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1161 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1162#endif
1163
1164 /* Hookup IRQ last. */
1165 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1166 NULL, bce_intr, sc, &sc->bce_intrhand);
1167
1168 if (rc) {
1169 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1170 __FILE__, __LINE__);
1171 bce_detach(dev);
1172 goto bce_attach_exit;
1173 }
1174
1175 /*
1176 * At this point we've acquired all the resources
1177 * we need to run so there's no turning back, we're
1178 * cleared for launch.
1179 */
1180
1181 /* Print some important debugging info. */
1182 DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1183
1184 /* Add the supported sysctls to the kernel. */
1185 bce_add_sysctls(sc);
1186
1187 BCE_LOCK(sc);
1188
1189 /*
1190 * The chip reset earlier notified the bootcode that
1191 * a driver is present. We now need to start our pulse
1192 * routine so that the bootcode is reminded that we're
1193 * still running.
1194 */
1195 bce_pulse(sc);
1196
1197 bce_mgmt_init_locked(sc);
1198 BCE_UNLOCK(sc);
1199
1200 /* Finally, print some useful adapter info */
1201 bce_print_adapter_info(sc);
1202 DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1203 __FUNCTION__, sc);
1204
1205 goto bce_attach_exit;
1206
1207bce_attach_fail:
1208 bce_release_resources(sc);
1209
1210bce_attach_exit:
1211
1212 DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1213
1214 return(rc);
1215}
1216
1217
1218/****************************************************************************/
1219/* Device detach function. */
1220/* */
1221/* Stops the controller, resets the controller, and releases resources. */
1222/* */
1223/* Returns: */
1224/* 0 on success, positive value on failure. */
1225/****************************************************************************/
1226static int
1227bce_detach(device_t dev)
1228{
1229 struct bce_softc *sc = device_get_softc(dev);
1230 struct ifnet *ifp;
1231 u32 msg;
1232
1233 DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1234
1235 ifp = sc->bce_ifp;
1236
1237 /* Stop and reset the controller. */
1238 BCE_LOCK(sc);
1239
1240 /* Stop the pulse so the bootcode can go to driver absent state. */
1241 callout_stop(&sc->bce_pulse_callout);
1242
1243 bce_stop(sc);
1244 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1245 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1246 else
1247 msg = BCE_DRV_MSG_CODE_UNLOAD;
1248 bce_reset(sc, msg);
1249
1250 BCE_UNLOCK(sc);
1251
1252 ether_ifdetach(ifp);
1253
1254 /* If we have a child device on the MII bus remove it too. */
1255 bus_generic_detach(dev);
1256 device_delete_child(dev, sc->bce_miibus);
1257
1258 /* Release all remaining resources. */
1259 bce_release_resources(sc);
1260
1261 DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1262
1263 return(0);
1264}
1265
1266
1267/****************************************************************************/
1268/* Device shutdown function. */
1269/* */
1270/* Stops and resets the controller. */
1271/* */
1272/* Returns: */
1273/* 0 on success, positive value on failure. */
1274/****************************************************************************/
1275static int
1276bce_shutdown(device_t dev)
1277{
1278 struct bce_softc *sc = device_get_softc(dev);
1279 u32 msg;
1280
1281 DBENTER(BCE_VERBOSE);
1282
1283 BCE_LOCK(sc);
1284 bce_stop(sc);
1285 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1286 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1287 else
1288 msg = BCE_DRV_MSG_CODE_UNLOAD;
1289 bce_reset(sc, msg);
1290 BCE_UNLOCK(sc);
1291
1292 DBEXIT(BCE_VERBOSE);
1293
1294 return (0);
1295}
1296
1297
1298#ifdef BCE_DEBUG
1299/****************************************************************************/
1300/* Register read. */
1301/* */
1302/* Returns: */
1303/* The value of the register. */
1304/****************************************************************************/
1305static u32
1306bce_reg_rd(struct bce_softc *sc, u32 offset)
1307{
1308 u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
1309 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1310 __FUNCTION__, offset, val);
1311 return val;
1312}
1313
1314
1315/****************************************************************************/
1316/* Register write (16 bit). */
1317/* */
1318/* Returns: */
1319/* Nothing. */
1320/****************************************************************************/
1321static void
1322bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1323{
1324 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1325 __FUNCTION__, offset, val);
1326 bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
1327}
1328
1329
1330/****************************************************************************/
1331/* Register write. */
1332/* */
1333/* Returns: */
1334/* Nothing. */
1335/****************************************************************************/
1336static void
1337bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1338{
1339 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1340 __FUNCTION__, offset, val);
1341 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
1342}
1343#endif
1344
1345/****************************************************************************/
1346/* Indirect register read. */
1347/* */
1348/* Reads NetXtreme II registers using an index/data register pair in PCI */
1349/* configuration space. Using this mechanism avoids issues with posted */
1350/* reads but is much slower than memory-mapped I/O. */
1351/* */
1352/* Returns: */
1353/* The value of the register. */
1354/****************************************************************************/
1355static u32
1356bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1357{
1358 device_t dev;
1359 dev = sc->bce_dev;
1360
1361 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1362#ifdef BCE_DEBUG
1363 {
1364 u32 val;
1365 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1366 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1367 __FUNCTION__, offset, val);
1368 return val;
1369 }
1370#else
1371 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1372#endif
1373}
1374
1375
1376/****************************************************************************/
1377/* Indirect register write. */
1378/* */
1379/* Writes NetXtreme II registers using an index/data register pair in PCI */
1380/* configuration space. Using this mechanism avoids issues with posted */
1381/* writes but is much slower than memory-mapped I/O. */
1382/* */
1383/* Returns: */
1384/* Nothing. */
1385/****************************************************************************/
1386static void
1387bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1388{
1389 device_t dev;
1390 dev = sc->bce_dev;
1391
1392 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1393 __FUNCTION__, offset, val);
1394
1395 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1396 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1397}
1398
1399
1400/****************************************************************************/
1401/* Shared memory write. */
1402/* */
1403/* Writes NetXtreme II shared memory region. */
1404/* */
1405/* Returns: */
1406/* Nothing. */
1407/****************************************************************************/
1408static void
1409bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
1410{
1411 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to "
1412 "0x%08X\n", __FUNCTION__, val, offset);
1413
1414 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1415}
1416
1417
1418/****************************************************************************/
1419/* Shared memory read. */
1420/* */
1421/* Reads NetXtreme II shared memory region. */
1422/* */
1423/* Returns: */
1424/* The 32 bit value read. */
1425/****************************************************************************/
1426static u32
1427bce_shmem_rd(struct bce_softc *sc, u32 offset)
1428{
1429 u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1430
1431 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from "
1432 "0x%08X\n", __FUNCTION__, val, offset);
1433
1434 return val;
1435}
1436
1437
1438#ifdef BCE_DEBUG
1439/****************************************************************************/
1440/* Context memory read. */
1441/* */
1442/* The NetXtreme II controller uses context memory to track connection */
1443/* information for L2 and higher network protocols. */
1444/* */
1445/* Returns: */
1446/* The requested 32 bit value of context memory. */
1447/****************************************************************************/
1448static u32
1449bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1450{
1451 u32 idx, offset, retry_cnt = 5, val;
1452
1453 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 ||
1454 cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID "
1455 "address: 0x%08X.\n", __FUNCTION__, cid_addr));
1456
1457 offset = ctx_offset + cid_addr;
1458
1459 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1460 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1461
1462 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1463
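		/* Poll until the controller clears the read request bit */
		/* (at most retry_cnt iterations of 5 usec each). */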
1464 for (idx = 0; idx < retry_cnt; idx++) {
1465 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1466 if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1467 break;
1468 DELAY(5);
1469 }
1470
1471 if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1472 BCE_PRINTF("%s(%d); Unable to read CTX memory: "
1473 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1474 __FILE__, __LINE__, cid_addr, ctx_offset);
1475
1476 val = REG_RD(sc, BCE_CTX_CTX_DATA);
1477 } else {
1478 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1479 val = REG_RD(sc, BCE_CTX_DATA);
1480 }
1481
1482 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1483 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1484
1485 return(val);
1486}
1487#endif
1488
1489
1490/****************************************************************************/
1491/* Context memory write. */
1492/* */
1493/* The NetXtreme II controller uses context memory to track connection */
1494/* information for L2 and higher network protocols. */
1495/* */
1496/* Returns: */
1497/* Nothing. */
1498/****************************************************************************/
1499static void
1500bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1501{
1502 u32 idx, offset = ctx_offset + cid_addr;
1503 u32 val, retry_cnt = 5;
1504
1505 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1506 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1507
1508 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1509 BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1510 __FUNCTION__, cid_addr));
1511
1512 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1513 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1514
1515 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1516 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1517
1518 for (idx = 0; idx < retry_cnt; idx++) {
1519 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1520 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1521 break;
1522 DELAY(5);
1523 }
1524
1525 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1526 BCE_PRINTF("%s(%d); Unable to write CTX memory: "
1527 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1528 __FILE__, __LINE__, cid_addr, ctx_offset);
1529
1530 } else {
1531 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1532 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1533 }
1534}
1535
1536
1537/****************************************************************************/
1538/* PHY register read. */
1539/* */
1540/* Implements register reads on the MII bus. */
1541/* */
1542/* Returns: */
1543/* The value of the register. */
1544/****************************************************************************/
1545static int
1546bce_miibus_read_reg(device_t dev, int phy, int reg)
1547{
1548 struct bce_softc *sc;
1549 u32 val;
1550 int i;
1551
1552 sc = device_get_softc(dev);
1553
1554 /* Make sure we are accessing the correct PHY address. */
1555 if (phy != sc->bce_phy_addr) {
1556 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
1557 "for PHY read!\n", phy);
1558 return(0);
1559 }
1560
1561 /*
1562 * The 5709S PHY is an IEEE Clause 45 PHY
1563 * with special mappings to work with IEEE
1564 * Clause 22 register accesses.
1565 */
1566 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1567 if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1568 reg += 0x10;
1569 }
1570
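	/* Hardware auto-polling uses the same MDIO interface, so disable */
	/* it for the duration of this manual read; it is restored below. */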
1571 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1572 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1573 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1574
1575 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1576 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1577
1578 DELAY(40);
1579 }
1580
1581
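	/* Issue the MDIO read command, then poll for completion below */
	/* (up to BCE_PHY_TIMEOUT iterations of 10 usec each). */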
1582 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1583 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1584 BCE_EMAC_MDIO_COMM_START_BUSY;
1585 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1586
1587 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1588 DELAY(10);
1589
1590 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1591 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1592 DELAY(5);
1593
1594 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1595 val &= BCE_EMAC_MDIO_COMM_DATA;
1596
1597 break;
1598 }
1599 }
1600
1601 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1602 BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, "
1603 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1604 val = 0x0;
1605 } else {
1606 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1607 }
1608
1609
1610 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1611 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1612 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1613
1614 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1615 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1616
1617 DELAY(40);
1618 }
1619
1620 DB_PRINT_PHY_REG(reg, val);
1621 return (val & 0xffff);
1622
1623}
1624
1625
1626/****************************************************************************/
1627/* PHY register write. */
1628/* */
1629/* Implements register writes on the MII bus. */
1630/* */
1631/* Returns: */
1632/* 0 on success. */
1633/****************************************************************************/
1634static int
1635bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1636{
1637 struct bce_softc *sc;
1638 u32 val1;
1639 int i;
1640
1641 sc = device_get_softc(dev);
1642
1643 /* Make sure we are accessing the correct PHY address. */
1644 if (phy != sc->bce_phy_addr) {
1645 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
1646 "for PHY write!\n", phy);
1647 return(0);
1648 }
1649
1650 DB_PRINT_PHY_REG(reg, val);
1651
1652 /*
1653 * The 5709S PHY is an IEEE Clause 45 PHY
1654 * with special mappings to work with IEEE
1655 * Clause 22 register accesses.
1656 */
1657 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1658 if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1659 reg += 0x10;
1660 }
1661
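	/* As in bce_miibus_read_reg(), pause auto-polling around the */
	/* manual MDIO write and restore it once the access completes. */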
1662 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1663 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1664 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1665
1666 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1667 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1668
1669 DELAY(40);
1670 }
1671
1672 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1673 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1674 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1675 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1676
1677 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1678 DELAY(10);
1679
1680 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1681 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1682 DELAY(5);
1683 break;
1684 }
1685 }
1686
1687 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1688 BCE_PRINTF("%s(%d): PHY write timeout!\n",
1689 __FILE__, __LINE__);
1690
1691 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1692 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1693 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1694
1695 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1696 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1697
1698 DELAY(40);
1699 }
1700
1701 return 0;
1702}
1703
1704
1705/****************************************************************************/
1706/* MII bus status change. */
1707/* */
1708/* Called by the MII bus driver when the PHY establishes link to set the */
1709/* MAC interface registers. */
1710/* */
1711/* Returns: */
1712/* Nothing. */
1713/****************************************************************************/
1714static void
1715bce_miibus_statchg(device_t dev)
1716{
1717 struct bce_softc *sc;
1718 struct mii_data *mii;
1719 int val;
1720
1721 sc = device_get_softc(dev);
1722
1723 DBENTER(BCE_VERBOSE_PHY);
1724
1725 mii = device_get_softc(sc->bce_miibus);
1726
1727 val = REG_RD(sc, BCE_EMAC_MODE);
1728 val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1729 BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1730 BCE_EMAC_MODE_25G);
1731
1732 /* Set MII or GMII interface based on the PHY speed. */
1733 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1734 case IFM_10_T:
1735 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1736 DBPRINT(sc, BCE_INFO_PHY,
1737 "Enabling 10Mb interface.\n");
1738 val |= BCE_EMAC_MODE_PORT_MII_10;
1739 break;
1740 }
1741 /* fall-through */
1742 case IFM_100_TX:
1743 DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
1744 val |= BCE_EMAC_MODE_PORT_MII;
1745 break;
1746 case IFM_2500_SX:
1747 DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
1748 val |= BCE_EMAC_MODE_25G;
1749 /* fall-through */
1750 case IFM_1000_T:
1751 case IFM_1000_SX:
1752 DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
1753 val |= BCE_EMAC_MODE_PORT_GMII;
1754 break;
1755 default:
1756 DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
1757 "default GMII interface.\n");
1758 val |= BCE_EMAC_MODE_PORT_GMII;
1759 }
1760
1761 /* Set half or full duplex based on PHY settings. */
1762 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1763 DBPRINT(sc, BCE_INFO_PHY,
1764 "Setting Half-Duplex interface.\n");
1765 val |= BCE_EMAC_MODE_HALF_DUPLEX;
1766 } else
1767 DBPRINT(sc, BCE_INFO_PHY,
1768 "Setting Full-Duplex interface.\n");
1769
1770 REG_WR(sc, BCE_EMAC_MODE, val);
1771
1147 if (rc != 0) {
1148 BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
1149 __LINE__);
1150 goto bce_attach_fail;
1151 }
1152
1153 /* Attach to the Ethernet interface list. */
1154 ether_ifattach(ifp, sc->eaddr);
1155
1156#if __FreeBSD_version < 500000
1157 callout_init(&sc->bce_tick_callout);
1158 callout_init(&sc->bce_pulse_callout);
1159#else
1160 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1161 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1162#endif
1163
1164 /* Hookup IRQ last. */
1165 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1166 NULL, bce_intr, sc, &sc->bce_intrhand);
1167
1168 if (rc) {
1169 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1170 __FILE__, __LINE__);
1171 bce_detach(dev);
1172 goto bce_attach_exit;
1173 }
1174
1175 /*
1176 * At this point we've acquired all the resources
1177 * we need to run so there's no turning back, we're
1178 * cleared for launch.
1179 */
1180
1181 /* Print some important debugging info. */
1182 DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1183
1184 /* Add the supported sysctls to the kernel. */
1185 bce_add_sysctls(sc);
1186
1187 BCE_LOCK(sc);
1188
1189 /*
1190 * The chip reset earlier notified the bootcode that
1191 * a driver is present. We now need to start our pulse
1192 * routine so that the bootcode is reminded that we're
1193 * still running.
1194 */
1195 bce_pulse(sc);
1196
1197 bce_mgmt_init_locked(sc);
1198 BCE_UNLOCK(sc);
1199
1200 /* Finally, print some useful adapter info */
1201 bce_print_adapter_info(sc);
1202 DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1203 __FUNCTION__, sc);
1204
1205 goto bce_attach_exit;
1206
1207bce_attach_fail:
1208 bce_release_resources(sc);
1209
1210bce_attach_exit:
1211
1212 DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1213
1214 return(rc);
1215}
1216
1217
1218/****************************************************************************/
1219/* Device detach function. */
1220/* */
1221/* Stops the controller, resets the controller, and releases resources. */
1222/* */
1223/* Returns: */
1224/* 0 on success, positive value on failure. */
1225/****************************************************************************/
1226static int
1227bce_detach(device_t dev)
1228{
1229 struct bce_softc *sc = device_get_softc(dev);
1230 struct ifnet *ifp;
1231 u32 msg;
1232
1233 DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1234
1235 ifp = sc->bce_ifp;
1236
1237 /* Stop and reset the controller. */
1238 BCE_LOCK(sc);
1239
1240 /* Stop the pulse so the bootcode can go to driver absent state. */
1241 callout_stop(&sc->bce_pulse_callout);
1242
1243 bce_stop(sc);
1244 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1245 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1246 else
1247 msg = BCE_DRV_MSG_CODE_UNLOAD;
1248 bce_reset(sc, msg);
1249
1250 BCE_UNLOCK(sc);
1251
1252 ether_ifdetach(ifp);
1253
1254 /* If we have a child device on the MII bus remove it too. */
1255 bus_generic_detach(dev);
1256 device_delete_child(dev, sc->bce_miibus);
1257
1258 /* Release all remaining resources. */
1259 bce_release_resources(sc);
1260
1261 DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1262
1263 return(0);
1264}
1265
1266
1267/****************************************************************************/
1268/* Device shutdown function. */
1269/* */
1270/* Stops and resets the controller. */
1271/* */
1272/* Returns: */
1273/* 0 on success, positive value on failure. */
1274/****************************************************************************/
1275static int
1276bce_shutdown(device_t dev)
1277{
1278 struct bce_softc *sc = device_get_softc(dev);
1279 u32 msg;
1280
1281 DBENTER(BCE_VERBOSE);
1282
1283 BCE_LOCK(sc);
1284 bce_stop(sc);
1285 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1286 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1287 else
1288 msg = BCE_DRV_MSG_CODE_UNLOAD;
1289 bce_reset(sc, msg);
1290 BCE_UNLOCK(sc);
1291
1292 DBEXIT(BCE_VERBOSE);
1293
1294 return (0);
1295}
1296
1297
1298#ifdef BCE_DEBUG
1299/****************************************************************************/
1300/* Register read. */
1301/* */
1302/* Returns: */
1303/* The value of the register. */
1304/****************************************************************************/
1305static u32
1306bce_reg_rd(struct bce_softc *sc, u32 offset)
1307{
1308 u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
1309 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1310 __FUNCTION__, offset, val);
1311 return val;
1312}
1313
1314
1315/****************************************************************************/
1316/* Register write (16 bit). */
1317/* */
1318/* Returns: */
1319/* Nothing. */
1320/****************************************************************************/
1321static void
1322bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1323{
1324 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1325 __FUNCTION__, offset, val);
1326 bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
1327}
1328
1329
1330/****************************************************************************/
1331/* Register write. */
1332/* */
1333/* Returns: */
1334/* Nothing. */
1335/****************************************************************************/
1336static void
1337bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1338{
1339 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1340 __FUNCTION__, offset, val);
1341 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
1342}
1343#endif
1344
1345/****************************************************************************/
1346/* Indirect register read. */
1347/* */
1348/* Reads NetXtreme II registers using an index/data register pair in PCI */
1349/* configuration space. Using this mechanism avoids issues with posted */
1350/* reads but is much slower than memory-mapped I/O. */
1351/* */
1352/* Returns: */
1353/* The value of the register. */
1354/****************************************************************************/
1355static u32
1356bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1357{
1358 device_t dev;
1359 dev = sc->bce_dev;
1360
1361 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1362#ifdef BCE_DEBUG
1363 {
1364 u32 val;
1365 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1366 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1367 __FUNCTION__, offset, val);
1368 return val;
1369 }
1370#else
1371 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1372#endif
1373}
1374
1375
1376/****************************************************************************/
1377/* Indirect register write. */
1378/* */
1379/* Writes NetXtreme II registers using an index/data register pair in PCI */
1380/* configuration space. Using this mechanism avoids issues with posted */
1381/* writes but is muchh slower than memory-mapped I/O. */
1382/* */
1383/* Returns: */
1384/* Nothing. */
1385/****************************************************************************/
1386static void
1387bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1388{
1389 device_t dev;
1390 dev = sc->bce_dev;
1391
1392 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1393 __FUNCTION__, offset, val);
1394
1395 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1396 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1397}
1398
1399
1400/****************************************************************************/
1401/* Shared memory write. */
1402/* */
1403/* Writes NetXtreme II shared memory region. */
1404/* */
1405/* Returns: */
1406/* Nothing. */
1407/****************************************************************************/
1408static void
1409bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
1410{
1411 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to "
1412 "0x%08X\n", __FUNCTION__, val, offset);
1413
1414 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1415}
1416
1417
1418/****************************************************************************/
1419/* Shared memory read. */
1420/* */
1421/* Reads NetXtreme II shared memory region. */
1422/* */
1423/* Returns: */
1424/* The 32 bit value read. */
1425/****************************************************************************/
1426static u32
1427bce_shmem_rd(struct bce_softc *sc, u32 offset)
1428{
1429 u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1430
1431 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from "
1432 "0x%08X\n", __FUNCTION__, val, offset);
1433
1434 return val;
1435}
1436
1437
1438#ifdef BCE_DEBUG
1439/****************************************************************************/
1440/* Context memory read. */
1441/* */
1442/* The NetXtreme II controller uses context memory to track connection */
1443/* information for L2 and higher network protocols. */
1444/* */
1445/* Returns: */
1446/* The requested 32 bit value of context memory. */
1447/****************************************************************************/
1448static u32
1449bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1450{
1451 u32 idx, offset, retry_cnt = 5, val;
1452
1453 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 ||
1454 cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID "
1455 "address: 0x%08X.\n", __FUNCTION__, cid_addr));
1456
1457 offset = ctx_offset + cid_addr;
1458
1459 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1460 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1461
1462 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1463
1464 for (idx = 0; idx < retry_cnt; idx++) {
1465 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1466 if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1467 break;
1468 DELAY(5);
1469 }
1470
1471 if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1472 BCE_PRINTF("%s(%d); Unable to read CTX memory: "
1473 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1474 __FILE__, __LINE__, cid_addr, ctx_offset);
1475
1476 val = REG_RD(sc, BCE_CTX_CTX_DATA);
1477 } else {
1478 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1479 val = REG_RD(sc, BCE_CTX_DATA);
1480 }
1481
1482 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1483 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1484
1485 return(val);
1486}
1487#endif
1488
1489
1490/****************************************************************************/
1491/* Context memory write. */
1492/* */
1493/* The NetXtreme II controller uses context memory to track connection */
1494/* information for L2 and higher network protocols. */
1495/* */
1496/* Returns: */
1497/* Nothing. */
1498/****************************************************************************/
1499static void
1500bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1501{
1502 u32 idx, offset = ctx_offset + cid_addr;
1503 u32 val, retry_cnt = 5;
1504
1505 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1506 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1507
1508 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1509 BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1510 __FUNCTION__, cid_addr));
1511
1512 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1513 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1514
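		/*
		 * Post the data and a write request, then poll until the
		 * controller clears the request bit.
		 */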
1515 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1516 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1517
1518 for (idx = 0; idx < retry_cnt; idx++) {
1519 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1520 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1521 break;
1522 DELAY(5);
1523 }
1524
1525 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1526			BCE_PRINTF("%s(%d): Unable to write CTX memory: "
1527 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1528 __FILE__, __LINE__, cid_addr, ctx_offset);
1529
1530 } else {
1531 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1532 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1533 }
1534}
1535
1536
1537/****************************************************************************/
1538/* PHY register read. */
1539/* */
1540/* Implements register reads on the MII bus. */
1541/* */
1542/* Returns: */
1543/* The value of the register. */
1544/****************************************************************************/
1545static int
1546bce_miibus_read_reg(device_t dev, int phy, int reg)
1547{
1548 struct bce_softc *sc;
1549 u32 val;
1550 int i;
1551
1552 sc = device_get_softc(dev);
1553
1554 /* Make sure we are accessing the correct PHY address. */
1555 if (phy != sc->bce_phy_addr) {
1556 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
1557 "for PHY read!\n", phy);
1558 return(0);
1559 }
1560
1561 /*
1562 * The 5709S PHY is an IEEE Clause 45 PHY
1563 * with special mappings to work with IEEE
1564 * Clause 22 register accesses.
1565 */
1566 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1567 if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1568 reg += 0x10;
1569 }
1570
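	/*
	 * Temporarily disable auto-polling so the MDIO interface is free
	 * for this direct access; it is re-enabled below.
	 */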
1571 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1572 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1573 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1574
1575 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1576 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1577
1578 DELAY(40);
1579 }
1580
1581
1582 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1583 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1584 BCE_EMAC_MDIO_COMM_START_BUSY;
1585 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1586
1587 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1588 DELAY(10);
1589
1590 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1591 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1592 DELAY(5);
1593
1594 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1595 val &= BCE_EMAC_MDIO_COMM_DATA;
1596
1597 break;
1598 }
1599 }
1600
1601 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1602 BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, "
1603 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1604 val = 0x0;
1605 } else {
1606 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1607 }
1608
1609
1610 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1611 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1612 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1613
1614 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1615 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1616
1617 DELAY(40);
1618 }
1619
1620 DB_PRINT_PHY_REG(reg, val);
1621 return (val & 0xffff);
1622
1623}
1624
1625
1626/****************************************************************************/
1627/* PHY register write. */
1628/* */
1629/* Implements register writes on the MII bus. */
1630/* */
1631/* Returns: */
1632/*   0 on success.                                                           */
1633/****************************************************************************/
1634static int
1635bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1636{
1637 struct bce_softc *sc;
1638 u32 val1;
1639 int i;
1640
1641 sc = device_get_softc(dev);
1642
1643 /* Make sure we are accessing the correct PHY address. */
1644 if (phy != sc->bce_phy_addr) {
1645 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
1646 "for PHY write!\n", phy);
1647 return(0);
1648 }
1649
1650 DB_PRINT_PHY_REG(reg, val);
1651
1652 /*
1653 * The 5709S PHY is an IEEE Clause 45 PHY
1654 * with special mappings to work with IEEE
1655 * Clause 22 register accesses.
1656 */
1657 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1658 if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1659 reg += 0x10;
1660 }
1661
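	/* As in the read path, pause auto-polling for the duration of the access. */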
1662 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1663 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1664 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1665
1666 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1667 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1668
1669 DELAY(40);
1670 }
1671
1672 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1673 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1674 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1675 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1676
1677 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1678 DELAY(10);
1679
1680 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1681 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1682 DELAY(5);
1683 break;
1684 }
1685 }
1686
1687 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1688 BCE_PRINTF("%s(%d): PHY write timeout!\n",
1689 __FILE__, __LINE__);
1690
1691 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1692 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1693 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1694
1695 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1696 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1697
1698 DELAY(40);
1699 }
1700
1701 return 0;
1702}
1703
1704
1705/****************************************************************************/
1706/* MII bus status change. */
1707/* */
1708/* Called by the MII bus driver when the PHY establishes link to set the */
1709/* MAC interface registers. */
1710/* */
1711/* Returns: */
1712/* Nothing. */
1713/****************************************************************************/
1714static void
1715bce_miibus_statchg(device_t dev)
1716{
1717 struct bce_softc *sc;
1718 struct mii_data *mii;
1719 int val;
1720
1721 sc = device_get_softc(dev);
1722
1723 DBENTER(BCE_VERBOSE_PHY);
1724
1725 mii = device_get_softc(sc->bce_miibus);
1726
1727 val = REG_RD(sc, BCE_EMAC_MODE);
1728 val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1729 BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1730 BCE_EMAC_MODE_25G);
1731
1732 /* Set MII or GMII interface based on the PHY speed. */
1733 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1734 case IFM_10_T:
1735 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1736 DBPRINT(sc, BCE_INFO_PHY,
1737 "Enabling 10Mb interface.\n");
1738 val |= BCE_EMAC_MODE_PORT_MII_10;
1739 break;
1740 }
1741 /* fall-through */
1742 case IFM_100_TX:
1743 DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
1744 val |= BCE_EMAC_MODE_PORT_MII;
1745 break;
1746 case IFM_2500_SX:
1747 DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
1748 val |= BCE_EMAC_MODE_25G;
1749 /* fall-through */
1750 case IFM_1000_T:
1751 case IFM_1000_SX:
1752 DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
1753 val |= BCE_EMAC_MODE_PORT_GMII;
1754 break;
1755 default:
1756 DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
1757 "default GMII interface.\n");
1758 val |= BCE_EMAC_MODE_PORT_GMII;
1759 }
1760
1761 /* Set half or full duplex based on PHY settings. */
1762 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1763 DBPRINT(sc, BCE_INFO_PHY,
1764 "Setting Half-Duplex interface.\n");
1765 val |= BCE_EMAC_MODE_HALF_DUPLEX;
1766 } else
1767 DBPRINT(sc, BCE_INFO_PHY,
1768 "Setting Full-Duplex interface.\n");
1769
1770 REG_WR(sc, BCE_EMAC_MODE, val);
1771
1772 if ((mii->mii_media_active & IFM_ETH_RXPAUSE) != 0) {
1774 DBPRINT(sc, BCE_INFO_PHY,
1775 "%s(): Enabling RX flow control.\n", __FUNCTION__);
1776 BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1777 } else {
1778 DBPRINT(sc, BCE_INFO_PHY,
1779 "%s(): Disabling RX flow control.\n", __FUNCTION__);
1780 BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1781 }
1782
1782 if ((mii->mii_media_active & IFM_ETH_TXPAUSE) != 0) {
1784 DBPRINT(sc, BCE_INFO_PHY,
1785 "%s(): Enabling TX flow control.\n", __FUNCTION__);
1786 BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1787 sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
1788 } else {
1789 DBPRINT(sc, BCE_INFO_PHY,
1790 "%s(): Disabling TX flow control.\n", __FUNCTION__);
1791 BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1792 sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
1793 }
1794
1795 /* ToDo: Update watermarks in bce_init_rx_context(). */
1796
1797 DBEXIT(BCE_VERBOSE_PHY);
1798}
1799
1800
1801/****************************************************************************/
1802/* Acquire NVRAM lock. */
1803/* */
1804/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1805/* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1806/* remaining locks are reserved.                                             */
1807/* */
1808/* Returns: */
1809/* 0 on success, positive value on failure. */
1810/****************************************************************************/
1811static int
1812bce_acquire_nvram_lock(struct bce_softc *sc)
1813{
1814 u32 val;
1815 int j, rc = 0;
1816
1817 DBENTER(BCE_VERBOSE_NVRAM);
1818
1819 /* Request access to the flash interface. */
1820 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1821 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1822 val = REG_RD(sc, BCE_NVM_SW_ARB);
1823 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1824 break;
1825
1826 DELAY(5);
1827 }
1828
1829 if (j >= NVRAM_TIMEOUT_COUNT) {
1830 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1831 rc = EBUSY;
1832 }
1833
1834 DBEXIT(BCE_VERBOSE_NVRAM);
1835 return (rc);
1836}
1837
1838
1839/****************************************************************************/
1840/* Release NVRAM lock. */
1841/* */
1842/* When the caller is finished accessing NVRAM the lock must be released. */
1843/* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1844/* remaining locks are reserved.                                             */
1845/* */
1846/* Returns: */
1847/* 0 on success, positive value on failure. */
1848/****************************************************************************/
1849static int
1850bce_release_nvram_lock(struct bce_softc *sc)
1851{
1852 u32 val;
1853 int j, rc = 0;
1854
1855 DBENTER(BCE_VERBOSE_NVRAM);
1856
1857 /*
1858 * Relinquish nvram interface.
1859 */
1860 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1861
1862 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1863 val = REG_RD(sc, BCE_NVM_SW_ARB);
1864 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1865 break;
1866
1867 DELAY(5);
1868 }
1869
1870 if (j >= NVRAM_TIMEOUT_COUNT) {
1871 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1872 rc = EBUSY;
1873 }
1874
1875 DBEXIT(BCE_VERBOSE_NVRAM);
1876 return (rc);
1877}
1878
1879
1880#ifdef BCE_NVRAM_WRITE_SUPPORT
1881/****************************************************************************/
1882/* Enable NVRAM write access. */
1883/* */
1884/* Before writing to NVRAM the caller must enable NVRAM writes. */
1885/* */
1886/* Returns: */
1887/* 0 on success, positive value on failure. */
1888/****************************************************************************/
1889static int
1890bce_enable_nvram_write(struct bce_softc *sc)
1891{
1892 u32 val;
1893 int rc = 0;
1894
1895 DBENTER(BCE_VERBOSE_NVRAM);
1896
1897 val = REG_RD(sc, BCE_MISC_CFG);
1898 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1899
1900 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
1901 int j;
1902
1903 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1904 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1905
1906 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1907 DELAY(5);
1908
1909 val = REG_RD(sc, BCE_NVM_COMMAND);
1910 if (val & BCE_NVM_COMMAND_DONE)
1911 break;
1912 }
1913
1914 if (j >= NVRAM_TIMEOUT_COUNT) {
1915			DBPRINT(sc, BCE_WARN, "Timeout enabling NVRAM write!\n");
1916 rc = EBUSY;
1917 }
1918 }
1919
1920	DBEXIT(BCE_VERBOSE_NVRAM);
1921 return (rc);
1922}
1923
1924
1925/****************************************************************************/
1926/* Disable NVRAM write access. */
1927/* */
1928/* When the caller is finished writing to NVRAM write access must be */
1929/* disabled. */
1930/* */
1931/* Returns: */
1932/* Nothing. */
1933/****************************************************************************/
1934static void
1935bce_disable_nvram_write(struct bce_softc *sc)
1936{
1937 u32 val;
1938
1939 DBENTER(BCE_VERBOSE_NVRAM);
1940
1941 val = REG_RD(sc, BCE_MISC_CFG);
1942 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1943
1944 DBEXIT(BCE_VERBOSE_NVRAM);
1945
1946}
1947#endif
1948
1949
1950/****************************************************************************/
1951/* Enable NVRAM access. */
1952/* */
1953/* Before accessing NVRAM for read or write operations the caller must */
1954/* enable NVRAM access.                                                      */
1955/* */
1956/* Returns: */
1957/* Nothing. */
1958/****************************************************************************/
1959static void
1960bce_enable_nvram_access(struct bce_softc *sc)
1961{
1962 u32 val;
1963
1964 DBENTER(BCE_VERBOSE_NVRAM);
1965
1966 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1967 /* Enable both bits, even on read. */
1968 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val |
1969 BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1970
1971 DBEXIT(BCE_VERBOSE_NVRAM);
1972}
1973
1974
1975/****************************************************************************/
1976/* Disable NVRAM access. */
1977/* */
1978/* When the caller is finished accessing NVRAM access must be disabled. */
1979/* */
1980/* Returns: */
1981/* Nothing. */
1982/****************************************************************************/
1983static void
1984bce_disable_nvram_access(struct bce_softc *sc)
1985{
1986 u32 val;
1987
1988 DBENTER(BCE_VERBOSE_NVRAM);
1989
1990 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1991
1992 /* Disable both bits, even after read. */
1993 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val &
1994 ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1995
1996 DBEXIT(BCE_VERBOSE_NVRAM);
1997}
1998
1999
2000#ifdef BCE_NVRAM_WRITE_SUPPORT
2001/****************************************************************************/
2002/* Erase NVRAM page before writing. */
2003/* */
2004/* Non-buffered flash parts require that a page be erased before it is */
2005/* written. */
2006/* */
2007/* Returns: */
2008/* 0 on success, positive value on failure. */
2009/****************************************************************************/
2010static int
2011bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
2012{
2013 u32 cmd;
2014 int j, rc = 0;
2015
2016 DBENTER(BCE_VERBOSE_NVRAM);
2017
2018 /* Buffered flash doesn't require an erase. */
2019 if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
2020 goto bce_nvram_erase_page_exit;
2021
2022 /* Build an erase command. */
2023 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
2024 BCE_NVM_COMMAND_DOIT;
2025
2026 /*
2027	 * Clear the DONE bit separately, set the NVRAM address to erase,
2028 * and issue the erase command.
2029 */
2030 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2031 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2032 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2033
2034 /* Wait for completion. */
2035 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2036 u32 val;
2037
2038 DELAY(5);
2039
2040 val = REG_RD(sc, BCE_NVM_COMMAND);
2041 if (val & BCE_NVM_COMMAND_DONE)
2042 break;
2043 }
2044
2045 if (j >= NVRAM_TIMEOUT_COUNT) {
2046 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
2047 rc = EBUSY;
2048 }
2049
2050bce_nvram_erase_page_exit:
2051 DBEXIT(BCE_VERBOSE_NVRAM);
2052 return (rc);
2053}
2054#endif /* BCE_NVRAM_WRITE_SUPPORT */
2055
2056
2057/****************************************************************************/
2058/* Read a dword (32 bits) from NVRAM. */
2059/* */
2060/* Read a 32 bit word from NVRAM. The caller is assumed to have already */
2061/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
2062/* */
2063/* Returns: */
2064/* 0 on success and the 32 bit value read, positive value on failure. */
2065/****************************************************************************/
2066static int
2067bce_nvram_read_dword(struct bce_softc *sc,
2068 u32 offset, u8 *ret_val, u32 cmd_flags)
2069{
2070 u32 cmd;
2071 int i, rc = 0;
2072
2073 DBENTER(BCE_EXTREME_NVRAM);
2074
2075 /* Build the command word. */
2076 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
2077
2078 /* Calculate the offset for buffered flash if translation is used. */
2079 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2080 offset = ((offset / sc->bce_flash_info->page_size) <<
2081 sc->bce_flash_info->page_bits) +
2082 (offset % sc->bce_flash_info->page_size);
2083 }
2084
2085 /*
2086 * Clear the DONE bit separately, set the address to read,
2087 * and issue the read.
2088 */
2089 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2090 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2091 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2092
2093 /* Wait for completion. */
2094 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
2095 u32 val;
2096
2097 DELAY(5);
2098
2099 val = REG_RD(sc, BCE_NVM_COMMAND);
2100 if (val & BCE_NVM_COMMAND_DONE) {
2101 val = REG_RD(sc, BCE_NVM_READ);
2102
2103 val = bce_be32toh(val);
2104 memcpy(ret_val, &val, 4);
2105 break;
2106 }
2107 }
2108
2109 /* Check for errors. */
2110 if (i >= NVRAM_TIMEOUT_COUNT) {
2111 BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
2112 "offset 0x%08X!\n", __FILE__, __LINE__, offset);
2113 rc = EBUSY;
2114 }
2115
2116 DBEXIT(BCE_EXTREME_NVRAM);
2117 return(rc);
2118}
2119
2120
2121#ifdef BCE_NVRAM_WRITE_SUPPORT
2122/****************************************************************************/
2123/* Write a dword (32 bits) to NVRAM. */
2124/* */
2125/* Write a 32 bit word to NVRAM. The caller is assumed to have already */
2126/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
2127/* enabled NVRAM write access. */
2128/* */
2129/* Returns: */
2130/* 0 on success, positive value on failure. */
2131/****************************************************************************/
2132static int
2133bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
2134 u32 cmd_flags)
2135{
2136 u32 cmd, val32;
2137 int j, rc = 0;
2138
2139 DBENTER(BCE_VERBOSE_NVRAM);
2140
2141 /* Build the command word. */
2142 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
2143
2144 /* Calculate the offset for buffered flash if translation is used. */
2145 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2146 offset = ((offset / sc->bce_flash_info->page_size) <<
2147 sc->bce_flash_info->page_bits) +
2148 (offset % sc->bce_flash_info->page_size);
2149 }
2150
2151 /*
2152 * Clear the DONE bit separately, convert NVRAM data to big-endian,
2153 * set the NVRAM address to write, and issue the write command
2154 */
2155 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2156 memcpy(&val32, val, 4);
2157 val32 = htobe32(val32);
2158 REG_WR(sc, BCE_NVM_WRITE, val32);
2159 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2160 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2161
2162 /* Wait for completion. */
2163 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2164 DELAY(5);
2165
2166 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
2167 break;
2168 }
2169 if (j >= NVRAM_TIMEOUT_COUNT) {
2170 BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
2171 "offset 0x%08X\n", __FILE__, __LINE__, offset);
2172 rc = EBUSY;
2173 }
2174
2175 DBEXIT(BCE_VERBOSE_NVRAM);
2176 return (rc);
2177}
2178#endif /* BCE_NVRAM_WRITE_SUPPORT */
2179
2180
2181/****************************************************************************/
2182/* Initialize NVRAM access. */
2183/* */
2184/* Identify the NVRAM device in use and prepare the NVRAM interface to */
2185/* access that device. */
2186/* */
2187/* Returns: */
2188/* 0 on success, positive value on failure. */
2189/****************************************************************************/
2190static int
2191bce_init_nvram(struct bce_softc *sc)
2192{
2193 u32 val;
2194 int j, entry_count, rc = 0;
2195 struct flash_spec *flash;
2196
2197 DBENTER(BCE_VERBOSE_NVRAM);
2198
2199 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2200 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2201 sc->bce_flash_info = &flash_5709;
2202 goto bce_init_nvram_get_flash_size;
2203 }
2204
2205 /* Determine the selected interface. */
2206 val = REG_RD(sc, BCE_NVM_CFG1);
2207
2208 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2209
2210 /*
2211 * Flash reconfiguration is required to support additional
2212 * NVRAM devices not directly supported in hardware.
2213 * Check if the flash interface was reconfigured
2214 * by the bootcode.
2215 */
2216
2217 if (val & 0x40000000) {
2218 /* Flash interface reconfigured by bootcode. */
2219
2220 DBPRINT(sc,BCE_INFO_LOAD,
2221 "bce_init_nvram(): Flash WAS reconfigured.\n");
2222
2223 for (j = 0, flash = &flash_table[0]; j < entry_count;
2224 j++, flash++) {
2225 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2226 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2227 sc->bce_flash_info = flash;
2228 break;
2229 }
2230 }
2231 } else {
2232 /* Flash interface not yet reconfigured. */
2233 u32 mask;
2234
2235 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2236 __FUNCTION__);
2237
2238 if (val & (1 << 23))
2239 mask = FLASH_BACKUP_STRAP_MASK;
2240 else
2241 mask = FLASH_STRAP_MASK;
2242
2243 /* Look for the matching NVRAM device configuration data. */
2244 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2245
2246 /* Check if the device matches any of the known devices. */
2247 if ((val & mask) == (flash->strapping & mask)) {
2248 /* Found a device match. */
2249 sc->bce_flash_info = flash;
2250
2251 /* Request access to the flash interface. */
2252 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2253 return rc;
2254
2255 /* Reconfigure the flash interface. */
2256 bce_enable_nvram_access(sc);
2257 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2258 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2259 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2260 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2261 bce_disable_nvram_access(sc);
2262 bce_release_nvram_lock(sc);
2263
2264 break;
2265 }
2266 }
2267 }
2268
2269 /* Check if a matching device was found. */
2270 if (j == entry_count) {
2271 sc->bce_flash_info = NULL;
2272 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2273 __FILE__, __LINE__);
2274 DBEXIT(BCE_VERBOSE_NVRAM);
2275 return (ENODEV);
2276 }
2277
2278bce_init_nvram_get_flash_size:
2279 /* Write the flash config data to the shared memory interface. */
2280 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
2281 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2282 if (val)
2283 sc->bce_flash_size = val;
2284 else
2285 sc->bce_flash_size = sc->bce_flash_info->total_size;
2286
2287 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2288 __FUNCTION__, sc->bce_flash_info->name,
2289 sc->bce_flash_info->total_size);
2290
2291 DBEXIT(BCE_VERBOSE_NVRAM);
2292 return rc;
2293}
2294
2295
2296/****************************************************************************/
2297/* Read an arbitrary range of data from NVRAM. */
2298/* */
2299/* Prepares the NVRAM interface for access and reads the requested data */
2300/* into the supplied buffer. */
2301/* */
2302/* Returns: */
2303/* 0 on success and the data read, positive value on failure. */
2304/****************************************************************************/
2305static int
2306bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2307 int buf_size)
2308{
2309 int rc = 0;
2310 u32 cmd_flags, offset32, len32, extra;
2311
2312 DBENTER(BCE_VERBOSE_NVRAM);
2313
2314 if (buf_size == 0)
2315 goto bce_nvram_read_exit;
2316
2317 /* Request access to the flash interface. */
2318 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2319 goto bce_nvram_read_exit;
2320
2321 /* Enable access to flash interface */
2322 bce_enable_nvram_access(sc);
2323
2324 len32 = buf_size;
2325 offset32 = offset;
2326 extra = 0;
2327
2328 cmd_flags = 0;
2329
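	/*
	 * A non-dword-aligned start is handled by reading the enclosing
	 * dword and copying out only the requested bytes.
	 */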
2330 if (offset32 & 3) {
2331 u8 buf[4];
2332 u32 pre_len;
2333
2334 offset32 &= ~3;
2335 pre_len = 4 - (offset & 3);
2336
2337 if (pre_len >= len32) {
2338 pre_len = len32;
2339 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2340 }
2341 else {
2342 cmd_flags = BCE_NVM_COMMAND_FIRST;
2343 }
2344
2345 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2346
2347 if (rc)
2348 return rc;
2349
2350 memcpy(ret_buf, buf + (offset & 3), pre_len);
2351
2352 offset32 += 4;
2353 ret_buf += pre_len;
2354 len32 -= pre_len;
2355 }
2356
2357 if (len32 & 3) {
2358 extra = 4 - (len32 & 3);
2359 len32 = (len32 + 4) & ~3;
2360 }
2361
2362 if (len32 == 4) {
2363 u8 buf[4];
2364
2365 if (cmd_flags)
2366 cmd_flags = BCE_NVM_COMMAND_LAST;
2367 else
2368 cmd_flags = BCE_NVM_COMMAND_FIRST |
2369 BCE_NVM_COMMAND_LAST;
2370
2371 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2372
2373 memcpy(ret_buf, buf, 4 - extra);
2374 }
2375 else if (len32 > 0) {
2376 u8 buf[4];
2377
2378 /* Read the first word. */
2379 if (cmd_flags)
2380 cmd_flags = 0;
2381 else
2382 cmd_flags = BCE_NVM_COMMAND_FIRST;
2383
2384 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2385
2386 /* Advance to the next dword. */
2387 offset32 += 4;
2388 ret_buf += 4;
2389 len32 -= 4;
2390
2391 while (len32 > 4 && rc == 0) {
2392 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2393
2394 /* Advance to the next dword. */
2395 offset32 += 4;
2396 ret_buf += 4;
2397 len32 -= 4;
2398 }
2399
2400 if (rc)
2401 goto bce_nvram_read_locked_exit;
2402
2403 cmd_flags = BCE_NVM_COMMAND_LAST;
2404 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2405
2406 memcpy(ret_buf, buf, 4 - extra);
2407 }
2408
2409bce_nvram_read_locked_exit:
2410 /* Disable access to flash interface and release the lock. */
2411 bce_disable_nvram_access(sc);
2412 bce_release_nvram_lock(sc);
2413
2414bce_nvram_read_exit:
2415 DBEXIT(BCE_VERBOSE_NVRAM);
2416 return rc;
2417}
2418
2419
2420#ifdef BCE_NVRAM_WRITE_SUPPORT
2421/****************************************************************************/
2422/* Write an arbitrary range of data to NVRAM.                                */
2423/* */
2424/* Prepares the NVRAM interface for write access and writes the requested */
2425/* data from the supplied buffer. The caller is responsible for */
2426/* calculating any appropriate CRCs. */
2427/* */
2428/* Returns: */
2429/* 0 on success, positive value on failure. */
2430/****************************************************************************/
2431static int
2432bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2433 int buf_size)
2434{
2435 u32 written, offset32, len32;
2436 u8 *buf, start[4], end[4];
2437 int rc = 0;
2438 int align_start, align_end;
2439
2440 DBENTER(BCE_VERBOSE_NVRAM);
2441
2442 buf = data_buf;
2443 offset32 = offset;
2444 len32 = buf_size;
2445 align_start = align_end = 0;
2446
2447 if ((align_start = (offset32 & 3))) {
2448 offset32 &= ~3;
2449 len32 += align_start;
2450 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2451 goto bce_nvram_write_exit;
2452 }
2453
2454 if (len32 & 3) {
2455 if ((len32 > 4) || !align_start) {
2456 align_end = 4 - (len32 & 3);
2457 len32 += align_end;
2458 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2459 end, 4))) {
2460 goto bce_nvram_write_exit;
2461 }
2462 }
2463 }
2464
2465 if (align_start || align_end) {
2466 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2467		if (buf == NULL) {
2468 rc = ENOMEM;
2469 goto bce_nvram_write_exit;
2470 }
2471
2472 if (align_start) {
2473 memcpy(buf, start, 4);
2474 }
2475
2476 if (align_end) {
2477 memcpy(buf + len32 - 4, end, 4);
2478 }
2479 memcpy(buf + align_start, data_buf, buf_size);
2480 }
2481
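	/*
	 * Write the data one flash page at a time.  For non-buffered parts
	 * each page is read into a local buffer, erased, and rewritten
	 * with the merged contents.
	 */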
2482 written = 0;
2483 while ((written < len32) && (rc == 0)) {
2484 u32 page_start, page_end, data_start, data_end;
2485 u32 addr, cmd_flags;
2486 int i;
2487 u8 flash_buffer[264];
2488
2489 /* Find the page_start addr */
2490 page_start = offset32 + written;
2491 page_start -= (page_start % sc->bce_flash_info->page_size);
2492 /* Find the page_end addr */
2493 page_end = page_start + sc->bce_flash_info->page_size;
2494 /* Find the data_start addr */
2495 data_start = (written == 0) ? offset32 : page_start;
2496 /* Find the data_end addr */
2497 data_end = (page_end > offset32 + len32) ?
2498 (offset32 + len32) : page_end;
2499
2500 /* Request access to the flash interface. */
2501 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2502 goto bce_nvram_write_exit;
2503
2504 /* Enable access to flash interface */
2505 bce_enable_nvram_access(sc);
2506
2507 cmd_flags = BCE_NVM_COMMAND_FIRST;
2508 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2509 int j;
2510
2511 /* Read the whole page into the buffer
2512			 * (non-buffered flash only) */
2513 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2514 if (j == (sc->bce_flash_info->page_size - 4)) {
2515 cmd_flags |= BCE_NVM_COMMAND_LAST;
2516 }
2517 rc = bce_nvram_read_dword(sc,
2518 page_start + j,
2519 &flash_buffer[j],
2520 cmd_flags);
2521
2522 if (rc)
2523 goto bce_nvram_write_locked_exit;
2524
2525 cmd_flags = 0;
2526 }
2527 }
2528
2529 /* Enable writes to flash interface (unlock write-protect) */
2530 if ((rc = bce_enable_nvram_write(sc)) != 0)
2531 goto bce_nvram_write_locked_exit;
2532
2533 /* Erase the page */
2534 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2535 goto bce_nvram_write_locked_exit;
2536
2537 /* Re-enable the write again for the actual write */
2538 bce_enable_nvram_write(sc);
2539
2540 /* Loop to write back the buffer data from page_start to
2541 * data_start */
2542 i = 0;
2543 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2544 for (addr = page_start; addr < data_start;
2545 addr += 4, i += 4) {
2546
2547 rc = bce_nvram_write_dword(sc, addr,
2548 &flash_buffer[i], cmd_flags);
2549
2550 if (rc != 0)
2551 goto bce_nvram_write_locked_exit;
2552
2553 cmd_flags = 0;
2554 }
2555 }
2556
2557 /* Loop to write the new data from data_start to data_end */
2558 for (addr = data_start; addr < data_end; addr += 4, i++) {
2559 if ((addr == page_end - 4) ||
2560 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2561 (addr == data_end - 4))) {
2562
2563 cmd_flags |= BCE_NVM_COMMAND_LAST;
2564 }
2565 rc = bce_nvram_write_dword(sc, addr, buf,
2566 cmd_flags);
2567
2568 if (rc != 0)
2569 goto bce_nvram_write_locked_exit;
2570
2571 cmd_flags = 0;
2572 buf += 4;
2573 }
2574
2575 /* Loop to write back the buffer data from data_end
2576 * to page_end */
2577 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2578 for (addr = data_end; addr < page_end;
2579 addr += 4, i += 4) {
2580
2581 if (addr == page_end-4) {
2582 cmd_flags = BCE_NVM_COMMAND_LAST;
2583 }
2584 rc = bce_nvram_write_dword(sc, addr,
2585 &flash_buffer[i], cmd_flags);
2586
2587 if (rc != 0)
2588 goto bce_nvram_write_locked_exit;
2589
2590 cmd_flags = 0;
2591 }
2592 }
2593
2594 /* Disable writes to flash interface (lock write-protect) */
2595 bce_disable_nvram_write(sc);
2596
2597 /* Disable access to flash interface */
2598 bce_disable_nvram_access(sc);
2599 bce_release_nvram_lock(sc);
2600
2601 /* Increment written */
2602 written += data_end - data_start;
2603 }
2604
2605 goto bce_nvram_write_exit;
2606
2607bce_nvram_write_locked_exit:
2608 bce_disable_nvram_write(sc);
2609 bce_disable_nvram_access(sc);
2610 bce_release_nvram_lock(sc);
2611
2612bce_nvram_write_exit:
2613 if (align_start || align_end)
2614 free(buf, M_DEVBUF);
2615
2616 DBEXIT(BCE_VERBOSE_NVRAM);
2617 return (rc);
2618}
2619#endif /* BCE_NVRAM_WRITE_SUPPORT */
2620
2621
2622/****************************************************************************/
2623/* Verifies that NVRAM is accessible and contains valid data. */
2624/* */
2625/* Reads the configuration data from NVRAM and verifies that the CRC is */
2626/* correct. */
2627/* */
2628/* Returns: */
2629/* 0 on success, positive value on failure. */
2630/****************************************************************************/
2631static int
2632bce_nvram_test(struct bce_softc *sc)
2633{
2634 u32 buf[BCE_NVRAM_SIZE / 4];
2635 u8 *data = (u8 *) buf;
2636 int rc = 0;
2637 u32 magic, csum;
2638
2639 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2640
2641 /*
2642 * Check that the device NVRAM is valid by reading
2643 * the magic value at offset 0.
2644 */
2645 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2646 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n",
2647 __FILE__, __LINE__);
2648 goto bce_nvram_test_exit;
2649 }
2650
2651 /*
2652 * Verify that offset 0 of the NVRAM contains
2653 * a valid magic number.
2654 */
2655 magic = bce_be32toh(buf[0]);
2656 if (magic != BCE_NVRAM_MAGIC) {
2657 rc = ENODEV;
2658 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! "
2659 "Expected: 0x%08X, Found: 0x%08X\n",
2660 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2661 goto bce_nvram_test_exit;
2662 }
2663
2664 /*
2665 * Verify that the device NVRAM includes valid
2666 * configuration data.
2667 */
2668 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2669 BCE_PRINTF("%s(%d): Unable to read manufacturing "
2670		    "information from NVRAM!\n", __FILE__, __LINE__);
2671 goto bce_nvram_test_exit;
2672 }
2673
2674 csum = ether_crc32_le(data, 0x100);
2675 if (csum != BCE_CRC32_RESIDUAL) {
2676 rc = ENODEV;
2677 BCE_PRINTF("%s(%d): Invalid manufacturing information "
2678 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2679 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2680 goto bce_nvram_test_exit;
2681 }
2682
2683 csum = ether_crc32_le(data + 0x100, 0x100);
2684 if (csum != BCE_CRC32_RESIDUAL) {
2685 rc = ENODEV;
2686 BCE_PRINTF("%s(%d): Invalid feature configuration "
2687 "information NVRAM CRC! Expected: 0x%08X, "
2688		    "Found: 0x%08X\n", __FILE__, __LINE__,
2689 BCE_CRC32_RESIDUAL, csum);
2690 }
2691
2692bce_nvram_test_exit:
2693 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2694 return rc;
2695}
2696
2697
2698/****************************************************************************/
2699/* Identifies the current media type of the controller and sets the PHY */
2700/* address. */
2701/* */
2702/* Returns: */
2703/* Nothing. */
2704/****************************************************************************/
2705static void
2706bce_get_media(struct bce_softc *sc)
2707{
2708 u32 val;
2709
2710 DBENTER(BCE_VERBOSE_PHY);
2711
2712 /* Assume PHY address for copper controllers. */
2713 sc->bce_phy_addr = 1;
2714
2715 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2716 u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
2717 u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2718 u32 strap;
2719
2720 /*
2721 * The BCM5709S is software configurable
2722 * for Copper or SerDes operation.
2723 */
2724 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2725 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
2726 "for copper.\n");
2727 goto bce_get_media_exit;
2728 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2729 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
2730 "for dual media.\n");
2731 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2732 goto bce_get_media_exit;
2733 }
2734
2735 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2736 strap = (val &
2737 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2738 else
2739 strap = (val &
2740 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
2741
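		/*
		 * The strap value maps onto SerDes or copper differently
		 * depending on which PCI function this port is.
		 */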
2742 if (pci_get_function(sc->bce_dev) == 0) {
2743 switch (strap) {
2744 case 0x4:
2745 case 0x5:
2746 case 0x6:
2747 DBPRINT(sc, BCE_INFO_LOAD,
2748 "BCM5709 s/w configured for SerDes.\n");
2749 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2750 break;
2751 default:
2752 DBPRINT(sc, BCE_INFO_LOAD,
2753 "BCM5709 s/w configured for Copper.\n");
2754 break;
2755 }
2756 } else {
2757 switch (strap) {
2758 case 0x1:
2759 case 0x2:
2760 case 0x4:
2761 DBPRINT(sc, BCE_INFO_LOAD,
2762 "BCM5709 s/w configured for SerDes.\n");
2763 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2764 break;
2765 default:
2766 DBPRINT(sc, BCE_INFO_LOAD,
2767 "BCM5709 s/w configured for Copper.\n");
2768 break;
2769 }
2770 }
2771
2772 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
2773 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2774
2775 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
2776
2777 sc->bce_flags |= BCE_NO_WOL_FLAG;
2778
2779 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
2780 sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG;
2781
2782 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2783 /* 5708S/09S/16S use a separate PHY for SerDes. */
2784 sc->bce_phy_addr = 2;
2785
2786 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
2787 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
2788 sc->bce_phy_flags |=
2789 BCE_PHY_2_5G_CAPABLE_FLAG;
2790 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb "
2791 "capable adapter\n");
2792 }
2793 }
2794 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
2795 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
2796 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
2797
2798bce_get_media_exit:
2799 DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
2800 "Using PHY address %d.\n", sc->bce_phy_addr);
2801
2802 DBEXIT(BCE_VERBOSE_PHY);
2803}
2804
2805
2806/****************************************************************************/
2807/* Performs PHY initialization required before MII drivers access the */
2808/* device. */
2809/* */
2810/* Returns: */
2811/* Nothing. */
2812/****************************************************************************/
2813static void
2814bce_init_media(struct bce_softc *sc)
2815{
2816 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
2817 /*
2818 * Configure 5709S/5716S PHYs to use traditional IEEE
2819 * Clause 22 method. Otherwise we have no way to attach
2820 * the PHY in mii(4) layer. PHY specific configuration
2821 * is done in mii layer.
2822 */
2823
2824 /* Select auto-negotiation MMD of the PHY. */
2825 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
2826 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2827 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
2828 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2829
2830 /* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
2831 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
2832 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2833 }
2834}
2835
2836
2837/****************************************************************************/
2838/* Free any DMA memory owned by the driver. */
2839/* */
2840/* Scans through each data structure that requires DMA memory and frees     */
2841/* the memory if allocated. */
2842/* */
2843/* Returns: */
2844/* Nothing. */
2845/****************************************************************************/
2846static void
2847bce_dma_free(struct bce_softc *sc)
2848{
2849 int i;
2850
2851 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2852
2853 /* Free, unmap, and destroy the status block. */
2854 if (sc->status_block != NULL) {
2855 bus_dmamem_free(
2856 sc->status_tag,
2857 sc->status_block,
2858 sc->status_map);
2859 sc->status_block = NULL;
2860 }
2861
2862 if (sc->status_map != NULL) {
2863 bus_dmamap_unload(
2864 sc->status_tag,
2865 sc->status_map);
2866 bus_dmamap_destroy(sc->status_tag,
2867 sc->status_map);
2868 sc->status_map = NULL;
2869 }
2870
2871 if (sc->status_tag != NULL) {
2872 bus_dma_tag_destroy(sc->status_tag);
2873 sc->status_tag = NULL;
2874 }
2875
2876
2877 /* Free, unmap, and destroy the statistics block. */
2878 if (sc->stats_block != NULL) {
2879 bus_dmamem_free(
2880 sc->stats_tag,
2881 sc->stats_block,
2882 sc->stats_map);
2883 sc->stats_block = NULL;
2884 }
2885
2886 if (sc->stats_map != NULL) {
2887 bus_dmamap_unload(
2888 sc->stats_tag,
2889 sc->stats_map);
2890 bus_dmamap_destroy(sc->stats_tag,
2891 sc->stats_map);
2892 sc->stats_map = NULL;
2893 }
2894
2895 if (sc->stats_tag != NULL) {
2896 bus_dma_tag_destroy(sc->stats_tag);
2897 sc->stats_tag = NULL;
2898 }
2899
2900
2901 /* Free, unmap and destroy all context memory pages. */
2902 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2903 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2904 for (i = 0; i < sc->ctx_pages; i++ ) {
2905 if (sc->ctx_block[i] != NULL) {
2906 bus_dmamem_free(
2907 sc->ctx_tag,
2908 sc->ctx_block[i],
2909 sc->ctx_map[i]);
2910 sc->ctx_block[i] = NULL;
2911 }
2912
2913 if (sc->ctx_map[i] != NULL) {
2914 bus_dmamap_unload(
2915 sc->ctx_tag,
2916 sc->ctx_map[i]);
2917 bus_dmamap_destroy(
2918 sc->ctx_tag,
2919 sc->ctx_map[i]);
2920 sc->ctx_map[i] = NULL;
2921 }
2922 }
2923
2924 /* Destroy the context memory tag. */
2925 if (sc->ctx_tag != NULL) {
2926 bus_dma_tag_destroy(sc->ctx_tag);
2927 sc->ctx_tag = NULL;
2928 }
2929 }
2930
2931
2932 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2933 for (i = 0; i < TX_PAGES; i++ ) {
2934 if (sc->tx_bd_chain[i] != NULL) {
2935 bus_dmamem_free(
2936 sc->tx_bd_chain_tag,
2937 sc->tx_bd_chain[i],
2938 sc->tx_bd_chain_map[i]);
2939 sc->tx_bd_chain[i] = NULL;
2940 }
2941
2942 if (sc->tx_bd_chain_map[i] != NULL) {
2943 bus_dmamap_unload(
2944 sc->tx_bd_chain_tag,
2945 sc->tx_bd_chain_map[i]);
2946 bus_dmamap_destroy(
2947 sc->tx_bd_chain_tag,
2948 sc->tx_bd_chain_map[i]);
2949 sc->tx_bd_chain_map[i] = NULL;
2950 }
2951 }
2952
2953 /* Destroy the TX buffer descriptor tag. */
2954 if (sc->tx_bd_chain_tag != NULL) {
2955 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2956 sc->tx_bd_chain_tag = NULL;
2957 }
2958
2959
2960 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2961 for (i = 0; i < RX_PAGES; i++ ) {
2962 if (sc->rx_bd_chain[i] != NULL) {
2963 bus_dmamem_free(
2964 sc->rx_bd_chain_tag,
2965 sc->rx_bd_chain[i],
2966 sc->rx_bd_chain_map[i]);
2967 sc->rx_bd_chain[i] = NULL;
2968 }
2969
2970 if (sc->rx_bd_chain_map[i] != NULL) {
2971 bus_dmamap_unload(
2972 sc->rx_bd_chain_tag,
2973 sc->rx_bd_chain_map[i]);
2974 bus_dmamap_destroy(
2975 sc->rx_bd_chain_tag,
2976 sc->rx_bd_chain_map[i]);
2977 sc->rx_bd_chain_map[i] = NULL;
2978 }
2979 }
2980
2981 /* Destroy the RX buffer descriptor tag. */
2982 if (sc->rx_bd_chain_tag != NULL) {
2983 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2984 sc->rx_bd_chain_tag = NULL;
2985 }
2986
2987
2988#ifdef BCE_JUMBO_HDRSPLIT
2989 /* Free, unmap and destroy all page buffer descriptor chain pages. */
2990 for (i = 0; i < PG_PAGES; i++ ) {
2991 if (sc->pg_bd_chain[i] != NULL) {
2992 bus_dmamem_free(
2993 sc->pg_bd_chain_tag,
2994 sc->pg_bd_chain[i],
2995 sc->pg_bd_chain_map[i]);
2996 sc->pg_bd_chain[i] = NULL;
2997 }
2998
2999 if (sc->pg_bd_chain_map[i] != NULL) {
3000 bus_dmamap_unload(
3001 sc->pg_bd_chain_tag,
3002 sc->pg_bd_chain_map[i]);
3003 bus_dmamap_destroy(
3004 sc->pg_bd_chain_tag,
3005 sc->pg_bd_chain_map[i]);
3006 sc->pg_bd_chain_map[i] = NULL;
3007 }
3008 }
3009
3010 /* Destroy the page buffer descriptor tag. */
3011 if (sc->pg_bd_chain_tag != NULL) {
3012 bus_dma_tag_destroy(sc->pg_bd_chain_tag);
3013 sc->pg_bd_chain_tag = NULL;
3014 }
3015#endif
3016
3017
3018 /* Unload and destroy the TX mbuf maps. */
3019 for (i = 0; i < TOTAL_TX_BD; i++) {
3020 if (sc->tx_mbuf_map[i] != NULL) {
3021 bus_dmamap_unload(sc->tx_mbuf_tag,
3022 sc->tx_mbuf_map[i]);
3023 bus_dmamap_destroy(sc->tx_mbuf_tag,
3024 sc->tx_mbuf_map[i]);
3025 sc->tx_mbuf_map[i] = NULL;
3026 }
3027 }
3028
3029 /* Destroy the TX mbuf tag. */
3030 if (sc->tx_mbuf_tag != NULL) {
3031 bus_dma_tag_destroy(sc->tx_mbuf_tag);
3032 sc->tx_mbuf_tag = NULL;
3033 }
3034
3035 /* Unload and destroy the RX mbuf maps. */
3036 for (i = 0; i < TOTAL_RX_BD; i++) {
3037 if (sc->rx_mbuf_map[i] != NULL) {
3038 bus_dmamap_unload(sc->rx_mbuf_tag,
3039 sc->rx_mbuf_map[i]);
3040 bus_dmamap_destroy(sc->rx_mbuf_tag,
3041 sc->rx_mbuf_map[i]);
3042 sc->rx_mbuf_map[i] = NULL;
3043 }
3044 }
3045
3046 /* Destroy the RX mbuf tag. */
3047 if (sc->rx_mbuf_tag != NULL) {
3048 bus_dma_tag_destroy(sc->rx_mbuf_tag);
3049 sc->rx_mbuf_tag = NULL;
3050 }
3051
3052#ifdef BCE_JUMBO_HDRSPLIT
3053 /* Unload and destroy the page mbuf maps. */
3054 for (i = 0; i < TOTAL_PG_BD; i++) {
3055 if (sc->pg_mbuf_map[i] != NULL) {
3056 bus_dmamap_unload(sc->pg_mbuf_tag,
3057 sc->pg_mbuf_map[i]);
3058 bus_dmamap_destroy(sc->pg_mbuf_tag,
3059 sc->pg_mbuf_map[i]);
3060 sc->pg_mbuf_map[i] = NULL;
3061 }
3062 }
3063
3064 /* Destroy the page mbuf tag. */
3065 if (sc->pg_mbuf_tag != NULL) {
3066 bus_dma_tag_destroy(sc->pg_mbuf_tag);
3067 sc->pg_mbuf_tag = NULL;
3068 }
3069#endif
3070
3071 /* Destroy the parent tag */
3072 if (sc->parent_tag != NULL) {
3073 bus_dma_tag_destroy(sc->parent_tag);
3074 sc->parent_tag = NULL;
3075 }
3076
3077 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
3078}
3079
3080
3081/****************************************************************************/
3082/* Get DMA memory from the OS. */
3083/* */
3084/* Validates that the OS has provided DMA buffers in response to a */
3085/* bus_dmamap_load() call and saves the physical address of those buffers. */
3086/* When the callback is used the OS will return 0 for the mapping function */
3087/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
3088/* failures back to the caller. */
3089/* */
3090/* Returns: */
3091/* Nothing. */
3092/****************************************************************************/
3093static void
3094bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3095{
3096 bus_addr_t *busaddr = arg;
3097
3098 KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!",
3099 __FUNCTION__, nseg));
3100 /* Simulate a mapping failure. */
3101 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control),
3102 error = ENOMEM);
3103
3104 /* ToDo: How to increment debug sim_count variable here? */
3105
3106 /* Check for an error and signal the caller that an error occurred. */
3107 if (error) {
3108 *busaddr = 0;
3109 } else {
3110 *busaddr = segs->ds_addr;
3111 }
3112
3113 return;
3114}
3115
3116
3117/****************************************************************************/
3118/* Allocate any DMA memory needed by the driver. */
3119/* */
3120/* Allocates DMA memory needed for the various global structures needed by */
3121/* hardware. */
3122/* */
3123/* Memory alignment requirements: */
3124/* +-----------------+----------+----------+----------+----------+ */
3125/* | | 5706 | 5708 | 5709 | 5716 | */
3126/* +-----------------+----------+----------+----------+----------+ */
3127/* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
3128/* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
3129/* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
3130/* |PG Buffers | none | none | none | none | */
3131/* |TX Buffers | none | none | none | none | */
3132/* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
3133/* |Context Memory | | | | | */
3134/* +-----------------+----------+----------+----------+----------+ */
3135/* */
3136/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                        */
3137/* */
3138/* Returns: */
3139/* 0 for success, positive value for failure. */
3140/****************************************************************************/
3141static int
3142bce_dma_alloc(device_t dev)
3143{
3144 struct bce_softc *sc;
3145 int i, error, rc = 0;
3146 bus_size_t max_size, max_seg_size;
3147 int max_segments;
3148
3149 sc = device_get_softc(dev);
3150
3151 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3152
3153 /*
3154 * Allocate the parent bus DMA tag appropriate for PCI.
3155 */
3156 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY,
3157 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3158 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
3159 &sc->parent_tag)) {
3160 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
3161 __FILE__, __LINE__);
3162 rc = ENOMEM;
3163 goto bce_dma_alloc_exit;
3164 }
3165
3166 /*
3167 * Create a DMA tag for the status block, allocate and clear the
3168 * memory, map the memory into DMA space, and fetch the physical
3169 * address of the block.
3170 */
3171 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
3172 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
3173 NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
3174 0, NULL, NULL, &sc->status_tag)) {
3175 BCE_PRINTF("%s(%d): Could not allocate status block "
3176 "DMA tag!\n", __FILE__, __LINE__);
3177 rc = ENOMEM;
3178 goto bce_dma_alloc_exit;
3179 }
3180
3181 if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
3182 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3183 &sc->status_map)) {
3184 BCE_PRINTF("%s(%d): Could not allocate status block "
3185 "DMA memory!\n", __FILE__, __LINE__);
3186 rc = ENOMEM;
3187 goto bce_dma_alloc_exit;
3188 }
3189
3190 error = bus_dmamap_load(sc->status_tag, sc->status_map,
3191 sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr,
3192 &sc->status_block_paddr, BUS_DMA_NOWAIT);
3193
3194 if (error) {
3195 BCE_PRINTF("%s(%d): Could not map status block "
3196 "DMA memory!\n", __FILE__, __LINE__);
3197 rc = ENOMEM;
3198 goto bce_dma_alloc_exit;
3199 }
3200
3201 DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n",
3202 __FUNCTION__, (uintmax_t) sc->status_block_paddr);
3203
3204 /*
3205 * Create a DMA tag for the statistics block, allocate and clear the
3206 * memory, map the memory into DMA space, and fetch the physical
3207 * address of the block.
3208 */
3209 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
3210 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
3211 NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
3212 0, NULL, NULL, &sc->stats_tag)) {
3213 BCE_PRINTF("%s(%d): Could not allocate statistics block "
3214 "DMA tag!\n", __FILE__, __LINE__);
3215 rc = ENOMEM;
3216 goto bce_dma_alloc_exit;
3217 }
3218
3219 if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
3220 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) {
3221 BCE_PRINTF("%s(%d): Could not allocate statistics block "
3222 "DMA memory!\n", __FILE__, __LINE__);
3223 rc = ENOMEM;
3224 goto bce_dma_alloc_exit;
3225 }
3226
3227 error = bus_dmamap_load(sc->stats_tag, sc->stats_map,
3228 sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr,
3229 &sc->stats_block_paddr, BUS_DMA_NOWAIT);
3230
3231 if(error) {
3232 BCE_PRINTF("%s(%d): Could not map statistics block "
3233 "DMA memory!\n", __FILE__, __LINE__);
3234 rc = ENOMEM;
3235 goto bce_dma_alloc_exit;
3236 }
3237
3238 DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n",
3239 __FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3240
3241 /* BCM5709 uses host memory as cache for context memory. */
3242 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3243 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3244 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
3245 if (sc->ctx_pages == 0)
3246 sc->ctx_pages = 1;
3247
3248 DBRUNIF((sc->ctx_pages > 512),
3249 BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3250 __FILE__, __LINE__, sc->ctx_pages));
3251
3252 /*
3253 * Create a DMA tag for the context pages,
3254 * allocate and clear the memory, map the
3255 * memory into DMA space, and fetch the
3256 * physical address of the block.
3257 */
3258 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3259 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
3260 NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
3261 0, NULL, NULL, &sc->ctx_tag)) {
3262 BCE_PRINTF("%s(%d): Could not allocate CTX "
3263 "DMA tag!\n", __FILE__, __LINE__);
3264 rc = ENOMEM;
3265 goto bce_dma_alloc_exit;
3266 }
3267
3268 for (i = 0; i < sc->ctx_pages; i++) {
3269
3270 if(bus_dmamem_alloc(sc->ctx_tag,
3271 (void **)&sc->ctx_block[i],
3272 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3273 &sc->ctx_map[i])) {
3274 BCE_PRINTF("%s(%d): Could not allocate CTX "
3275 "DMA memory!\n", __FILE__, __LINE__);
3276 rc = ENOMEM;
3277 goto bce_dma_alloc_exit;
3278 }
3279
3280 error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
3281 sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr,
3282 &sc->ctx_paddr[i], BUS_DMA_NOWAIT);
3283
3284 if (error) {
3285 BCE_PRINTF("%s(%d): Could not map CTX "
3286 "DMA memory!\n", __FILE__, __LINE__);
3287 rc = ENOMEM;
3288 goto bce_dma_alloc_exit;
3289 }
3290
3291 DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] "
3292 "= 0x%jX\n", __FUNCTION__, i,
3293 (uintmax_t) sc->ctx_paddr[i]);
3294 }
3295 }
3296
3297 /*
3298 * Create a DMA tag for the TX buffer descriptor chain,
3299 * allocate and clear the memory, and fetch the
3300 * physical address of the block.
3301 */
3302 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
3303 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3304 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0,
3305 NULL, NULL, &sc->tx_bd_chain_tag)) {
3306 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3307 "chain DMA tag!\n", __FILE__, __LINE__);
3308 rc = ENOMEM;
3309 goto bce_dma_alloc_exit;
3310 }
3311
3312 for (i = 0; i < TX_PAGES; i++) {
3313
3314 if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
3315 (void **)&sc->tx_bd_chain[i],
3316 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3317 &sc->tx_bd_chain_map[i])) {
3318 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3319 "chain DMA memory!\n", __FILE__, __LINE__);
3320 rc = ENOMEM;
3321 goto bce_dma_alloc_exit;
3322 }
3323
3324 error = bus_dmamap_load(sc->tx_bd_chain_tag,
3325 sc->tx_bd_chain_map[i], sc->tx_bd_chain[i],
3326 BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr,
3327 &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3328
3329 if (error) {
3330 BCE_PRINTF("%s(%d): Could not map TX descriptor "
3331 "chain DMA memory!\n", __FILE__, __LINE__);
3332 rc = ENOMEM;
3333 goto bce_dma_alloc_exit;
3334 }
3335
3336 DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = "
3337 "0x%jX\n", __FUNCTION__, i,
3338 (uintmax_t) sc->tx_bd_chain_paddr[i]);
3339 }
3340
3341 /* Check the required size before mapping to conserve resources. */
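	/*
	 * With TSO enabled the TX mbuf tag must be able to map an entire
	 * TSO frame; otherwise a cluster-sized chain is the upper bound.
	 */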
3342 if (bce_tso_enable) {
3343 max_size = BCE_TSO_MAX_SIZE;
3344 max_segments = BCE_MAX_SEGMENTS;
3345 max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3346 } else {
3347 max_size = MCLBYTES * BCE_MAX_SEGMENTS;
3348 max_segments = BCE_MAX_SEGMENTS;
3349 max_seg_size = MCLBYTES;
3350 }
3351
3352 /* Create a DMA tag for TX mbufs. */
3353 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
3354 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size,
3355 max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) {
3356 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3357 __FILE__, __LINE__);
3358 rc = ENOMEM;
3359 goto bce_dma_alloc_exit;
3360 }
3361
3362 /* Create DMA maps for the TX mbufs clusters. */
3363 for (i = 0; i < TOTAL_TX_BD; i++) {
3364 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3365 &sc->tx_mbuf_map[i])) {
3366 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA "
3367 "map!\n", __FILE__, __LINE__);
3368 rc = ENOMEM;
3369 goto bce_dma_alloc_exit;
3370 }
3371 }
3372
3373 /*
3374 * Create a DMA tag for the RX buffer descriptor chain,
3375 * allocate and clear the memory, and fetch the physical
3376 * address of the blocks.
3377 */
3378 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3379 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
3380 sc->max_bus_addr, NULL, NULL,
3381 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
3382 0, NULL, NULL, &sc->rx_bd_chain_tag)) {
3383 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3384 "DMA tag!\n", __FILE__, __LINE__);
3385 rc = ENOMEM;
3386 goto bce_dma_alloc_exit;
3387 }
3388
3389 for (i = 0; i < RX_PAGES; i++) {
3390
3391 if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3392 (void **)&sc->rx_bd_chain[i],
3393 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3394 &sc->rx_bd_chain_map[i])) {
3395 BCE_PRINTF("%s(%d): Could not allocate RX descriptor "
3396 "chain DMA memory!\n", __FILE__, __LINE__);
3397 rc = ENOMEM;
3398 goto bce_dma_alloc_exit;
3399 }
3400
3401 error = bus_dmamap_load(sc->rx_bd_chain_tag,
3402 sc->rx_bd_chain_map[i], sc->rx_bd_chain[i],
3403 BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr,
3404 &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3405
3406 if (error) {
3407 BCE_PRINTF("%s(%d): Could not map RX descriptor "
3408 "chain DMA memory!\n", __FILE__, __LINE__);
3409 rc = ENOMEM;
3410 goto bce_dma_alloc_exit;
3411 }
3412
3413 DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = "
3414 "0x%jX\n", __FUNCTION__, i,
3415 (uintmax_t) sc->rx_bd_chain_paddr[i]);
3416 }
3417
3418 /*
3419 * Create a DMA tag for RX mbufs.
3420 */
3421#ifdef BCE_JUMBO_HDRSPLIT
3422 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3423 MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3424#else
3425 max_size = max_seg_size = MJUM9BYTES;
3426#endif
3427 max_segments = 1;
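	/*
	 * RX mbufs are always mapped as a single contiguous segment: with
	 * header splitting the buffer is at least a standard cluster
	 * (MCLBYTES), otherwise a 9KB jumbo cluster (MJUM9BYTES) is used.
	 */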
3428
3429 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag "
3430 "(max size = 0x%jX max segments = %d, max segment "
3431 "size = 0x%jX)\n", __FUNCTION__, (uintmax_t) max_size,
3432 max_segments, (uintmax_t) max_seg_size);
3433
3434 if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN,
3435 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3436 max_size, max_segments, max_seg_size, 0, NULL, NULL,
3437 &sc->rx_mbuf_tag)) {
3438 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3439 __FILE__, __LINE__);
3440 rc = ENOMEM;
3441 goto bce_dma_alloc_exit;
3442 }
3443
3444 /* Create DMA maps for the RX mbuf clusters. */
3445 for (i = 0; i < TOTAL_RX_BD; i++) {
3446 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3447 &sc->rx_mbuf_map[i])) {
3448 BCE_PRINTF("%s(%d): Unable to create RX mbuf "
3449 "DMA map!\n", __FILE__, __LINE__);
3450 rc = ENOMEM;
3451 goto bce_dma_alloc_exit;
3452 }
3453 }
3454
3455#ifdef BCE_JUMBO_HDRSPLIT
3456 /*
3457 * Create a DMA tag for the page buffer descriptor chain,
3458 * allocate and clear the memory, and fetch the physical
3459 * address of the blocks.
3460 */
3461 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3462 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr,
3463 NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ,
3464 0, NULL, NULL, &sc->pg_bd_chain_tag)) {
3465 BCE_PRINTF("%s(%d): Could not allocate page descriptor "
3466 "chain DMA tag!\n", __FILE__, __LINE__);
3467 rc = ENOMEM;
3468 goto bce_dma_alloc_exit;
3469 }
3470
3471 for (i = 0; i < PG_PAGES; i++) {
3472
3473 if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3474 (void **)&sc->pg_bd_chain[i],
3475 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3476 &sc->pg_bd_chain_map[i])) {
3477 BCE_PRINTF("%s(%d): Could not allocate page "
3478 "descriptor chain DMA memory!\n",
3479 __FILE__, __LINE__);
3480 rc = ENOMEM;
3481 goto bce_dma_alloc_exit;
3482 }
3483
3484 error = bus_dmamap_load(sc->pg_bd_chain_tag,
3485 sc->pg_bd_chain_map[i], sc->pg_bd_chain[i],
3486 BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
3487 &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3488
3489 if (error) {
3490 BCE_PRINTF("%s(%d): Could not map page descriptor "
3491 "chain DMA memory!\n", __FILE__, __LINE__);
3492 rc = ENOMEM;
3493 goto bce_dma_alloc_exit;
3494 }
3495
3496 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = "
3497 "0x%jX\n", __FUNCTION__, i,
3498 (uintmax_t) sc->pg_bd_chain_paddr[i]);
3499 }
3500
3501 /*
3502 * Create a DMA tag for page mbufs.
3503 */
3504 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
3505 MCLBYTES : sc->pg_bd_mbuf_alloc_size);
3506
3507 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
3508 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3509 max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
3510 BCE_PRINTF("%s(%d): Could not allocate page mbuf "
3511 "DMA tag!\n", __FILE__, __LINE__);
3512 rc = ENOMEM;
3513 goto bce_dma_alloc_exit;
3514 }
3515
3516 /* Create DMA maps for the page mbuf clusters. */
3517 for (i = 0; i < TOTAL_PG_BD; i++) {
3518 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3519 &sc->pg_mbuf_map[i])) {
3520 BCE_PRINTF("%s(%d): Unable to create page mbuf "
3521 "DMA map!\n", __FILE__, __LINE__);
3522 rc = ENOMEM;
3523 goto bce_dma_alloc_exit;
3524 }
3525 }
3526#endif
3527
3528bce_dma_alloc_exit:
3529 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3530 return(rc);
3531}
3532
3533
3534/****************************************************************************/
3535/* Release all resources used by the driver. */
3536/* */
3537/* Releases all resources acquired by the driver including interrupts, */
3538/* interrupt handler, interfaces, mutexes, and DMA memory. */
3539/* */
3540/* Returns: */
3541/* Nothing. */
3542/****************************************************************************/
3543static void
3544bce_release_resources(struct bce_softc *sc)
3545{
3546 device_t dev;
3547
3548 DBENTER(BCE_VERBOSE_RESET);
3549
3550 dev = sc->bce_dev;
3551
3552 bce_dma_free(sc);
3553
3554 if (sc->bce_intrhand != NULL) {
3555 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3556 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3557 }
3558
3559 if (sc->bce_res_irq != NULL) {
3560 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3561 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
3562 sc->bce_res_irq);
3563 }
3564
3565 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3566 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3567 pci_release_msi(dev);
3568 }
3569
3570 if (sc->bce_res_mem != NULL) {
3571 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
3572 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3573 sc->bce_res_mem);
3574 }
3575
3576 if (sc->bce_ifp != NULL) {
3577 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3578 if_free(sc->bce_ifp);
3579 }
3580
3581 if (mtx_initialized(&sc->bce_mtx))
3582 BCE_LOCK_DESTROY(sc);
3583
3584 DBEXIT(BCE_VERBOSE_RESET);
3585}
3586
3587
3588/****************************************************************************/
3589/* Firmware synchronization. */
3590/* */
3591/* Before performing certain events such as a chip reset, synchronize with */
3592/* the firmware first. */
3593/* */
3594/* Returns: */
3595/* 0 for success, positive value for failure. */
3596/****************************************************************************/
3597static int
3598bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3599{
3600 int i, rc = 0;
3601 u32 val;
3602
3603 DBENTER(BCE_VERBOSE_RESET);
3604
3605 /* Don't waste any time if we've timed out before. */
3606 if (sc->bce_fw_timed_out == TRUE) {
3607 rc = EBUSY;
3608 goto bce_fw_sync_exit;
3609 }
3610
3611 /* Increment the message sequence number. */
3612 sc->bce_fw_wr_seq++;
3613 msg_data |= sc->bce_fw_wr_seq;
3614
3615 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = "
3616 "0x%08X\n", msg_data);
3617
3618 /* Send the message to the bootcode driver mailbox. */
3619 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3620
3621 /* Wait for the bootcode to acknowledge the message. */
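	/*
	 * The bootcode acknowledges by echoing the sequence number of the
	 * request in the ACK field of its mailbox, so only the sequence
	 * bits are compared here.
	 */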
3622 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3623 /* Check for a response in the bootcode firmware mailbox. */
3624 val = bce_shmem_rd(sc, BCE_FW_MB);
3625 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3626 break;
3627 DELAY(1000);
3628 }
3629
3630 /* If we've timed out, tell bootcode that we've stopped waiting. */
3631 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3632 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3633
3634 BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3635 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
3636
3637 msg_data &= ~BCE_DRV_MSG_CODE;
3638 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3639
3640 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3641
3642 sc->bce_fw_timed_out = TRUE;
3643 rc = EBUSY;
3644 }
3645
3646bce_fw_sync_exit:
3647 DBEXIT(BCE_VERBOSE_RESET);
3648 return (rc);
3649}
3650
3651
3652/****************************************************************************/
3653/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
3654/* */
3655/* Returns: */
3656/* Nothing. */
3657/****************************************************************************/
3658static void
3659bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
3660 u32 rv2p_code_len, u32 rv2p_proc)
3661{
3662 int i;
3663 u32 val;
3664
3665 DBENTER(BCE_VERBOSE_RESET);
3666
3667 /* Tell the RV2P PROC2 firmware how many usable rx_bd entries fit in a page. */
3668 if (rv2p_proc == RV2P_PROC2) {
3669 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3670 }
3671
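	/*
	 * Each RV2P instruction is 8 bytes wide and is loaded as a
	 * high/low 32-bit word pair; the address/command write below then
	 * commits it at instruction index (i / 8).
	 */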
3672 for (i = 0; i < rv2p_code_len; i += 8) {
3673 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3674 rv2p_code++;
3675 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3676 rv2p_code++;
3677
3678 if (rv2p_proc == RV2P_PROC1) {
3679 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3680 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3681 }
3682 else {
3683 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3684 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3685 }
3686 }
3687
3688 /* Reset the processor, un-stall is done later. */
3689 if (rv2p_proc == RV2P_PROC1) {
3690 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3691 }
3692 else {
3693 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3694 }
3695
3696 DBEXIT(BCE_VERBOSE_RESET);
3697}
3698
3699
3700/****************************************************************************/
3701/* Load RISC processor firmware. */
3702/* */
3703/* Loads firmware from the file if_bcefw.h into the scratchpad memory */
3704/* associated with a particular processor. */
3705/* */
3706/* Returns: */
3707/* Nothing. */
3708/****************************************************************************/
3709static void
3710bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3711 struct fw_info *fw)
3712{
3713 u32 offset;
3714
3715 DBENTER(BCE_VERBOSE_RESET);
3716
3717 bce_halt_cpu(sc, cpu_reg);
3718
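	/*
	 * The CPU is halted above so its scratchpad can be written safely;
	 * each firmware section is then copied one 32-bit word at a time
	 * through the indirect register interface.
	 */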
3719 /* Load the Text area. */
3720 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3721 if (fw->text) {
3722 int j;
3723
3724 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3725 REG_WR_IND(sc, offset, fw->text[j]);
3726 }
3727 }
3728
3729 /* Load the Data area. */
3730 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3731 if (fw->data) {
3732 int j;
3733
3734 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3735 REG_WR_IND(sc, offset, fw->data[j]);
3736 }
3737 }
3738
3739 /* Load the SBSS area. */
3740 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3741 if (fw->sbss) {
3742 int j;
3743
3744 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3745 REG_WR_IND(sc, offset, fw->sbss[j]);
3746 }
3747 }
3748
3749 /* Load the BSS area. */
3750 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3751 if (fw->bss) {
3752 int j;
3753
3754 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3755 REG_WR_IND(sc, offset, fw->bss[j]);
3756 }
3757 }
3758
3759 /* Load the Read-Only area. */
3760 offset = cpu_reg->spad_base +
3761 (fw->rodata_addr - cpu_reg->mips_view_base);
3762 if (fw->rodata) {
3763 int j;
3764
3765 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3766 REG_WR_IND(sc, offset, fw->rodata[j]);
3767 }
3768 }
3769
3770 /* Clear the pre-fetch instruction and set the FW start address. */
3771 REG_WR_IND(sc, cpu_reg->inst, 0);
3772 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3773
3774 DBEXIT(BCE_VERBOSE_RESET);
3775}
3776
3777
3778/****************************************************************************/
3779/* Starts the RISC processor. */
3780/* */
3781/* Assumes the CPU starting address has already been set. */
3782/* */
3783/* Returns: */
3784/* Nothing. */
3785/****************************************************************************/
3786static void
3787bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
3788{
3789 u32 val;
3790
3791 DBENTER(BCE_VERBOSE_RESET);
3792
3793 /* Start the CPU. */
3794 val = REG_RD_IND(sc, cpu_reg->mode);
3795 val &= ~cpu_reg->mode_value_halt;
3796 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3797 REG_WR_IND(sc, cpu_reg->mode, val);
3798
3799 DBEXIT(BCE_VERBOSE_RESET);
3800}
3801
3802
3803/****************************************************************************/
3804/* Halts the RISC processor. */
3805/* */
3806/* Returns: */
3807/* Nothing. */
3808/****************************************************************************/
3809static void
3810bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
3811{
3812 u32 val;
3813
3814 DBENTER(BCE_VERBOSE_RESET);
3815
3816 /* Halt the CPU. */
3817 val = REG_RD_IND(sc, cpu_reg->mode);
3818 val |= cpu_reg->mode_value_halt;
3819 REG_WR_IND(sc, cpu_reg->mode, val);
3820 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3821
3822 DBEXIT(BCE_VERBOSE_RESET);
3823}
3824
3825
3826/****************************************************************************/
3827 /* Start the RX CPU. */
3828/* */
3829/* Returns: */
3830/* Nothing. */
3831/****************************************************************************/
3832static void
3833bce_start_rxp_cpu(struct bce_softc *sc)
3834{
3835 struct cpu_reg cpu_reg;
3836
3837 DBENTER(BCE_VERBOSE_RESET);
3838
3839 cpu_reg.mode = BCE_RXP_CPU_MODE;
3840 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3841 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3842 cpu_reg.state = BCE_RXP_CPU_STATE;
3843 cpu_reg.state_value_clear = 0xffffff;
3844 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3845 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3846 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3847 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3848 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3849 cpu_reg.spad_base = BCE_RXP_SCRATCH;
3850 cpu_reg.mips_view_base = 0x8000000;
3851
3852 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n");
3853 bce_start_cpu(sc, &cpu_reg);
3854
3855 DBEXIT(BCE_VERBOSE_RESET);
3856}
3857
3858
3859/****************************************************************************/
3860/* Initialize the RX CPU. */
3861/* */
3862/* Returns: */
3863/* Nothing. */
3864/****************************************************************************/
3865static void
3866bce_init_rxp_cpu(struct bce_softc *sc)
3867{
3868 struct cpu_reg cpu_reg;
3869 struct fw_info fw;
3870
3871 DBENTER(BCE_VERBOSE_RESET);
3872
3873 cpu_reg.mode = BCE_RXP_CPU_MODE;
3874 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3875 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3876 cpu_reg.state = BCE_RXP_CPU_STATE;
3877 cpu_reg.state_value_clear = 0xffffff;
3878 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3879 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3880 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3881 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3882 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3883 cpu_reg.spad_base = BCE_RXP_SCRATCH;
3884 cpu_reg.mips_view_base = 0x8000000;
3885
3886 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3887 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3888 fw.ver_major = bce_RXP_b09FwReleaseMajor;
3889 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
3890 fw.ver_fix = bce_RXP_b09FwReleaseFix;
3891 fw.start_addr = bce_RXP_b09FwStartAddr;
3892
3893 fw.text_addr = bce_RXP_b09FwTextAddr;
3894 fw.text_len = bce_RXP_b09FwTextLen;
3895 fw.text_index = 0;
3896 fw.text = bce_RXP_b09FwText;
3897
3898 fw.data_addr = bce_RXP_b09FwDataAddr;
3899 fw.data_len = bce_RXP_b09FwDataLen;
3900 fw.data_index = 0;
3901 fw.data = bce_RXP_b09FwData;
3902
3903 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
3904 fw.sbss_len = bce_RXP_b09FwSbssLen;
3905 fw.sbss_index = 0;
3906 fw.sbss = bce_RXP_b09FwSbss;
3907
3908 fw.bss_addr = bce_RXP_b09FwBssAddr;
3909 fw.bss_len = bce_RXP_b09FwBssLen;
3910 fw.bss_index = 0;
3911 fw.bss = bce_RXP_b09FwBss;
3912
3913 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
3914 fw.rodata_len = bce_RXP_b09FwRodataLen;
3915 fw.rodata_index = 0;
3916 fw.rodata = bce_RXP_b09FwRodata;
3917 } else {
3918 fw.ver_major = bce_RXP_b06FwReleaseMajor;
3919 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3920 fw.ver_fix = bce_RXP_b06FwReleaseFix;
3921 fw.start_addr = bce_RXP_b06FwStartAddr;
3922
3923 fw.text_addr = bce_RXP_b06FwTextAddr;
3924 fw.text_len = bce_RXP_b06FwTextLen;
3925 fw.text_index = 0;
3926 fw.text = bce_RXP_b06FwText;
3927
3928 fw.data_addr = bce_RXP_b06FwDataAddr;
3929 fw.data_len = bce_RXP_b06FwDataLen;
3930 fw.data_index = 0;
3931 fw.data = bce_RXP_b06FwData;
3932
3933 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3934 fw.sbss_len = bce_RXP_b06FwSbssLen;
3935 fw.sbss_index = 0;
3936 fw.sbss = bce_RXP_b06FwSbss;
3937
3938 fw.bss_addr = bce_RXP_b06FwBssAddr;
3939 fw.bss_len = bce_RXP_b06FwBssLen;
3940 fw.bss_index = 0;
3941 fw.bss = bce_RXP_b06FwBss;
3942
3943 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3944 fw.rodata_len = bce_RXP_b06FwRodataLen;
3945 fw.rodata_index = 0;
3946 fw.rodata = bce_RXP_b06FwRodata;
3947 }
3948
3949 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3950 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3951
3952 /* Delay RXP start until initialization is complete. */
3953
3954 DBEXIT(BCE_VERBOSE_RESET);
3955}
3956
3957
3958/****************************************************************************/
3959/* Initialize the TX CPU. */
3960/* */
3961/* Returns: */
3962/* Nothing. */
3963/****************************************************************************/
3964static void
3965bce_init_txp_cpu(struct bce_softc *sc)
3966{
3967 struct cpu_reg cpu_reg;
3968 struct fw_info fw;
3969
3970 DBENTER(BCE_VERBOSE_RESET);
3971
3972 cpu_reg.mode = BCE_TXP_CPU_MODE;
3973 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3974 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3975 cpu_reg.state = BCE_TXP_CPU_STATE;
3976 cpu_reg.state_value_clear = 0xffffff;
3977 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3978 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3979 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3980 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3981 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3982 cpu_reg.spad_base = BCE_TXP_SCRATCH;
3983 cpu_reg.mips_view_base = 0x8000000;
3984
3985 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3986 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3987 fw.ver_major = bce_TXP_b09FwReleaseMajor;
3988 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
3989 fw.ver_fix = bce_TXP_b09FwReleaseFix;
3990 fw.start_addr = bce_TXP_b09FwStartAddr;
3991
3992 fw.text_addr = bce_TXP_b09FwTextAddr;
3993 fw.text_len = bce_TXP_b09FwTextLen;
3994 fw.text_index = 0;
3995 fw.text = bce_TXP_b09FwText;
3996
3997 fw.data_addr = bce_TXP_b09FwDataAddr;
3998 fw.data_len = bce_TXP_b09FwDataLen;
3999 fw.data_index = 0;
4000 fw.data = bce_TXP_b09FwData;
4001
4002 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
4003 fw.sbss_len = bce_TXP_b09FwSbssLen;
4004 fw.sbss_index = 0;
4005 fw.sbss = bce_TXP_b09FwSbss;
4006
4007 fw.bss_addr = bce_TXP_b09FwBssAddr;
4008 fw.bss_len = bce_TXP_b09FwBssLen;
4009 fw.bss_index = 0;
4010 fw.bss = bce_TXP_b09FwBss;
4011
4012 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
4013 fw.rodata_len = bce_TXP_b09FwRodataLen;
4014 fw.rodata_index = 0;
4015 fw.rodata = bce_TXP_b09FwRodata;
4016 } else {
4017 fw.ver_major = bce_TXP_b06FwReleaseMajor;
4018 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
4019 fw.ver_fix = bce_TXP_b06FwReleaseFix;
4020 fw.start_addr = bce_TXP_b06FwStartAddr;
4021
4022 fw.text_addr = bce_TXP_b06FwTextAddr;
4023 fw.text_len = bce_TXP_b06FwTextLen;
4024 fw.text_index = 0;
4025 fw.text = bce_TXP_b06FwText;
4026
4027 fw.data_addr = bce_TXP_b06FwDataAddr;
4028 fw.data_len = bce_TXP_b06FwDataLen;
4029 fw.data_index = 0;
4030 fw.data = bce_TXP_b06FwData;
4031
4032 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
4033 fw.sbss_len = bce_TXP_b06FwSbssLen;
4034 fw.sbss_index = 0;
4035 fw.sbss = bce_TXP_b06FwSbss;
4036
4037 fw.bss_addr = bce_TXP_b06FwBssAddr;
4038 fw.bss_len = bce_TXP_b06FwBssLen;
4039 fw.bss_index = 0;
4040 fw.bss = bce_TXP_b06FwBss;
4041
4042 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
4043 fw.rodata_len = bce_TXP_b06FwRodataLen;
4044 fw.rodata_index = 0;
4045 fw.rodata = bce_TXP_b06FwRodata;
4046 }
4047
4048 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
4049 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4050 bce_start_cpu(sc, &cpu_reg);
4051
4052 DBEXIT(BCE_VERBOSE_RESET);
4053}
4054
4055
4056/****************************************************************************/
4057/* Initialize the TPAT CPU. */
4058/* */
4059/* Returns: */
4060/* Nothing. */
4061/****************************************************************************/
4062static void
4063bce_init_tpat_cpu(struct bce_softc *sc)
4064{
4065 struct cpu_reg cpu_reg;
4066 struct fw_info fw;
4067
4068 DBENTER(BCE_VERBOSE_RESET);
4069
4070 cpu_reg.mode = BCE_TPAT_CPU_MODE;
4071 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
4072 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
4073 cpu_reg.state = BCE_TPAT_CPU_STATE;
4074 cpu_reg.state_value_clear = 0xffffff;
4075 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
4076 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
4077 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
4078 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
4079 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
4080 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
4081 cpu_reg.mips_view_base = 0x8000000;
4082
4083 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4084 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4085 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
4086 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
4087 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
4088 fw.start_addr = bce_TPAT_b09FwStartAddr;
4089
4090 fw.text_addr = bce_TPAT_b09FwTextAddr;
4091 fw.text_len = bce_TPAT_b09FwTextLen;
4092 fw.text_index = 0;
4093 fw.text = bce_TPAT_b09FwText;
4094
4095 fw.data_addr = bce_TPAT_b09FwDataAddr;
4096 fw.data_len = bce_TPAT_b09FwDataLen;
4097 fw.data_index = 0;
4098 fw.data = bce_TPAT_b09FwData;
4099
4100 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
4101 fw.sbss_len = bce_TPAT_b09FwSbssLen;
4102 fw.sbss_index = 0;
4103 fw.sbss = bce_TPAT_b09FwSbss;
4104
4105 fw.bss_addr = bce_TPAT_b09FwBssAddr;
4106 fw.bss_len = bce_TPAT_b09FwBssLen;
4107 fw.bss_index = 0;
4108 fw.bss = bce_TPAT_b09FwBss;
4109
4110 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
4111 fw.rodata_len = bce_TPAT_b09FwRodataLen;
4112 fw.rodata_index = 0;
4113 fw.rodata = bce_TPAT_b09FwRodata;
4114 } else {
4115 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
4116 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
4117 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
4118 fw.start_addr = bce_TPAT_b06FwStartAddr;
4119
4120 fw.text_addr = bce_TPAT_b06FwTextAddr;
4121 fw.text_len = bce_TPAT_b06FwTextLen;
4122 fw.text_index = 0;
4123 fw.text = bce_TPAT_b06FwText;
4124
4125 fw.data_addr = bce_TPAT_b06FwDataAddr;
4126 fw.data_len = bce_TPAT_b06FwDataLen;
4127 fw.data_index = 0;
4128 fw.data = bce_TPAT_b06FwData;
4129
4130 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
4131 fw.sbss_len = bce_TPAT_b06FwSbssLen;
4132 fw.sbss_index = 0;
4133 fw.sbss = bce_TPAT_b06FwSbss;
4134
4135 fw.bss_addr = bce_TPAT_b06FwBssAddr;
4136 fw.bss_len = bce_TPAT_b06FwBssLen;
4137 fw.bss_index = 0;
4138 fw.bss = bce_TPAT_b06FwBss;
4139
4140 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
4141 fw.rodata_len = bce_TPAT_b06FwRodataLen;
4142 fw.rodata_index = 0;
4143 fw.rodata = bce_TPAT_b06FwRodata;
4144 }
4145
4146 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
4147 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4148 bce_start_cpu(sc, &cpu_reg);
4149
4150 DBEXIT(BCE_VERBOSE_RESET);
4151}
4152
4153
4154/****************************************************************************/
4155/* Initialize the CP CPU. */
4156/* */
4157/* Returns: */
4158/* Nothing. */
4159/****************************************************************************/
4160static void
4161bce_init_cp_cpu(struct bce_softc *sc)
4162{
4163 struct cpu_reg cpu_reg;
4164 struct fw_info fw;
4165
4166 DBENTER(BCE_VERBOSE_RESET);
4167
4168 cpu_reg.mode = BCE_CP_CPU_MODE;
4169 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
4170 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
4171 cpu_reg.state = BCE_CP_CPU_STATE;
4172 cpu_reg.state_value_clear = 0xffffff;
4173 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
4174 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
4175 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
4176 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
4177 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
4178 cpu_reg.spad_base = BCE_CP_SCRATCH;
4179 cpu_reg.mips_view_base = 0x8000000;
4180
4181 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4182 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4183 fw.ver_major = bce_CP_b09FwReleaseMajor;
4184 fw.ver_minor = bce_CP_b09FwReleaseMinor;
4185 fw.ver_fix = bce_CP_b09FwReleaseFix;
4186 fw.start_addr = bce_CP_b09FwStartAddr;
4187
4188 fw.text_addr = bce_CP_b09FwTextAddr;
4189 fw.text_len = bce_CP_b09FwTextLen;
4190 fw.text_index = 0;
4191 fw.text = bce_CP_b09FwText;
4192
4193 fw.data_addr = bce_CP_b09FwDataAddr;
4194 fw.data_len = bce_CP_b09FwDataLen;
4195 fw.data_index = 0;
4196 fw.data = bce_CP_b09FwData;
4197
4198 fw.sbss_addr = bce_CP_b09FwSbssAddr;
4199 fw.sbss_len = bce_CP_b09FwSbssLen;
4200 fw.sbss_index = 0;
4201 fw.sbss = bce_CP_b09FwSbss;
4202
4203 fw.bss_addr = bce_CP_b09FwBssAddr;
4204 fw.bss_len = bce_CP_b09FwBssLen;
4205 fw.bss_index = 0;
4206 fw.bss = bce_CP_b09FwBss;
4207
4208 fw.rodata_addr = bce_CP_b09FwRodataAddr;
4209 fw.rodata_len = bce_CP_b09FwRodataLen;
4210 fw.rodata_index = 0;
4211 fw.rodata = bce_CP_b09FwRodata;
4212 } else {
4213 fw.ver_major = bce_CP_b06FwReleaseMajor;
4214 fw.ver_minor = bce_CP_b06FwReleaseMinor;
4215 fw.ver_fix = bce_CP_b06FwReleaseFix;
4216 fw.start_addr = bce_CP_b06FwStartAddr;
4217
4218 fw.text_addr = bce_CP_b06FwTextAddr;
4219 fw.text_len = bce_CP_b06FwTextLen;
4220 fw.text_index = 0;
4221 fw.text = bce_CP_b06FwText;
4222
4223 fw.data_addr = bce_CP_b06FwDataAddr;
4224 fw.data_len = bce_CP_b06FwDataLen;
4225 fw.data_index = 0;
4226 fw.data = bce_CP_b06FwData;
4227
4228 fw.sbss_addr = bce_CP_b06FwSbssAddr;
4229 fw.sbss_len = bce_CP_b06FwSbssLen;
4230 fw.sbss_index = 0;
4231 fw.sbss = bce_CP_b06FwSbss;
4232
4233 fw.bss_addr = bce_CP_b06FwBssAddr;
4234 fw.bss_len = bce_CP_b06FwBssLen;
4235 fw.bss_index = 0;
4236 fw.bss = bce_CP_b06FwBss;
4237
4238 fw.rodata_addr = bce_CP_b06FwRodataAddr;
4239 fw.rodata_len = bce_CP_b06FwRodataLen;
4240 fw.rodata_index = 0;
4241 fw.rodata = bce_CP_b06FwRodata;
4242 }
4243
4244 DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4245 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4246 bce_start_cpu(sc, &cpu_reg);
4247
4248 DBEXIT(BCE_VERBOSE_RESET);
4249}
4250
4251
4252/****************************************************************************/
4253/* Initialize the COM CPU. */
4254/* */
4255/* Returns: */
4256/* Nothing. */
4257/****************************************************************************/
4258static void
4259bce_init_com_cpu(struct bce_softc *sc)
4260{
4261 struct cpu_reg cpu_reg;
4262 struct fw_info fw;
4263
4264 DBENTER(BCE_VERBOSE_RESET);
4265
4266 cpu_reg.mode = BCE_COM_CPU_MODE;
4267 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4268 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4269 cpu_reg.state = BCE_COM_CPU_STATE;
4270 cpu_reg.state_value_clear = 0xffffff;
4271 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4272 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4273 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4274 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4275 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4276 cpu_reg.spad_base = BCE_COM_SCRATCH;
4277 cpu_reg.mips_view_base = 0x8000000;
4278
4279 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4280 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4281 fw.ver_major = bce_COM_b09FwReleaseMajor;
4282 fw.ver_minor = bce_COM_b09FwReleaseMinor;
4283 fw.ver_fix = bce_COM_b09FwReleaseFix;
4284 fw.start_addr = bce_COM_b09FwStartAddr;
4285
4286 fw.text_addr = bce_COM_b09FwTextAddr;
4287 fw.text_len = bce_COM_b09FwTextLen;
4288 fw.text_index = 0;
4289 fw.text = bce_COM_b09FwText;
4290
4291 fw.data_addr = bce_COM_b09FwDataAddr;
4292 fw.data_len = bce_COM_b09FwDataLen;
4293 fw.data_index = 0;
4294 fw.data = bce_COM_b09FwData;
4295
4296 fw.sbss_addr = bce_COM_b09FwSbssAddr;
4297 fw.sbss_len = bce_COM_b09FwSbssLen;
4298 fw.sbss_index = 0;
4299 fw.sbss = bce_COM_b09FwSbss;
4300
4301 fw.bss_addr = bce_COM_b09FwBssAddr;
4302 fw.bss_len = bce_COM_b09FwBssLen;
4303 fw.bss_index = 0;
4304 fw.bss = bce_COM_b09FwBss;
4305
4306 fw.rodata_addr = bce_COM_b09FwRodataAddr;
4307 fw.rodata_len = bce_COM_b09FwRodataLen;
4308 fw.rodata_index = 0;
4309 fw.rodata = bce_COM_b09FwRodata;
4310 } else {
4311 fw.ver_major = bce_COM_b06FwReleaseMajor;
4312 fw.ver_minor = bce_COM_b06FwReleaseMinor;
4313 fw.ver_fix = bce_COM_b06FwReleaseFix;
4314 fw.start_addr = bce_COM_b06FwStartAddr;
4315
4316 fw.text_addr = bce_COM_b06FwTextAddr;
4317 fw.text_len = bce_COM_b06FwTextLen;
4318 fw.text_index = 0;
4319 fw.text = bce_COM_b06FwText;
4320
4321 fw.data_addr = bce_COM_b06FwDataAddr;
4322 fw.data_len = bce_COM_b06FwDataLen;
4323 fw.data_index = 0;
4324 fw.data = bce_COM_b06FwData;
4325
4326 fw.sbss_addr = bce_COM_b06FwSbssAddr;
4327 fw.sbss_len = bce_COM_b06FwSbssLen;
4328 fw.sbss_index = 0;
4329 fw.sbss = bce_COM_b06FwSbss;
4330
4331 fw.bss_addr = bce_COM_b06FwBssAddr;
4332 fw.bss_len = bce_COM_b06FwBssLen;
4333 fw.bss_index = 0;
4334 fw.bss = bce_COM_b06FwBss;
4335
4336 fw.rodata_addr = bce_COM_b06FwRodataAddr;
4337 fw.rodata_len = bce_COM_b06FwRodataLen;
4338 fw.rodata_index = 0;
4339 fw.rodata = bce_COM_b06FwRodata;
4340 }
4341
4342 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4343 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4344 bce_start_cpu(sc, &cpu_reg);
4345
4346 DBEXIT(BCE_VERBOSE_RESET);
4347}
4348
4349
4350/****************************************************************************/
4351/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
4352/* */
4353/* Loads the firmware for each CPU and starts the CPU. */
4354/* */
4355/* Returns: */
4356/* Nothing. */
4357/****************************************************************************/
4358static void
4359bce_init_cpus(struct bce_softc *sc)
4360{
4361 DBENTER(BCE_VERBOSE_RESET);
4362
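	/*
	 * The 5709/5716 ("Xi") controllers use their own RV2P firmware
	 * images, with a separate build for the A-step silicon; older
	 * 5706/5708 controllers use the base images.
	 */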
4363 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4364 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4365
4366 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
4367 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
4368 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
4369 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
4370 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
4371 } else {
4372 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
4373 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
4374 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
4375 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
4376 }
4377
4378 } else {
4379 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
4380 sizeof(bce_rv2p_proc1), RV2P_PROC1);
4381 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
4382 sizeof(bce_rv2p_proc2), RV2P_PROC2);
4383 }
4384
4385 bce_init_rxp_cpu(sc);
4386 bce_init_txp_cpu(sc);
4387 bce_init_tpat_cpu(sc);
4388 bce_init_com_cpu(sc);
4389 bce_init_cp_cpu(sc);
4390
4391 DBEXIT(BCE_VERBOSE_RESET);
4392}
4393
4394
4395/****************************************************************************/
4396/* Initialize context memory. */
4397/* */
4398/* Clears the memory associated with each Context ID (CID). */
4399/* */
4400/* Returns: */
4401 /* 0 for success, positive value for failure. */
4402/****************************************************************************/
4403static int
4404bce_init_ctx(struct bce_softc *sc)
4405{
4406 u32 offset, val, vcid_addr;
4407 int i, j, rc, retry_cnt;
4408
4409 rc = 0;
4410 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4411
4412 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4413 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4414 retry_cnt = CTX_INIT_RETRY_COUNT;
4415
4416 DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4417
4418 /*
4419 * The BCM5709 keeps its context memory in host memory
4420 * pages, so prepare that host memory for controller
4421 * access.
4422 */
4423 val = BCE_CTX_COMMAND_ENABLED |
4424 BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
4425 val |= (BCM_PAGE_BITS - 8) << 16;
4426 REG_WR(sc, BCE_CTX_COMMAND, val);
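		/*
		 * The field at bits 16+ above encodes the host page size as
		 * BCM_PAGE_BITS - 8, i.e. log2(page size) relative to a 256
		 * byte unit; the MEM_INIT bit is polled below until the
		 * hardware clears it.
		 */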
4427
4428 /* Wait for mem init command to complete. */
4429 for (i = 0; i < retry_cnt; i++) {
4430 val = REG_RD(sc, BCE_CTX_COMMAND);
4431 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4432 break;
4433 DELAY(2);
4434 }
4435 if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) {
4436 BCE_PRINTF("%s(): Context memory initialization failed!\n",
4437 __FUNCTION__);
4438 rc = EBUSY;
4439 goto init_ctx_fail;
4440 }
4441
4442 for (i = 0; i < sc->ctx_pages; i++) {
4443 /* Set the physical address of the context memory. */
4444 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4445 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4446 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4447 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4448 BCE_ADDR_HI(sc->ctx_paddr[i]));
4449 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4450 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4451
4452 /* Verify the context memory write was successful. */
4453 for (j = 0; j < retry_cnt; j++) {
4454 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4455 if ((val &
4456 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4457 break;
4458 DELAY(5);
4459 }
4460 if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) {
4461 BCE_PRINTF("%s(): Failed to initialize "
4462 "context page %d!\n", __FUNCTION__, i);
4463 rc = EBUSY;
4464 goto init_ctx_fail;
4465 }
4466 }
4467 } else {
4468
4469 DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4470
4471 /*
4472 * For the 5706/5708, context memory is local to
4473 * the controller, so initialize the controller
4474 * context memory.
4475 */
4476
4477 vcid_addr = GET_CID_ADDR(96);
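		/*
		 * Starting from the address of CID 96, step backwards in
		 * PHY_CTX_SIZE chunks and zero each block of on-chip context
		 * memory through the context window registers.
		 */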
4478 while (vcid_addr) {
4479
4480 vcid_addr -= PHY_CTX_SIZE;
4481
4482 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4483 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4484
4485 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4486 CTX_WR(sc, 0x00, offset, 0);
4487 }
4488
4489 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4490 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4491 }
4492
4493 }
4494init_ctx_fail:
4495 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4496 return (rc);
4497}
4498
4499
4500/****************************************************************************/
4501/* Fetch the permanent MAC address of the controller. */
4502/* */
4503/* Returns: */
4504/* Nothing. */
4505/****************************************************************************/
4506static void
4507bce_get_mac_addr(struct bce_softc *sc)
4508{
4509 u32 mac_lo = 0, mac_hi = 0;
4510
4511 DBENTER(BCE_VERBOSE_RESET);
4512
4513 /*
4514 * The NetXtreme II bootcode populates various NIC
4515 * power-on and runtime configuration items in a
4516 * shared memory area. The factory configured MAC
4517 * address is available from both NVRAM and the
4518 * shared memory area so we'll read the value from
4519 * shared memory for speed.
4520 */
4521
4522 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
4523 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
4524
4525 if ((mac_lo == 0) && (mac_hi == 0)) {
4526 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4527 __FILE__, __LINE__);
4528 } else {
4529 sc->eaddr[0] = (u_char)(mac_hi >> 8);
4530 sc->eaddr[1] = (u_char)(mac_hi >> 0);
4531 sc->eaddr[2] = (u_char)(mac_lo >> 24);
4532 sc->eaddr[3] = (u_char)(mac_lo >> 16);
4533 sc->eaddr[4] = (u_char)(mac_lo >> 8);
4534 sc->eaddr[5] = (u_char)(mac_lo >> 0);
4535 }
4536
4537 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet "
4538 "address = %6D\n", sc->eaddr, ":");
4539 DBEXIT(BCE_VERBOSE_RESET);
4540}
4541
4542
4543/****************************************************************************/
4544/* Program the MAC address. */
4545/* */
4546/* Returns: */
4547/* Nothing. */
4548/****************************************************************************/
4549static void
4550bce_set_mac_addr(struct bce_softc *sc)
4551{
4552 u32 val;
4553 u8 *mac_addr = sc->eaddr;
4554
4555 /* ToDo: Add support for setting multiple MAC addresses. */
4556
4557 DBENTER(BCE_VERBOSE_RESET);
4558 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = "
4559 "%6D\n", sc->eaddr, ":");
4560
4561 val = (mac_addr[0] << 8) | mac_addr[1];
4562
4563 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4564
4565 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4566 (mac_addr[4] << 8) | mac_addr[5];
4567
4568 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4569
4570 DBEXIT(BCE_VERBOSE_RESET);
4571}
4572
4573
4574/****************************************************************************/
4575/* Stop the controller. */
4576/* */
4577/* Returns: */
4578/* Nothing. */
4579/****************************************************************************/
4580static void
4581bce_stop(struct bce_softc *sc)
4582{
4583 struct ifnet *ifp;
4584
4585 DBENTER(BCE_VERBOSE_RESET);
4586
4587 BCE_LOCK_ASSERT(sc);
4588
4589 ifp = sc->bce_ifp;
4590
4591 callout_stop(&sc->bce_tick_callout);
4592
4593 /* Disable the transmit/receive blocks. */
4594 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4595 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4596 DELAY(20);
4597
4598 bce_disable_intr(sc);
4599
4600 /* Free RX buffers. */
4601#ifdef BCE_JUMBO_HDRSPLIT
4602 bce_free_pg_chain(sc);
4603#endif
4604 bce_free_rx_chain(sc);
4605
4606 /* Free TX buffers. */
4607 bce_free_tx_chain(sc);
4608
4609 sc->watchdog_timer = 0;
4610
4611 sc->bce_link_up = FALSE;
4612
4613 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4614
4615 DBEXIT(BCE_VERBOSE_RESET);
4616}
4617
4618
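/****************************************************************************/
/* Reset the controller. */
/* */
/* Synchronizes with the bootcode, issues the chip reset, verifies byte */
/* swapping, and waits for the bootcode to finish its initialization. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/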
4619static int
4620bce_reset(struct bce_softc *sc, u32 reset_code)
4621{
4622 u32 val;
4623 int i, rc = 0;
4624
4625 DBENTER(BCE_VERBOSE_RESET);
4626
4627 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4628 __FUNCTION__, reset_code);
4629
4630 /* Wait for pending PCI transactions to complete. */
4631 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4632 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4633 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4634 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4635 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4636 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4637 DELAY(5);
4638
4639 /* Disable DMA */
4640 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4641 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4642 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4643 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4644 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4645 }
4646
4647 /* Assume bootcode is running. */
4648 sc->bce_fw_timed_out = FALSE;
4649 sc->bce_drv_cardiac_arrest = FALSE;
4650
4651 /* Give the firmware a chance to prepare for the reset. */
4652 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4653 if (rc)
4654 goto bce_reset_exit;
4655
4656 /* Set a firmware reminder that this is a soft reset. */
4657 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);
4658
4659 /* Dummy read to force the chip to complete all current transactions. */
4660 val = REG_RD(sc, BCE_MISC_ID);
4661
4662 /* Chip reset. */
4663 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4664 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4665 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4666 REG_RD(sc, BCE_MISC_COMMAND);
4667 DELAY(5);
4668
4669 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4670 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4671
4672 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4673 } else {
4674 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4675 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4676 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4677 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4678
4679 /* Reset should take about 30us; poll for up to 100us for it to complete. */
4680 for (i = 0; i < 10; i++) {
4681 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4682 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4683 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4684 break;
4685 }
4686 DELAY(10);
4687 }
4688
4689 /* Check that reset completed successfully. */
4690 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4691 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4692 BCE_PRINTF("%s(%d): Reset failed!\n",
4693 __FILE__, __LINE__);
4694 rc = EBUSY;
4695 goto bce_reset_exit;
4696 }
4697 }
4698
4699 /* Make sure byte swapping is properly configured. */
4700 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4701 if (val != 0x01020304) {
4702 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4703 __FILE__, __LINE__);
4704 rc = ENODEV;
4705 goto bce_reset_exit;
4706 }
4707
4708 /* Just completed a reset, assume that firmware is running again. */
4709 sc->bce_fw_timed_out = FALSE;
4710 sc->bce_drv_cardiac_arrest = FALSE;
4711
4712 /* Wait for the firmware to finish its initialization. */
4713 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4714 if (rc)
4715 BCE_PRINTF("%s(%d): Firmware did not complete "
4716 "initialization!\n", __FILE__, __LINE__);
4717
4718bce_reset_exit:
4719 DBEXIT(BCE_VERBOSE_RESET);
4720 return (rc);
4721}
4722
4723
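/****************************************************************************/
/* Perform basic controller initialization following a reset. */
/* */
/* Configures DMA byte/word swapping, initializes context memory and the */
/* on-board CPUs, prepares NVRAM for access, and programs the MQ, RV2P */
/* and TBDR page sizes. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/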
4724static int
4725bce_chipinit(struct bce_softc *sc)
4726{
4727 u32 val;
4728 int rc = 0;
4729
4730 DBENTER(BCE_VERBOSE_RESET);
4731
4732 bce_disable_intr(sc);
4733
4734 /*
4735 * Initialize DMA byte/word swapping, configure the number of DMA
4736 * channels and PCI clock compensation delay.
4737 */
4738 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
4739 BCE_DMA_CONFIG_DATA_WORD_SWAP |
4740#if BYTE_ORDER == BIG_ENDIAN
4741 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
4742#endif
4743 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
4744 DMA_READ_CHANS << 12 |
4745 DMA_WRITE_CHANS << 16;
4746
4747 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
4748
4749 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
4750 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
4751
4752 /*
4753 * This setting resolves a problem observed on certain Intel PCI
4754 * chipsets that cannot handle multiple outstanding DMA operations.
4755 * See errata E9_5706A1_65.
4756 */
4757 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4758 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
4759 !(sc->bce_flags & BCE_PCIX_FLAG))
4760 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
4761
4762 REG_WR(sc, BCE_DMA_CONFIG, val);
4763
4764 /* Enable the RX_V2P and Context state machines before access. */
4765 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4766 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4767 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4768 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4769
4770 /* Initialize context mapping and zero out the quick contexts. */
4771 if ((rc = bce_init_ctx(sc)) != 0)
4772 goto bce_chipinit_exit;
4773
4774 /* Initialize the on-board CPUs. */
4775 bce_init_cpus(sc);
4776
4777 /* Enable management frames (NC-SI) to flow to the MCP. */
4778 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4779 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
4780 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
4781 }
4782
4783 /* Prepare NVRAM for access. */
4784 if ((rc = bce_init_nvram(sc)) != 0)
4785 goto bce_chipinit_exit;
4786
4787 /* Set the kernel bypass block size */
4788 val = REG_RD(sc, BCE_MQ_CONFIG);
4789 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4790 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4791
4792 /* Enable bins used on the 5709/5716. */
4793 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4794 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4795 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
4796 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
4797 val |= BCE_MQ_CONFIG_HALT_DIS;
4798 }
4799
4800 REG_WR(sc, BCE_MQ_CONFIG, val);
4801
4802 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4803 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
4804 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
4805
4806 /* Set the page size and clear the RV2P processor stall bits. */
4807 val = (BCM_PAGE_BITS - 8) << 24;
4808 REG_WR(sc, BCE_RV2P_CONFIG, val);
4809
4810 /* Configure page size. */
4811 val = REG_RD(sc, BCE_TBDR_CONFIG);
4812 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
4813 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4814 REG_WR(sc, BCE_TBDR_CONFIG, val);
4815
4816 /* Set the perfect match control register to default. */
4817 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
4818
4819bce_chipinit_exit:
4820 DBEXIT(BCE_VERBOSE_RESET);
4821
4822 return(rc);
4823}
4824
4825
4826/****************************************************************************/
4827/* Initialize the controller in preparation to send/receive traffic. */
4828/* */
4829/* Returns: */
4830/* 0 for success, positive value for failure. */
4831/****************************************************************************/
4832static int
4833bce_blockinit(struct bce_softc *sc)
4834{
4835 u32 reg, val;
4836 int rc = 0;
4837
4838 DBENTER(BCE_VERBOSE_RESET);
4839
4840 /* Load the hardware default MAC address. */
4841 bce_set_mac_addr(sc);
4842
4843 /* Set the Ethernet backoff seed value */
4844 val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
4845 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) +
4846 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
4847 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
4848
4849 sc->last_status_idx = 0;
4850 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
4851
4852 /* Set up link change interrupt generation. */
4853 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
4854
4855 /* Program the physical address of the status block. */
4856 REG_WR(sc, BCE_HC_STATUS_ADDR_L,
4857 BCE_ADDR_LO(sc->status_block_paddr));
4858 REG_WR(sc, BCE_HC_STATUS_ADDR_H,
4859 BCE_ADDR_HI(sc->status_block_paddr));
4860
4861 /* Program the physical address of the statistics block. */
4862 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
4863 BCE_ADDR_LO(sc->stats_block_paddr));
4864 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
4865 BCE_ADDR_HI(sc->stats_block_paddr));
4866
4867 /* Program various host coalescing parameters. */
4868 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4869 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
4870 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4871 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
4872 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
4873 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
4874 REG_WR(sc, BCE_HC_TX_TICKS,
4875 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
4876 REG_WR(sc, BCE_HC_RX_TICKS,
4877 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
4878 REG_WR(sc, BCE_HC_COM_TICKS,
4879 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
4880 REG_WR(sc, BCE_HC_CMD_TICKS,
4881 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
4882 REG_WR(sc, BCE_HC_STATS_TICKS,
4883 (sc->bce_stats_ticks & 0xffff00));
4884 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
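	/*
	 * Each *_TRIP and *_TICKS write above packs the '_int' variant of
	 * the parameter (presumably the value used while an interrupt is
	 * outstanding) into the upper 16 bits and the normal value into
	 * the lower 16 bits.
	 */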
4885
4886 /* Configure the Host Coalescing block. */
4887 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
4888 BCE_HC_CONFIG_COLLECT_STATS;
4889
4890#if 0
4891 /* ToDo: Add MSI-X support. */
4892 if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
4893 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
4894 BCE_HC_SB_CONFIG_1;
4895
4896 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
4897
4898 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
4899 BCE_HC_SB_CONFIG_1_ONE_SHOT);
4900
4901 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
4902 (sc->tx_quick_cons_trip_int << 16) |
4903 sc->tx_quick_cons_trip);
4904
4905 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
4906 (sc->tx_ticks_int << 16) | sc->tx_ticks);
4907
4908 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4909 }
4910
4911 /*
4912 * Tell the HC block to automatically set the
4913 * INT_MASK bit after an MSI/MSI-X interrupt
4914 * is generated so the driver doesn't have to.
4915 */
4916 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
4917 val |= BCE_HC_CONFIG_ONE_SHOT;
4918
4919 /* Set the MSI-X status blocks to 128 byte boundaries. */
4920 if (sc->bce_flags & BCE_USING_MSIX_FLAG)
4921 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4922#endif
4923
4924 REG_WR(sc, BCE_HC_CONFIG, val);
4925
4926 /* Clear the internal statistics counters. */
4927 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
4928
4929 /* Verify that bootcode is running. */
4930 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
4931
4932 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control),
4933 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
4934 __FILE__, __LINE__);
4935 reg = 0);
4936
4937 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
4938 BCE_DEV_INFO_SIGNATURE_MAGIC) {
4939 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
4940 "Expected: 0x%08X\n", __FILE__, __LINE__,
4941 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
4942 BCE_DEV_INFO_SIGNATURE_MAGIC);
4943 rc = ENODEV;
4944 goto bce_blockinit_exit;
4945 }
4946
4947 /* Enable DMA */
4948 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4949 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4950 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4951 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4952 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4953 }
4954
4955 /* Allow bootcode to apply additional fixes before enabling MAC. */
4956 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 |
4957 BCE_DRV_MSG_CODE_RESET);
4958
4959 /* Enable link state change interrupt generation. */
4960 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
4961
4962 /* Enable the RXP. */
4963 bce_start_rxp_cpu(sc);
4964
4965 /* Disable management frames (NC-SI) from flowing to the MCP. */
4966 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4967 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
4968 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
4969 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
4970 }
4971
4972 /* Enable all remaining blocks in the MAC. */
4973 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4974 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
4975 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4976 BCE_MISC_ENABLE_DEFAULT_XI);
4977 else
4978 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4979 BCE_MISC_ENABLE_DEFAULT);
4980
4981 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4982 DELAY(20);
4983
4984 /* Save the current host coalescing block settings. */
4985 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
4986
4987bce_blockinit_exit:
4988 DBEXIT(BCE_VERBOSE_RESET);
4989
4990 return (rc);
4991}
4992
4993
4994/****************************************************************************/
4995/* Encapsulate an mbuf into the rx_bd chain. */
4996/* */
4997/* Returns: */
4998/* 0 for success, positive value for failure. */
4999/****************************************************************************/
5000static int
5001bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
5002 u16 *chain_prod, u32 *prod_bseq)
5003{
5004 bus_dmamap_t map;
5005 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
5006 struct mbuf *m_new = NULL;
5007 struct rx_bd *rxbd;
5008 int nsegs, error, rc = 0;
5009#ifdef BCE_DEBUG
5010 u16 debug_chain_prod = *chain_prod;
5011#endif
5012
5013 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5014
5015 /* Make sure the inputs are valid. */
5016 DBRUNIF((*chain_prod > MAX_RX_BD),
5017 BCE_PRINTF("%s(%d): RX producer out of range: "
5018 "0x%04X > 0x%04X\n", __FILE__, __LINE__,
5019 *chain_prod, (u16) MAX_RX_BD));
5020
5021 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5022 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
5023 *prod, *chain_prod, *prod_bseq);
5024
5025 /* Update some debug statistics counters. */
5026 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5027 sc->rx_low_watermark = sc->free_rx_bd);
5028 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
5029 sc->rx_empty_count++);
5030
5031 /* Check whether this is a new mbuf allocation. */
5032 if (m == NULL) {
5033
5034 /* Simulate an mbuf allocation failure. */
5035 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5036 sc->mbuf_alloc_failed_count++;
5037 sc->mbuf_alloc_failed_sim_count++;
5038 rc = ENOBUFS;
5039 goto bce_get_rx_buf_exit);
5040
5041 /* This is a new mbuf allocation. */
5042#ifdef BCE_JUMBO_HDRSPLIT
5043 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
5044#else
5045 m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
5046 sc->rx_bd_mbuf_alloc_size);
5047#endif
5048
5049 if (m_new == NULL) {
5050 sc->mbuf_alloc_failed_count++;
5051 rc = ENOBUFS;
5052 goto bce_get_rx_buf_exit;
5053 }
5054
5055 DBRUN(sc->debug_rx_mbuf_alloc++);
5056 } else {
5057 /* Reuse an existing mbuf. */
5058 m_new = m;
5059 }
5060
5061 /* Make sure we have a valid packet header. */
5062 M_ASSERTPKTHDR(m_new);
5063
5064 /* Initialize the mbuf size and pad if necessary for alignment. */
5065 m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
5066 m_adj(m_new, sc->rx_bd_mbuf_align_pad);
5067
5068 /* ToDo: Consider calling m_fragment() to test error handling. */
5069
5070 /* Map the mbuf cluster into device memory. */
5071 map = sc->rx_mbuf_map[*chain_prod];
5072 error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
5073 segs, &nsegs, BUS_DMA_NOWAIT);
5074
5075 /* Handle any mapping errors. */
5076 if (error) {
5077 BCE_PRINTF("%s(%d): Error mapping mbuf into RX "
5078 "chain (%d)!\n", __FILE__, __LINE__, error);
5079
5080 sc->dma_map_addr_rx_failed_count++;
5081 m_freem(m_new);
5082
5083 DBRUN(sc->debug_rx_mbuf_alloc--);
5084
5085 rc = ENOBUFS;
5086 goto bce_get_rx_buf_exit;
5087 }
5088
5089 /* All mbufs must map to a single segment. */
5090 KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
5091 __FUNCTION__, nsegs));
5092
5093 /* Setup the rx_bd for the segment. */
5094 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
5095
5096 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
5097 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
5098 rxbd->rx_bd_len = htole32(segs[0].ds_len);
5099 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5100 *prod_bseq += segs[0].ds_len;
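	/*
	 * prod_bseq is the running byte sequence for the RX chain; the
	 * caller hands the new rx_bd to the hardware by writing this value
	 * and the producer index to the host mailbox once the chain has
	 * been replenished.
	 */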
5101
5102 /* Save the mbuf and update our counter. */
5103 sc->rx_mbuf_ptr[*chain_prod] = m_new;
5104 sc->free_rx_bd -= nsegs;
5105
5106 DBRUNMSG(BCE_INSANE_RECV,
5107 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));
5108
5109 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5110 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n",
5111 __FUNCTION__, *prod, *chain_prod, *prod_bseq);
5112
5113bce_get_rx_buf_exit:
5114 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5115
5116 return(rc);
5117}
5118
5119
5120#ifdef BCE_JUMBO_HDRSPLIT
5121/****************************************************************************/
5122/* Encapsulate an mbuf cluster into the page chain. */
5123/* */
5124/* Returns: */
5125/* 0 for success, positive value for failure. */
5126/****************************************************************************/
5127static int
5128bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
5129 u16 *prod_idx)
5130{
5131 bus_dmamap_t map;
5132 bus_addr_t busaddr;
5133 struct mbuf *m_new = NULL;
5134 struct rx_bd *pgbd;
5135 int error, rc = 0;
5136#ifdef BCE_DEBUG
5137 u16 debug_prod_idx = *prod_idx;
5138#endif
5139
5140 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5141
5142 /* Make sure the inputs are valid. */
5143 DBRUNIF((*prod_idx > MAX_PG_BD),
5144 BCE_PRINTF("%s(%d): page producer out of range: "
5145 "0x%04X > 0x%04X\n", __FILE__, __LINE__,
5146 *prod_idx, (u16) MAX_PG_BD));
5147
5148 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5149 "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5150
5151 /* Update counters if we've hit a new low or run out of pages. */
5152 DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
5153 sc->pg_low_watermark = sc->free_pg_bd);
5154 DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
5155
5156 /* Check whether this is a new mbuf allocation. */
5157 if (m == NULL) {
5158
5159 /* Simulate an mbuf allocation failure. */
5160 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5161 sc->mbuf_alloc_failed_count++;
5162 sc->mbuf_alloc_failed_sim_count++;
5163 rc = ENOBUFS;
5164 goto bce_get_pg_buf_exit);
5165
5166 /* This is a new mbuf allocation. */
5167 m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
5168 if (m_new == NULL) {
5169 sc->mbuf_alloc_failed_count++;
5170 rc = ENOBUFS;
5171 goto bce_get_pg_buf_exit;
5172 }
5173
5174 DBRUN(sc->debug_pg_mbuf_alloc++);
5175 } else {
5176 /* Reuse an existing mbuf. */
5177 m_new = m;
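		/*
		 * Point m_data back at the start of the external cluster in
		 * case a previous user adjusted it.
		 */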
5178 m_new->m_data = m_new->m_ext.ext_buf;
5179 }
5180
5181 m_new->m_len = sc->pg_bd_mbuf_alloc_size;
5182
5183 /* ToDo: Consider calling m_fragment() to test error handling. */
5184
5185 /* Map the mbuf cluster into device memory. */
5186 map = sc->pg_mbuf_map[*prod_idx];
5187 error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
5188 sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr,
5189 &busaddr, BUS_DMA_NOWAIT);
5190
5191 /* Handle any mapping errors. */
5192 if (error) {
5193 BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
5194 __FILE__, __LINE__);
5195
5196 m_freem(m_new);
5197 DBRUN(sc->debug_pg_mbuf_alloc--);
5198
5199 rc = ENOBUFS;
5200 goto bce_get_pg_buf_exit;
5201 }
5202
5203 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
5204
5205 /*
5206 * The page chain uses the same rx_bd data structure
5207 * as the receive chain but doesn't require a byte sequence (bseq).
5208 */
5209 pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
5210
5211 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
5212 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
5213 pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
5214 pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5215
5216 /* Save the mbuf and update our counter. */
5217 sc->pg_mbuf_ptr[*prod_idx] = m_new;
5218 sc->free_pg_bd--;
5219
5220 DBRUNMSG(BCE_INSANE_RECV,
5221 bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1));
5222
5223 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5224 "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5225
5226bce_get_pg_buf_exit:
5227 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5228
5229 return(rc);
5230}
5231#endif /* BCE_JUMBO_HDRSPLIT */
5232
5233
5234/****************************************************************************/
5235/* Initialize the TX context memory. */
5236/* */
5237/* Returns: */
5238/* Nothing */
5239/****************************************************************************/
5240static void
5241bce_init_tx_context(struct bce_softc *sc)
5242{
5243 u32 val;
5244
5245 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5246
5247 /* Initialize the context ID for an L2 TX chain. */
5248 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5249 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5250 /* Set the CID type to support an L2 connection. */
5251 val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI |
5252 BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5253 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5254 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5255 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5256 BCE_L2CTX_TX_CMD_TYPE_XI, val);
5257
5258 /* Point the hardware to the first page in the chain. */
5259 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5260 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5261 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5262 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5263 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5264 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5265 } else {
5266 /* Set the CID type to support an L2 connection. */
5267 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5268 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5269 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5270 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5271
5272 /* Point the hardware to the first page in the chain. */
5273 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5274 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5275 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5276 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5277 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5278 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5279 }
5280
5281 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5282}
5283
5284
5285/****************************************************************************/
5286/* Allocate memory and initialize the TX data structures. */
5287/* */
5288/* Returns: */
5289/* 0 for success, positive value for failure. */
5290/****************************************************************************/
5291static int
5292bce_init_tx_chain(struct bce_softc *sc)
5293{
5294 struct tx_bd *txbd;
5295 int i, rc = 0;
5296
5297 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5298
5299 /* Set the initial TX producer/consumer indices. */
5300 sc->tx_prod = 0;
5301 sc->tx_cons = 0;
5302 sc->tx_prod_bseq = 0;
5303 sc->used_tx_bd = 0;
5304 sc->max_tx_bd = USABLE_TX_BD;
5305 DBRUN(sc->tx_hi_watermark = 0);
5306 DBRUN(sc->tx_full_count = 0);
5307
5308 /*
5309	 * The NetXtreme II supports a linked-list structure called
5310 * a Buffer Descriptor Chain (or BD chain). A BD chain
5311 * consists of a series of 1 or more chain pages, each of which
5312 * consists of a fixed number of BD entries.
5313 * The last BD entry on each page is a pointer to the next page
5314 * in the chain, and the last pointer in the BD chain
5315 * points back to the beginning of the chain.
5316 */
5317
5318 /* Set the TX next pointer chain entries. */
5319 for (i = 0; i < TX_PAGES; i++) {
5320 int j;
5321
5322 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5323
5324 /* Check if we've reached the last page. */
5325 if (i == (TX_PAGES - 1))
5326 j = 0;
5327 else
5328 j = i + 1;
5329
5330 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5331 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5332 }
5333
5334 bce_init_tx_context(sc);
5335
5336 DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
5337 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5338
5339 return(rc);
5340}
5341
5342
5343/****************************************************************************/
5344/* Free memory and clear the TX data structures. */
5345/* */
5346/* Returns: */
5347/* Nothing. */
5348/****************************************************************************/
5349static void
5350bce_free_tx_chain(struct bce_softc *sc)
5351{
5352 int i;
5353
5354 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5355
5356 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5357 for (i = 0; i < TOTAL_TX_BD; i++) {
5358 if (sc->tx_mbuf_ptr[i] != NULL) {
5359 if (sc->tx_mbuf_map[i] != NULL)
5360 bus_dmamap_sync(sc->tx_mbuf_tag,
5361 sc->tx_mbuf_map[i],
5362 BUS_DMASYNC_POSTWRITE);
5363 m_freem(sc->tx_mbuf_ptr[i]);
5364 sc->tx_mbuf_ptr[i] = NULL;
5365 DBRUN(sc->debug_tx_mbuf_alloc--);
5366 }
5367 }
5368
5369 /* Clear each TX chain page. */
5370 for (i = 0; i < TX_PAGES; i++)
5371 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5372
5373 sc->used_tx_bd = 0;
5374
5375 /* Check if we lost any mbufs in the process. */
5376 DBRUNIF((sc->debug_tx_mbuf_alloc),
5377 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5378 "from tx chain!\n", __FILE__, __LINE__,
5379 sc->debug_tx_mbuf_alloc));
5380
5381 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5382}
5383
5384
5385/****************************************************************************/
5386/* Initialize the RX context memory. */
5387/* */
5388/* Returns: */
5389/* Nothing */
5390/****************************************************************************/
5391static void
5392bce_init_rx_context(struct bce_softc *sc)
5393{
5394 u32 val;
5395
5396 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5397
5398 /* Init the type, size, and BD cache levels for the RX context. */
5399 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5400 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5401 (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5402
5403 /*
5404 * Set the level for generating pause frames
5405 * when the number of available rx_bd's gets
5406 * too low (the low watermark) and the level
5407 * when pause frames can be stopped (the high
5408 * watermark).
5409 */
5410 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5411 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5412 u32 lo_water, hi_water;
5413
5414 if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) {
5415 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5416 } else {
5417 lo_water = 0;
5418 }
5419
5420 if (lo_water >= USABLE_RX_BD) {
5421 lo_water = 0;
5422 }
5423
5424 hi_water = USABLE_RX_BD / 4;
5425
5426 if (hi_water <= lo_water) {
5427 lo_water = 0;
5428 }
5429
5430 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5431 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5432
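		/* Clamp the high watermark; if it scales to zero, clear the low watermark too. */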
5433 if (hi_water > 0xf)
5434 hi_water = 0xf;
5435 else if (hi_water == 0)
5436 lo_water = 0;
5437
5438 val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5439 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5440 }
5441
5442 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5443
5444 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5445 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5446 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5447 val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5448 REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5449 }
5450
5451 /* Point the hardware to the first page in the chain. */
5452 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5453 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5454 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5455 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5456
5457 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5458}
5459
5460
5461/****************************************************************************/
5462/* Allocate memory and initialize the RX data structures. */
5463/* */
5464/* Returns: */
5465/* 0 for success, positive value for failure. */
5466/****************************************************************************/
5467static int
5468bce_init_rx_chain(struct bce_softc *sc)
5469{
5470 struct rx_bd *rxbd;
5471 int i, rc = 0;
5472
5473 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5474 BCE_VERBOSE_CTX);
5475
5476 /* Initialize the RX producer and consumer indices. */
5477 sc->rx_prod = 0;
5478 sc->rx_cons = 0;
5479 sc->rx_prod_bseq = 0;
5480 sc->free_rx_bd = USABLE_RX_BD;
5481 sc->max_rx_bd = USABLE_RX_BD;
5482
5483 /* Initialize the RX next pointer chain entries. */
5484 for (i = 0; i < RX_PAGES; i++) {
5485 int j;
5486
5487 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5488
5489 /* Check if we've reached the last page. */
5490 if (i == (RX_PAGES - 1))
5491 j = 0;
5492 else
5493 j = i + 1;
5494
5495 /* Setup the chain page pointers. */
5496 rxbd->rx_bd_haddr_hi =
5497 htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5498 rxbd->rx_bd_haddr_lo =
5499 htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5500 }
5501
5502 /* Fill up the RX chain. */
5503 bce_fill_rx_chain(sc);
5504
5505 DBRUN(sc->rx_low_watermark = USABLE_RX_BD);
5506 DBRUN(sc->rx_empty_count = 0);
5507 for (i = 0; i < RX_PAGES; i++) {
5508 bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
5509 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5510 }
5511
5512 bce_init_rx_context(sc);
5513
5514 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD));
5515 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5516 BCE_VERBOSE_CTX);
5517
5518 /* ToDo: Are there possible failure modes here? */
5519
5520 return(rc);
5521}
5522
5523
5524/****************************************************************************/
5525/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
5526/* occurs. */
5527/* */
5528/* Returns: */
5529/* Nothing */
5530/****************************************************************************/
5531static void
5532bce_fill_rx_chain(struct bce_softc *sc)
5533{
5534 u16 prod, prod_idx;
5535 u32 prod_bseq;
5536
5537 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5538 BCE_VERBOSE_CTX);
5539
5540 /* Get the RX chain producer indices. */
5541 prod = sc->rx_prod;
5542 prod_bseq = sc->rx_prod_bseq;
5543
5544 /* Keep filling the RX chain until it's full. */
5545 while (sc->free_rx_bd > 0) {
5546 prod_idx = RX_CHAIN_IDX(prod);
5547 if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
5548 /* Bail out if we can't add an mbuf to the chain. */
5549 break;
5550 }
5551 prod = NEXT_RX_BD(prod);
5552 }
5553
5554 /* Save the RX chain producer indices. */
5555 sc->rx_prod = prod;
5556 sc->rx_prod_bseq = prod_bseq;
5557
5558 /* We should never end up pointing to a next page pointer. */
5559 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5560 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5561 __FUNCTION__, sc->rx_prod));
5562
5563 /* Write the mailbox and tell the chip about the waiting rx_bd's. */
5564 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
5565 BCE_L2MQ_RX_HOST_BDIDX, sc->rx_prod);
5566 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) +
5567 BCE_L2MQ_RX_HOST_BSEQ, sc->rx_prod_bseq);
5568
5569 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5570 BCE_VERBOSE_CTX);
5571}
5572
5573
5574/****************************************************************************/
5575/* Free memory and clear the RX data structures. */
5576/* */
5577/* Returns: */
5578/* Nothing. */
5579/****************************************************************************/
5580static void
5581bce_free_rx_chain(struct bce_softc *sc)
5582{
5583 int i;
5584
5585 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5586
5587 /* Free any mbufs still in the RX mbuf chain. */
5588 for (i = 0; i < TOTAL_RX_BD; i++) {
5589 if (sc->rx_mbuf_ptr[i] != NULL) {
5590 if (sc->rx_mbuf_map[i] != NULL)
5591 bus_dmamap_sync(sc->rx_mbuf_tag,
5592 sc->rx_mbuf_map[i],
5593 BUS_DMASYNC_POSTREAD);
5594 m_freem(sc->rx_mbuf_ptr[i]);
5595 sc->rx_mbuf_ptr[i] = NULL;
5596 DBRUN(sc->debug_rx_mbuf_alloc--);
5597 }
5598 }
5599
5600 /* Clear each RX chain page. */
5601 for (i = 0; i < RX_PAGES; i++)
5602 if (sc->rx_bd_chain[i] != NULL) {
5603 bzero((char *)sc->rx_bd_chain[i],
5604 BCE_RX_CHAIN_PAGE_SZ);
5605 }
5606
5607 sc->free_rx_bd = sc->max_rx_bd;
5608
5609 /* Check if we lost any mbufs in the process. */
5610 DBRUNIF((sc->debug_rx_mbuf_alloc),
5611 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5612 __FUNCTION__, sc->debug_rx_mbuf_alloc));
5613
5614 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5615}
5616
5617
5618#ifdef BCE_JUMBO_HDRSPLIT
5619/****************************************************************************/
5620/* Allocate memory and initialize the page data structures. */
5621/* Assumes that bce_init_rx_chain() has not already been called. */
5622/* */
5623/* Returns: */
5624/* 0 for success, positive value for failure. */
5625/****************************************************************************/
5626static int
5627bce_init_pg_chain(struct bce_softc *sc)
5628{
5629 struct rx_bd *pgbd;
5630 int i, rc = 0;
5631 u32 val;
5632
5633 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5634 BCE_VERBOSE_CTX);
5635
5636 /* Initialize the page producer and consumer indices. */
5637 sc->pg_prod = 0;
5638 sc->pg_cons = 0;
5639 sc->free_pg_bd = USABLE_PG_BD;
5640 sc->max_pg_bd = USABLE_PG_BD;
5641 DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5642 DBRUN(sc->pg_empty_count = 0);
5643
5644 /* Initialize the page next pointer chain entries. */
5645 for (i = 0; i < PG_PAGES; i++) {
5646 int j;
5647
5648 pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5649
5650 /* Check if we've reached the last page. */
5651 if (i == (PG_PAGES - 1))
5652 j = 0;
5653 else
5654 j = i + 1;
5655
5656 /* Setup the chain page pointers. */
5657 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5658 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5659 }
5660
5661 /* Setup the MQ BIN mapping for host_pg_bidx. */
5662 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5663 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
5664 REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5665
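	/* Clear the page chain buffer size in context memory before programming it below. */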
5666 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5667
5668 /* Configure the rx_bd and page chain mbuf cluster size. */
5669 val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
5670 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
5671
5672 /* Configure the context reserved for jumbo support. */
5673 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5674 BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5675
5676 /* Point the hardware to the first page in the page chain. */
5677 val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5678 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5679 val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5680 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5681
5682 /* Fill up the page chain. */
5683 bce_fill_pg_chain(sc);
5684
5685 for (i = 0; i < PG_PAGES; i++) {
5686 bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
5687 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5688 }
5689
5690 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
5691 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5692 BCE_VERBOSE_CTX);
5693 return(rc);
5694}
5695
5696
5697/****************************************************************************/
5698/* Add mbufs to the page chain until it's full or an mbuf allocation error  */
5699/* occurs. */
5700/* */
5701/* Returns: */
5702/* Nothing */
5703/****************************************************************************/
5704static void
5705bce_fill_pg_chain(struct bce_softc *sc)
5706{
5707 u16 prod, prod_idx;
5708
5709 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5710 BCE_VERBOSE_CTX);
5711
5712	/* Get the page chain producer index. */
5713 prod = sc->pg_prod;
5714
5715 /* Keep filling the page chain until it's full. */
5716 while (sc->free_pg_bd > 0) {
5717 prod_idx = PG_CHAIN_IDX(prod);
5718 if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
5719 /* Bail out if we can't add an mbuf to the chain. */
5720 break;
5721 }
5722 prod = NEXT_PG_BD(prod);
5723 }
5724
5725 /* Save the page chain producer index. */
5726 sc->pg_prod = prod;
5727
5728 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5729 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5730 __FUNCTION__, sc->pg_prod));
5731
5732 /*
5733 * Write the mailbox and tell the chip about
5734 * the new rx_bd's in the page chain.
5735 */
5736 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
5737 BCE_L2MQ_RX_HOST_PG_BDIDX, sc->pg_prod);
5738
5739 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5740 BCE_VERBOSE_CTX);
5741}
5742
5743
5744/****************************************************************************/
5745/* Free memory and clear the page chain data structures.                    */
5746/* */
5747/* Returns: */
5748/* Nothing. */
5749/****************************************************************************/
5750static void
5751bce_free_pg_chain(struct bce_softc *sc)
5752{
5753 int i;
5754
5755 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5756
5757 /* Free any mbufs still in the mbuf page chain. */
5758 for (i = 0; i < TOTAL_PG_BD; i++) {
5759 if (sc->pg_mbuf_ptr[i] != NULL) {
5760 if (sc->pg_mbuf_map[i] != NULL)
5761 bus_dmamap_sync(sc->pg_mbuf_tag,
5762 sc->pg_mbuf_map[i],
5763 BUS_DMASYNC_POSTREAD);
5764 m_freem(sc->pg_mbuf_ptr[i]);
5765 sc->pg_mbuf_ptr[i] = NULL;
5766 DBRUN(sc->debug_pg_mbuf_alloc--);
5767 }
5768 }
5769
5770	/* Clear each page chain page. */
5771 for (i = 0; i < PG_PAGES; i++)
5772 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
5773
5774 sc->free_pg_bd = sc->max_pg_bd;
5775
5776 /* Check if we lost any mbufs in the process. */
5777 DBRUNIF((sc->debug_pg_mbuf_alloc),
5778 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
5779 __FUNCTION__, sc->debug_pg_mbuf_alloc));
5780
5781 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5782}
5783#endif /* BCE_JUMBO_HDRSPLIT */
5784
5785
5786/****************************************************************************/
5787/* Set media options. */
5788/* */
5789/* Returns: */
5790/* 0 for success, positive value for failure. */
5791/****************************************************************************/
5792static int
5793bce_ifmedia_upd(struct ifnet *ifp)
5794{
5795 struct bce_softc *sc = ifp->if_softc;
5796 int error;
5797
5798 DBENTER(BCE_VERBOSE);
5799
5800 BCE_LOCK(sc);
5801 error = bce_ifmedia_upd_locked(ifp);
5802 BCE_UNLOCK(sc);
5803
5804 DBEXIT(BCE_VERBOSE);
5805 return (error);
5806}
5807
5808
5809/****************************************************************************/
5810/* Set media options. */
5811/* */
5812/* Returns: */
5813/*   0 for success, positive value for failure.                             */
5814/****************************************************************************/
5815static int
5816bce_ifmedia_upd_locked(struct ifnet *ifp)
5817{
5818 struct bce_softc *sc = ifp->if_softc;
5819 struct mii_data *mii;
5820 int error;
5821
5822 DBENTER(BCE_VERBOSE_PHY);
5823
5824 error = 0;
5825 BCE_LOCK_ASSERT(sc);
5826
5827 mii = device_get_softc(sc->bce_miibus);
5828
5829 /* Make sure the MII bus has been enumerated. */
5830 if (mii) {
5831 sc->bce_link_up = FALSE;
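		/* Reset each attached PHY before applying the new media setting. */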
5832 if (mii->mii_instance) {
5833 struct mii_softc *miisc;
5834
5835 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5836 mii_phy_reset(miisc);
5837 }
5838 error = mii_mediachg(mii);
5839 }
5840
5841 DBEXIT(BCE_VERBOSE_PHY);
5842 return (error);
5843}
5844
5845
5846/****************************************************************************/
5847/* Reports current media status. */
5848/* */
5849/* Returns: */
5850/* Nothing. */
5851/****************************************************************************/
5852static void
5853bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5854{
5855 struct bce_softc *sc = ifp->if_softc;
5856 struct mii_data *mii;
5857
5858 DBENTER(BCE_VERBOSE_PHY);
5859
5860 BCE_LOCK(sc);
5861
5862 if ((ifp->if_flags & IFF_UP) == 0) {
5863 BCE_UNLOCK(sc);
5864 return;
5865 }
5866 mii = device_get_softc(sc->bce_miibus);
5867
5868 mii_pollstat(mii);
5869 ifmr->ifm_active = mii->mii_media_active;
5870 ifmr->ifm_status = mii->mii_media_status;
5871
5872 BCE_UNLOCK(sc);
5873
5874 DBEXIT(BCE_VERBOSE_PHY);
5875}
5876
5877
5878/****************************************************************************/
5879/* Handles PHY generated interrupt events. */
5880/* */
5881/* Returns: */
5882/* Nothing. */
5883/****************************************************************************/
5884static void
5885bce_phy_intr(struct bce_softc *sc)
5886{
5887 u32 new_link_state, old_link_state;
5888
5889 DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5890
5891 DBRUN(sc->phy_interrupts++);
5892
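	/* Compare the current link state with the last acknowledged link state. */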
5893 new_link_state = sc->status_block->status_attn_bits &
5894 STATUS_ATTN_BITS_LINK_STATE;
5895 old_link_state = sc->status_block->status_attn_bits_ack &
5896 STATUS_ATTN_BITS_LINK_STATE;
5897
5898 /* Handle any changes if the link state has changed. */
5899 if (new_link_state != old_link_state) {
5900
5901 /* Update the status_attn_bits_ack field. */
5902 if (new_link_state) {
5903 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
5904 STATUS_ATTN_BITS_LINK_STATE);
5905 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
5906 __FUNCTION__);
5907 }
5908 else {
5909 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
5910 STATUS_ATTN_BITS_LINK_STATE);
5911 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
5912 __FUNCTION__);
5913 }
5914
5915 /*
5916 * Assume link is down and allow
5917 * tick routine to update the state
5918 * based on the actual media state.
5919 */
5920 sc->bce_link_up = FALSE;
5921 callout_stop(&sc->bce_tick_callout);
5922 bce_tick(sc);
5923 }
5924
5925 /* Acknowledge the link change interrupt. */
5926 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
5927
5928 DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5929}
5930
5931
5932/****************************************************************************/
5933/* Reads the receive consumer value from the status block (skipping over */
5934/* chain page pointer if necessary). */
5935/* */
5936/* Returns: */
5937/* hw_cons */
5938/****************************************************************************/
5939static inline u16
5940bce_get_hw_rx_cons(struct bce_softc *sc)
5941{
5942 u16 hw_cons;
5943
5944 rmb();
5945 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
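	/* Skip over the rx_bd reserved for the next chain page pointer. */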
5946 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5947 hw_cons++;
5948
5949 return hw_cons;
5950}
5951
5952/****************************************************************************/
5953/* Handles received frame interrupt events. */
5954/* */
5955/* Returns: */
5956/* Nothing. */
5957/****************************************************************************/
5958static void
5959bce_rx_intr(struct bce_softc *sc)
5960{
5961 struct ifnet *ifp = sc->bce_ifp;
5962 struct l2_fhdr *l2fhdr;
5963 struct ether_vlan_header *vh;
5964 unsigned int pkt_len;
5965 u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
5966 u32 status;
5967#ifdef BCE_JUMBO_HDRSPLIT
5968 unsigned int rem_len;
5969 u16 sw_pg_cons, sw_pg_cons_idx;
5970#endif
5971
5972 DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5973 DBRUN(sc->interrupts_rx++);
5974 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
5975 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5976 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5977
5978 /* Prepare the RX chain pages to be accessed by the host CPU. */
5979 for (int i = 0; i < RX_PAGES; i++)
5980 bus_dmamap_sync(sc->rx_bd_chain_tag,
5981 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5982
5983#ifdef BCE_JUMBO_HDRSPLIT
5984 /* Prepare the page chain pages to be accessed by the host CPU. */
5985 for (int i = 0; i < PG_PAGES; i++)
5986 bus_dmamap_sync(sc->pg_bd_chain_tag,
5987 sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5988#endif
5989
5990 /* Get the hardware's view of the RX consumer index. */
5991 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5992
5993 /* Get working copies of the driver's view of the consumer indices. */
5994 sw_rx_cons = sc->rx_cons;
5995
5996#ifdef BCE_JUMBO_HDRSPLIT
5997 sw_pg_cons = sc->pg_cons;
5998#endif
5999
6000 /* Update some debug statistics counters */
6001 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
6002 sc->rx_low_watermark = sc->free_rx_bd);
6003 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
6004 sc->rx_empty_count++);
6005
6006 /* Scan through the receive chain as long as there is work to do */
6007 /* ToDo: Consider setting a limit on the number of packets processed. */
6008 rmb();
6009 while (sw_rx_cons != hw_rx_cons) {
6010 struct mbuf *m0;
6011
6012 /* Convert the producer/consumer indices to an actual rx_bd index. */
6013 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
6014
6015 /* Unmap the mbuf from DMA space. */
6016 bus_dmamap_sync(sc->rx_mbuf_tag,
6017 sc->rx_mbuf_map[sw_rx_cons_idx],
6018 BUS_DMASYNC_POSTREAD);
6019 bus_dmamap_unload(sc->rx_mbuf_tag,
6020 sc->rx_mbuf_map[sw_rx_cons_idx]);
6021
6022 /* Remove the mbuf from the RX chain. */
6023 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
6024 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
6025 DBRUN(sc->debug_rx_mbuf_alloc--);
6026 sc->free_rx_bd++;
6027
6028 if(m0 == NULL) {
6029 DBPRINT(sc, BCE_EXTREME_RECV,
6030 "%s(): Oops! Empty mbuf pointer "
6031 "found in sc->rx_mbuf_ptr[0x%04X]!\n",
6032 __FUNCTION__, sw_rx_cons_idx);
6033 goto bce_rx_int_next_rx;
6034 }
6035
6036 /*
6037	 * Frames received on the NetXtreme II are prepended
6038 * with an l2_fhdr structure which provides status
6039 * information about the received frame (including
6040 * VLAN tags and checksum info). The frames are
6041 * also automatically adjusted to align the IP
6042 * header (i.e. two null bytes are inserted before
6043 * the Ethernet header). As a result the data
6044 * DMA'd by the controller into the mbuf looks
6045 * like this:
6046 *
6047 * +---------+-----+---------------------+-----+
6048 * | l2_fhdr | pad | packet data | FCS |
6049 * +---------+-----+---------------------+-----+
6050 *
6051 * The l2_fhdr needs to be checked and skipped and
6052 * the FCS needs to be stripped before sending the
6053 * packet up the stack.
6054 */
6055 l2fhdr = mtod(m0, struct l2_fhdr *);
6056
6057 /* Get the packet data + FCS length and the status. */
6058 pkt_len = l2fhdr->l2_fhdr_pkt_len;
6059 status = l2fhdr->l2_fhdr_status;
6060
6061 /*
6062 * Skip over the l2_fhdr and pad, resulting in the
6063 * following data in the mbuf:
6064 * +---------------------+-----+
6065 * | packet data | FCS |
6066 * +---------------------+-----+
6067 */
6068 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
6069
6070#ifdef BCE_JUMBO_HDRSPLIT
6071 /*
6072 * Check whether the received frame fits in a single
6073 * mbuf or not (i.e. packet data + FCS <=
6074 * sc->rx_bd_mbuf_data_len bytes).
6075 */
6076 if (pkt_len > m0->m_len) {
6077 /*
6078 * The received frame is larger than a single mbuf.
6079 * If the frame was a TCP frame then only the TCP
6080 * header is placed in the mbuf, the remaining
6081 * payload (including FCS) is placed in the page
6082 * chain, the SPLIT flag is set, and the header
6083 * length is placed in the IP checksum field.
6084 * If the frame is not a TCP frame then the mbuf
6085 * is filled and the remaining bytes are placed
6086 * in the page chain.
6087 */
6088
6089 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large "
6090 "packet.\n", __FUNCTION__);
6091
6092 /*
6093 * When the page chain is enabled and the TCP
6094 * header has been split from the TCP payload,
6095 * the ip_xsum structure will reflect the length
6096 * of the TCP header, not the IP checksum. Set
6097 * the packet length of the mbuf accordingly.
6098 */
6099 if (status & L2_FHDR_STATUS_SPLIT)
6100 m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
6101
6102 rem_len = pkt_len - m0->m_len;
6103
6104 /* Pull mbufs off the page chain for the remaining data. */
6105 while (rem_len > 0) {
6106 struct mbuf *m_pg;
6107
6108 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
6109
6110 /* Remove the mbuf from the page chain. */
6111 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
6112 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
6113 DBRUN(sc->debug_pg_mbuf_alloc--);
6114 sc->free_pg_bd++;
6115
6116 /* Unmap the page chain mbuf from DMA space. */
6117 bus_dmamap_sync(sc->pg_mbuf_tag,
6118 sc->pg_mbuf_map[sw_pg_cons_idx],
6119 BUS_DMASYNC_POSTREAD);
6120 bus_dmamap_unload(sc->pg_mbuf_tag,
6121 sc->pg_mbuf_map[sw_pg_cons_idx]);
6122
6123 /* Adjust the mbuf length. */
6124 if (rem_len < m_pg->m_len) {
6125 /* The mbuf chain is complete. */
6126 m_pg->m_len = rem_len;
6127 rem_len = 0;
6128 } else {
6129 /* More packet data is waiting. */
6130 rem_len -= m_pg->m_len;
6131 }
6132
6133 /* Concatenate the mbuf cluster to the mbuf. */
6134 m_cat(m0, m_pg);
6135
6136 sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
6137 }
6138
6139 /* Set the total packet length. */
6140 m0->m_pkthdr.len = pkt_len;
6141
6142 } else {
6143 /*
6144 * The received packet is small and fits in a
6145 * single mbuf (i.e. the l2_fhdr + pad + packet +
6146 * FCS <= MHLEN). In other words, the packet is
6147 * 154 bytes or less in size.
6148 */
6149
6150 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small "
6151 "packet.\n", __FUNCTION__);
6152
6153 /* Set the total packet length. */
6154 m0->m_pkthdr.len = m0->m_len = pkt_len;
6155 }
6156#else
6157 /* Set the total packet length. */
6158 m0->m_pkthdr.len = m0->m_len = pkt_len;
6159#endif
6160
6161 /* Remove the trailing Ethernet FCS. */
6162 m_adj(m0, -ETHER_CRC_LEN);
6163
6164 /* Check that the resulting mbuf chain is valid. */
6165 DBRUN(m_sanity(m0, FALSE));
6166 DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
6167 (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
6168 BCE_PRINTF("Invalid Ethernet frame size!\n");
6169 m_print(m0, 128));
6170
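		/* Randomly inject a simulated l2_fhdr error (debug builds only). */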
6171 DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
6172 sc->l2fhdr_error_sim_count++;
6173 status = status | L2_FHDR_ERRORS_PHY_DECODE);
6174
6175 /* Check the received frame for errors. */
6176 if (status & (L2_FHDR_ERRORS_BAD_CRC |
6177 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
6178 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
6179
6180 /* Log the error and release the mbuf. */
6181 ifp->if_ierrors++;
6182 sc->l2fhdr_error_count++;
6183
6184 m_freem(m0);
6185 m0 = NULL;
6186 goto bce_rx_int_next_rx;
6187 }
6188
6189 /* Send the packet to the appropriate interface. */
6190 m0->m_pkthdr.rcvif = ifp;
6191
6192 /* Assume no hardware checksum. */
6193 m0->m_pkthdr.csum_flags = 0;
6194
6195 /* Validate the checksum if offload enabled. */
6196 if (ifp->if_capenable & IFCAP_RXCSUM) {
6197
6198 /* Check for an IP datagram. */
6199 if (!(status & L2_FHDR_STATUS_SPLIT) &&
6200 (status & L2_FHDR_STATUS_IP_DATAGRAM)) {
6201 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
6202 DBRUN(sc->csum_offload_ip++);
6203 /* Check if the IP checksum is valid. */
6204 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
6205 m0->m_pkthdr.csum_flags |=
6206 CSUM_IP_VALID;
6207 }
6208
6209 /* Check for a valid TCP/UDP frame. */
6210 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
6211 L2_FHDR_STATUS_UDP_DATAGRAM)) {
6212
6213 /* Check for a good TCP/UDP checksum. */
6214 if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
6215 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
6216 DBRUN(sc->csum_offload_tcp_udp++);
6217 m0->m_pkthdr.csum_data =
6218 l2fhdr->l2_fhdr_tcp_udp_xsum;
6219 m0->m_pkthdr.csum_flags |=
6220 (CSUM_DATA_VALID
6221 | CSUM_PSEUDO_HDR);
6222 }
6223 }
6224 }
6225
6226 /* Attach the VLAN tag. */
6227 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
6228 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
6229#if __FreeBSD_version < 700000
6230 VLAN_INPUT_TAG(ifp, m0,
6231 l2fhdr->l2_fhdr_vlan_tag, continue);
6232#else
6233 m0->m_pkthdr.ether_vtag =
6234 l2fhdr->l2_fhdr_vlan_tag;
6235 m0->m_flags |= M_VLANTAG;
6236#endif
6237 } else {
6238 /*
6239 * bce(4) controllers can't disable VLAN
6240 * tag stripping if management firmware
6241 * (ASF/IPMI/UMP) is running. So we always
6242 * strip VLAN tag and manually reconstruct
6243 * the VLAN frame by appending stripped
6244 * VLAN tag in driver if VLAN tag stripping
6245 * was disabled.
6246 *
6247 * TODO: LLC SNAP handling.
6248 */
6249 bcopy(mtod(m0, uint8_t *),
6250 mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN,
6251 ETHER_ADDR_LEN * 2);
6252 m0->m_data -= ETHER_VLAN_ENCAP_LEN;
6253 vh = mtod(m0, struct ether_vlan_header *);
6254 vh->evl_encap_proto = htons(ETHERTYPE_VLAN);
6255 vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag);
6256 m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN;
6257 m0->m_len += ETHER_VLAN_ENCAP_LEN;
6258 }
6259 }
6260
6261 /* Increment received packet statistics. */
6262 ifp->if_ipackets++;
6263
6264bce_rx_int_next_rx:
6265 sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
6266
6267 /* If we have a packet, pass it up the stack */
6268 if (m0) {
6269 /* Make sure we don't lose our place when we release the lock. */
6270 sc->rx_cons = sw_rx_cons;
6271#ifdef BCE_JUMBO_HDRSPLIT
6272 sc->pg_cons = sw_pg_cons;
6273#endif
6274
6275 BCE_UNLOCK(sc);
6276 (*ifp->if_input)(ifp, m0);
6277 BCE_LOCK(sc);
6278
6279 /* Recover our place. */
6280 sw_rx_cons = sc->rx_cons;
6281#ifdef BCE_JUMBO_HDRSPLIT
6282 sw_pg_cons = sc->pg_cons;
6283#endif
6284 }
6285
6286 /* Refresh hw_cons to see if there's new work */
6287 if (sw_rx_cons == hw_rx_cons)
6288 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
6289 }
6290
6291#ifdef BCE_JUMBO_HDRSPLIT
6292 /* No new packets. Refill the page chain. */
6293 sc->pg_cons = sw_pg_cons;
6294 bce_fill_pg_chain(sc);
6295#endif
6296
6297 /* No new packets. Refill the RX chain. */
6298 sc->rx_cons = sw_rx_cons;
6299 bce_fill_rx_chain(sc);
6300
6301	/* Prepare the RX chain pages to be accessed by the NIC. */
6302 for (int i = 0; i < RX_PAGES; i++)
6303 bus_dmamap_sync(sc->rx_bd_chain_tag,
6304 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6305
6306#ifdef BCE_JUMBO_HDRSPLIT
6307 for (int i = 0; i < PG_PAGES; i++)
6308 bus_dmamap_sync(sc->pg_bd_chain_tag,
6309 sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6310#endif
6311
6312 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
6313 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
6314 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
6315 DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
6316}
6317
6318
6319/****************************************************************************/
6320/* Reads the transmit consumer value from the status block (skipping over */
6321/* chain page pointer if necessary). */
6322/* */
6323/* Returns: */
6324/* hw_cons */
6325/****************************************************************************/
6326static inline u16
6327bce_get_hw_tx_cons(struct bce_softc *sc)
6328{
6329 u16 hw_cons;
6330
6331 mb();
6332 hw_cons = sc->status_block->status_tx_quick_consumer_index0;
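	/* Skip over the tx_bd reserved for the next chain page pointer. */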
6333 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6334 hw_cons++;
6335
6336 return hw_cons;
6337}
6338
6339
6340/****************************************************************************/
6341/* Handles transmit completion interrupt events. */
6342/* */
6343/* Returns: */
6344/* Nothing. */
6345/****************************************************************************/
6346static void
6347bce_tx_intr(struct bce_softc *sc)
6348{
6349 struct ifnet *ifp = sc->bce_ifp;
6350 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
6351
6352 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6353 DBRUN(sc->interrupts_tx++);
6354 DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
6355 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6356 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6357
6358 BCE_LOCK_ASSERT(sc);
6359
6360 /* Get the hardware's view of the TX consumer index. */
6361 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6362 sw_tx_cons = sc->tx_cons;
6363
6364 /* Prevent speculative reads of the status block. */
6365 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6366 BUS_SPACE_BARRIER_READ);
6367
6368 /* Cycle through any completed TX chain page entries. */
6369 while (sw_tx_cons != hw_tx_cons) {
6370#ifdef BCE_DEBUG
6371 struct tx_bd *txbd = NULL;
6372#endif
6373 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
6374
6375 DBPRINT(sc, BCE_INFO_SEND,
6376 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
6377 "sw_tx_chain_cons = 0x%04X\n",
6378 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
6379
6380 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
6381 BCE_PRINTF("%s(%d): TX chain consumer out of range! "
6382 " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
6383 (int) MAX_TX_BD);
6384 bce_breakpoint(sc));
6385
6386 DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
6387 [TX_IDX(sw_tx_chain_cons)]);
6388
6389 DBRUNIF((txbd == NULL),
6390 BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
6391 __FILE__, __LINE__, sw_tx_chain_cons);
6392 bce_breakpoint(sc));
6393
6394 DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
6395 bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
6396
6397 /*
6398 * Free the associated mbuf. Remember
6399 * that only the last tx_bd of a packet
6400 * has an mbuf pointer and DMA map.
6401 */
6402 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
6403
6404 /* Validate that this is the last tx_bd. */
6405 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
6406 BCE_PRINTF("%s(%d): tx_bd END flag not set but "
6407 "txmbuf == NULL!\n", __FILE__, __LINE__);
6408 bce_breakpoint(sc));
6409
6410 DBRUNMSG(BCE_INFO_SEND,
6411 BCE_PRINTF("%s(): Unloading map/freeing mbuf "
6412 "from tx_bd[0x%04X]\n", __FUNCTION__,
6413 sw_tx_chain_cons));
6414
6415 /* Unmap the mbuf. */
6416 bus_dmamap_unload(sc->tx_mbuf_tag,
6417 sc->tx_mbuf_map[sw_tx_chain_cons]);
6418
6419 /* Free the mbuf. */
6420 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
6421 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
6422 DBRUN(sc->debug_tx_mbuf_alloc--);
6423
6424 ifp->if_opackets++;
6425 }
6426
6427 sc->used_tx_bd--;
6428 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
6429
6430 /* Refresh hw_cons to see if there's new work. */
6431 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6432
6433 /* Prevent speculative reads of the status block. */
6434 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6435 BUS_SPACE_BARRIER_READ);
6436 }
6437
6438 /* Clear the TX timeout timer. */
6439 sc->watchdog_timer = 0;
6440
6441 /* Clear the tx hardware queue full flag. */
6442 if (sc->used_tx_bd < sc->max_tx_bd) {
6443 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
6444 DBPRINT(sc, BCE_INFO_SEND,
6445 "%s(): Open TX chain! %d/%d (used/total)\n",
6446 __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
6447 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6448 }
6449
6450 sc->tx_cons = sw_tx_cons;
6451
6452 DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
6453 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6454 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6455 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6456}
6457
6458
6459/****************************************************************************/
6460/* Disables interrupt generation. */
6461/* */
6462/* Returns: */
6463/* Nothing. */
6464/****************************************************************************/
6465static void
6466bce_disable_intr(struct bce_softc *sc)
6467{
6468 DBENTER(BCE_VERBOSE_INTR);
6469
6470 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
6471 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
6472
6473 DBEXIT(BCE_VERBOSE_INTR);
6474}
6475
6476
6477/****************************************************************************/
6478/* Enables interrupt generation. */
6479/* */
6480/* Returns: */
6481/* Nothing. */
6482/****************************************************************************/
6483static void
6484bce_enable_intr(struct bce_softc *sc, int coal_now)
6485{
6486 DBENTER(BCE_VERBOSE_INTR);
6487
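	/* Ack the last status index with interrupts masked, then unmasked. */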
6488 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6489 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
6490 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
6491
6492 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6493 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
6494
6495 /* Force an immediate interrupt (whether there is new data or not). */
6496 if (coal_now)
6497 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
6498
6499 DBEXIT(BCE_VERBOSE_INTR);
6500}
6501
6502
6503/****************************************************************************/
6504/* Handles controller initialization. */
6505/* */
6506/* Returns: */
6507/* Nothing. */
6508/****************************************************************************/
6509static void
6510bce_init_locked(struct bce_softc *sc)
6511{
6512 struct ifnet *ifp;
6513 u32 ether_mtu = 0;
6514
6515 DBENTER(BCE_VERBOSE_RESET);
6516
6517 BCE_LOCK_ASSERT(sc);
6518
6519 ifp = sc->bce_ifp;
6520
6521 /* Check if the driver is still running and bail out if it is. */
6522 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6523 goto bce_init_locked_exit;
6524
6525 bce_stop(sc);
6526
6527 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
6528 BCE_PRINTF("%s(%d): Controller reset failed!\n",
6529 __FILE__, __LINE__);
6530 goto bce_init_locked_exit;
6531 }
6532
6533 if (bce_chipinit(sc)) {
6534 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
6535 __FILE__, __LINE__);
6536 goto bce_init_locked_exit;
6537 }
6538
6539 if (bce_blockinit(sc)) {
6540 BCE_PRINTF("%s(%d): Block initialization failed!\n",
6541 __FILE__, __LINE__);
6542 goto bce_init_locked_exit;
6543 }
6544
6545 /* Load our MAC address. */
6546 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
6547 bce_set_mac_addr(sc);
6548
6549 /*
6550 * Calculate and program the hardware Ethernet MTU
6551 * size. Be generous on the receive if we have room.
6552 */
6553#ifdef BCE_JUMBO_HDRSPLIT
6554 if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
6555 sc->pg_bd_mbuf_alloc_size))
6556 ether_mtu = sc->rx_bd_mbuf_data_len +
6557 sc->pg_bd_mbuf_alloc_size;
6558#else
6559 if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
6560 ether_mtu = sc->rx_bd_mbuf_data_len;
6561#endif
6562 else
6563 ether_mtu = ifp->if_mtu;
6564
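	/* Account for the Ethernet header, VLAN tag, and CRC lengths. */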
6565 ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
6566
6567 DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n",
6568 __FUNCTION__, ether_mtu);
6569
6570 /* Program the mtu, enabling jumbo frame support if necessary. */
6571 if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
6572 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
6573 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
6574 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
6575 else
6576 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
6577
6578 DBPRINT(sc, BCE_INFO_LOAD,
6579	    "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
6580 "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
6581 sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
6582 sc->rx_bd_mbuf_align_pad);
6583
6584 /* Program appropriate promiscuous/multicast filtering. */
6585 bce_set_rx_mode(sc);
6586
6587#ifdef BCE_JUMBO_HDRSPLIT
6588 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
6589 __FUNCTION__, sc->pg_bd_mbuf_alloc_size);
6590
6591 /* Init page buffer descriptor chain. */
6592 bce_init_pg_chain(sc);
6593#endif
6594
6595 /* Init RX buffer descriptor chain. */
6596 bce_init_rx_chain(sc);
6597
6598 /* Init TX buffer descriptor chain. */
6599 bce_init_tx_chain(sc);
6600
6601 /* Enable host interrupts. */
6602 bce_enable_intr(sc, 1);
6603
6604 bce_ifmedia_upd_locked(ifp);
6605
6606 /* Let the OS know the driver is up and running. */
6607 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6608 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6609
6610 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6611
6612bce_init_locked_exit:
6613 DBEXIT(BCE_VERBOSE_RESET);
6614}
6615
6616
6617/****************************************************************************/
6618/* Initialize the controller just enough so that any management firmware */
6619/* running on the device will continue to operate correctly. */
6620/* */
6621/* Returns: */
6622/* Nothing. */
6623/****************************************************************************/
6624static void
6625bce_mgmt_init_locked(struct bce_softc *sc)
6626{
6627 struct ifnet *ifp;
6628
6629 DBENTER(BCE_VERBOSE_RESET);
6630
6631 BCE_LOCK_ASSERT(sc);
6632
6633 /* Bail out if management firmware is not running. */
6634 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
6635 DBPRINT(sc, BCE_VERBOSE_SPECIAL,
6636 "No management firmware running...\n");
6637 goto bce_mgmt_init_locked_exit;
6638 }
6639
6640 ifp = sc->bce_ifp;
6641
6642 /* Enable all critical blocks in the MAC. */
6643 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
6644 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
6645 DELAY(20);
6646
6647 bce_ifmedia_upd_locked(ifp);
6648
6649bce_mgmt_init_locked_exit:
6650 DBEXIT(BCE_VERBOSE_RESET);
6651}
6652
6653
6654/****************************************************************************/
6655/* Handles controller initialization when called from an unlocked routine. */
6656/* */
6657/* Returns: */
6658/* Nothing. */
6659/****************************************************************************/
6660static void
6661bce_init(void *xsc)
6662{
6663 struct bce_softc *sc = xsc;
6664
6665 DBENTER(BCE_VERBOSE_RESET);
6666
6667 BCE_LOCK(sc);
6668 bce_init_locked(sc);
6669 BCE_UNLOCK(sc);
6670
6671 DBEXIT(BCE_VERBOSE_RESET);
6672}
6673
6674
6675/****************************************************************************/
6676/* Modifies an mbuf for TSO on the hardware. */
6677/* */
6678/* Returns: */
6679/* Pointer to a modified mbuf. */
6680/****************************************************************************/
6681static struct mbuf *
6682bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
6683{
6684 struct mbuf *m;
6685 struct ether_header *eh;
6686 struct ip *ip;
6687 struct tcphdr *th;
6688 u16 etype;
6689 int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
6690
6691 DBRUN(sc->tso_frames_requested++);
6692
6693 /* Controller may modify mbuf chains. */
6694 if (M_WRITABLE(*m_head) == 0) {
6695 m = m_dup(*m_head, M_DONTWAIT);
6696 m_freem(*m_head);
6697 if (m == NULL) {
6698 sc->mbuf_alloc_failed_count++;
6699 *m_head = NULL;
6700 return (NULL);
6701 }
6702 *m_head = m;
6703 }
6704
6705 /*
6706 * For TSO the controller needs two pieces of info,
6707 * the MSS and the IP+TCP options length.
6708 */
6709 m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip));
6710 if (m == NULL) {
6711 *m_head = NULL;
6712 return (NULL);
6713 }
6714 eh = mtod(m, struct ether_header *);
6715 etype = ntohs(eh->ether_type);
6716
6717 /* Check for supported TSO Ethernet types (only IPv4 for now) */
6718 switch (etype) {
6719 case ETHERTYPE_IP:
6720 ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
6721 /* TSO only supported for TCP protocol. */
6722 if (ip->ip_p != IPPROTO_TCP) {
6723			BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n",
6724 __FILE__, __LINE__);
6725 m_freem(*m_head);
6726 *m_head = NULL;
6727 return (NULL);
6728 }
6729
6730 /* Get IP header length in bytes (min 20) */
6731 ip_hlen = ip->ip_hl << 2;
6732 m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen +
6733 sizeof(struct tcphdr));
6734 if (m == NULL) {
6735 *m_head = NULL;
6736 return (NULL);
6737 }
6738
6739 /* Get the TCP header length in bytes (min 20) */
6740 ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
6741 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
6742 tcp_hlen = (th->th_off << 2);
6743
6744 /* Make sure all IP/TCP options live in the same buffer. */
6745 m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen +
6746 tcp_hlen);
6747 if (m == NULL) {
6748 *m_head = NULL;
6749 return (NULL);
6750 }
6751
6752 /* IP header length and checksum will be calc'd by hardware */
6753 ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
6754 ip_len = ip->ip_len;
6755 ip->ip_len = 0;
6756 ip->ip_sum = 0;
6757 break;
6758 case ETHERTYPE_IPV6:
6759		BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n",
6760 __FILE__, __LINE__);
6761 m_freem(*m_head);
6762 *m_head = NULL;
6763 return (NULL);
6764 /* NOT REACHED */
6765 default:
6766		BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n",
6767 __FILE__, __LINE__);
6768 m_freem(*m_head);
6769 *m_head = NULL;
6770 return (NULL);
6771 }
6772
6773 hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen;
6774
6775 DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, "
6776 "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
6777 __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen,
6778 tcp_hlen, ip_len);
6779
6780 /* Set the LSO flag in the TX BD */
6781 *flags |= TX_BD_FLAGS_SW_LSO;
6782
6783 /* Set the length of IP + TCP options (in 32 bit words) */
6784 *flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) -
6785 sizeof(struct tcphdr)) >> 2) << 8);
6786
6787 DBRUN(sc->tso_frames_completed++);
6788 return (*m_head);
6789}
6790
6791
6792/****************************************************************************/
6793/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
6794/* memory visible to the controller. */
6795/* */
6796/* Returns: */
6797/* 0 for success, positive value for failure. */
6798/* Modified: */
6799/* m_head: May be set to NULL if MBUF is excessively fragmented. */
6800/****************************************************************************/
6801static int
6802bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
6803{
6804 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
6805 bus_dmamap_t map;
6806 struct tx_bd *txbd = NULL;
6807 struct mbuf *m0;
6808 u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0;
6809 u32 prod_bseq;
6810
6811#ifdef BCE_DEBUG
6812 u16 debug_prod;
6813#endif
6814
6815 int i, error, nsegs, rc = 0;
6816
6817 DBENTER(BCE_VERBOSE_SEND);
6818
6819 /* Make sure we have room in the TX chain. */
6820 if (sc->used_tx_bd >= sc->max_tx_bd)
6821 goto bce_tx_encap_exit;
6822
6823 /* Transfer any checksum offload flags to the bd. */
6824 m0 = *m_head;
6825 if (m0->m_pkthdr.csum_flags) {
6826 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
6827 m0 = bce_tso_setup(sc, m_head, &flags);
6828 if (m0 == NULL) {
6829 DBRUN(sc->tso_frames_failed++);
6830 goto bce_tx_encap_exit;
6831 }
6832 mss = htole16(m0->m_pkthdr.tso_segsz);
6833 } else {
6834 if (m0->m_pkthdr.csum_flags & CSUM_IP)
6835 flags |= TX_BD_FLAGS_IP_CKSUM;
6836 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
6837 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6838 }
6839 }
6840
6841 /* Transfer any VLAN tags to the bd. */
6842 if (m0->m_flags & M_VLANTAG) {
6843 flags |= TX_BD_FLAGS_VLAN_TAG;
6844 vlan_tag = m0->m_pkthdr.ether_vtag;
6845 }
6846
6847 /* Map the mbuf into DMAable memory. */
6848 prod = sc->tx_prod;
6849 chain_prod = TX_CHAIN_IDX(prod);
6850 map = sc->tx_mbuf_map[chain_prod];
6851
6852 /* Map the mbuf into our DMA address space. */
6853 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6854 segs, &nsegs, BUS_DMA_NOWAIT);
6855
6856 /* Check if the DMA mapping was successful */
6857 if (error == EFBIG) {
6858 sc->mbuf_frag_count++;
6859
6860 /* Try to defrag the mbuf. */
6861 m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS);
6862 if (m0 == NULL) {
6863 /* Defrag was unsuccessful */
6864 m_freem(*m_head);
6865 *m_head = NULL;
6866 sc->mbuf_alloc_failed_count++;
6867 rc = ENOBUFS;
6868 goto bce_tx_encap_exit;
6869 }
6870
6871 /* Defrag was successful, try mapping again */
6872 *m_head = m0;
6873 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag,
6874 map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
6875
6876 /* Still getting an error after a defrag. */
6877 if (error == ENOMEM) {
6878 /* Insufficient DMA buffers available. */
6879 sc->dma_map_addr_tx_failed_count++;
6880 rc = error;
6881 goto bce_tx_encap_exit;
6882 } else if (error != 0) {
6883 /* Release it and return an error. */
6884 BCE_PRINTF("%s(%d): Unknown error mapping mbuf into "
6885 "TX chain!\n", __FILE__, __LINE__);
6886 m_freem(m0);
6887 *m_head = NULL;
6888 sc->dma_map_addr_tx_failed_count++;
6889 rc = ENOBUFS;
6890 goto bce_tx_encap_exit;
6891 }
6892 } else if (error == ENOMEM) {
6893 /* Insufficient DMA buffers available. */
6894 sc->dma_map_addr_tx_failed_count++;
6895 rc = error;
6896 goto bce_tx_encap_exit;
6897 } else if (error != 0) {
6898 m_freem(m0);
6899 *m_head = NULL;
6900 sc->dma_map_addr_tx_failed_count++;
6901 rc = error;
6902 goto bce_tx_encap_exit;
6903 }
6904
6905 /* Make sure there's room in the chain */
6906 if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
6907 bus_dmamap_unload(sc->tx_mbuf_tag, map);
6908 rc = ENOBUFS;
6909 goto bce_tx_encap_exit;
6910 }
6911
6912 /* prod points to an empty tx_bd at this point. */
6913 prod_bseq = sc->tx_prod_bseq;
6914
6915#ifdef BCE_DEBUG
6916 debug_prod = chain_prod;
6917#endif
6918
6919 DBPRINT(sc, BCE_INFO_SEND,
6920 "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
6921 "prod_bseq = 0x%08X\n",
6922 __FUNCTION__, prod, chain_prod, prod_bseq);
6923
6924 /*
6925 * Cycle through each mbuf segment that makes up
6926 * the outgoing frame, gathering the mapping info
6927 * for that segment and creating a tx_bd for
6928 * the mbuf.
6929 */
6930 for (i = 0; i < nsegs ; i++) {
6931
6932 chain_prod = TX_CHAIN_IDX(prod);
6933 txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)]
6934 [TX_IDX(chain_prod)];
6935
6936 txbd->tx_bd_haddr_lo =
6937 htole32(BCE_ADDR_LO(segs[i].ds_addr));
6938 txbd->tx_bd_haddr_hi =
6939 htole32(BCE_ADDR_HI(segs[i].ds_addr));
6940 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
6941 htole16(segs[i].ds_len);
6942 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
6943 txbd->tx_bd_flags = htole16(flags);
6944 prod_bseq += segs[i].ds_len;
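		/* Mark the first tx_bd as the start of the frame. */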
6945 if (i == 0)
6946 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
6947 prod = NEXT_TX_BD(prod);
6948 }
6949
6950 /* Set the END flag on the last TX buffer descriptor. */
6951 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
6952
6953 DBRUNMSG(BCE_EXTREME_SEND,
6954 bce_dump_tx_chain(sc, debug_prod, nsegs));
6955
6956 /*
6957 * Ensure that the mbuf pointer for this transmission
6958 * is placed at the array index of the last
6959 * descriptor in this chain. This is done
6960 * because a single map is used for all
6961 * segments of the mbuf and we don't want to
6962 * unload the map before all of the segments
6963 * have been freed.
6964 */
6965 sc->tx_mbuf_ptr[chain_prod] = m0;
6966 sc->used_tx_bd += nsegs;
6967
6968	/* Update some debug statistics counters */
6969 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
6970 sc->tx_hi_watermark = sc->used_tx_bd);
6971 DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
6972 DBRUNIF(sc->debug_tx_mbuf_alloc++);
6973
6974 DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
6975
6976 /* prod points to the next free tx_bd at this point. */
6977 sc->tx_prod = prod;
6978 sc->tx_prod_bseq = prod_bseq;
6979
6980 /* Tell the chip about the waiting TX frames. */
6981 REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
6982 BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6983 REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
6984 BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6985
6986bce_tx_encap_exit:
6987 DBEXIT(BCE_VERBOSE_SEND);
6988 return(rc);
6989}
6990
6991
6992/****************************************************************************/
6993/* Main transmit routine when called from another routine with a lock. */
6994/* */
6995/* Returns: */
6996/* Nothing. */
6997/****************************************************************************/
6998static void
6999bce_start_locked(struct ifnet *ifp)
7000{
7001 struct bce_softc *sc = ifp->if_softc;
7002 struct mbuf *m_head = NULL;
7003 int count = 0;
7004 u16 tx_prod, tx_chain_prod;
7005
7006 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
7007
7008 BCE_LOCK_ASSERT(sc);
7009
7010 /* prod points to the next free tx_bd. */
7011 tx_prod = sc->tx_prod;
7012 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
7013
7014 DBPRINT(sc, BCE_INFO_SEND,
7015 "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
7016 "tx_prod_bseq = 0x%08X\n",
7017 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
7018
7019 /* If there's no link or the transmit queue is empty then just exit. */
7020 if (sc->bce_link_up == FALSE) {
7021 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
7022 __FUNCTION__);
7023 goto bce_start_locked_exit;
7024 }
7025
7026 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7027 DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
7028 __FUNCTION__);
7029 goto bce_start_locked_exit;
7030 }
7031
7032 /*
7033 * Keep adding entries while there is space in the ring.
7034 */
7035 while (sc->used_tx_bd < sc->max_tx_bd) {
7036
7037 /* Check for any frames to send. */
7038 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
7039
7040 /* Stop when the transmit queue is empty. */
7041 if (m_head == NULL)
7042 break;
7043
7044 /*
7045 * Pack the data into the transmit ring. If we
7046 * don't have room, place the mbuf back at the
7047 * head of the queue and set the OACTIVE flag
7048 * to wait for the NIC to drain the chain.
7049 */
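		/*
		 * (IFF_DRV_OACTIVE is expected to be cleared again by
		 * the TX completion path, bce_tx_intr(), once enough
		 * tx_bd entries have been reclaimed.)
		 */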
7050 if (bce_tx_encap(sc, &m_head)) {
7051 if (m_head != NULL)
7052 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
7053 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
7054 DBPRINT(sc, BCE_INFO_SEND,
7055 "TX chain is closed for business! Total "
7056 "tx_bd used = %d\n", sc->used_tx_bd);
7057 break;
7058 }
7059
7060 count++;
7061
7062 /* Send a copy of the frame to any BPF listeners. */
7063 ETHER_BPF_MTAP(ifp, m_head);
7064 }
7065
7066 /* Exit if no packets were dequeued. */
7067 if (count == 0) {
7068 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were "
7069 "dequeued\n", __FUNCTION__);
7070 goto bce_start_locked_exit;
7071 }
7072
7073 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into "
7074 "send queue.\n", __FUNCTION__, count);
7075
7076 /* Set the tx timeout. */
7077 sc->watchdog_timer = BCE_TX_TIMEOUT;
7078
7079 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
7080 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));
7081
7082bce_start_locked_exit:
7083 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
7084 return;
7085}
7086
7087
7088/****************************************************************************/
7089/* Main transmit routine when called from another routine without a lock. */
7090/* */
7091/* Returns: */
7092/* Nothing. */
7093/****************************************************************************/
7094static void
7095bce_start(struct ifnet *ifp)
7096{
7097 struct bce_softc *sc = ifp->if_softc;
7098
7099 DBENTER(BCE_VERBOSE_SEND);
7100
7101 BCE_LOCK(sc);
7102 bce_start_locked(ifp);
7103 BCE_UNLOCK(sc);
7104
7105 DBEXIT(BCE_VERBOSE_SEND);
7106}
7107
7108
7109/****************************************************************************/
7110/* Handles any IOCTL calls from the operating system. */
7111/* */
7112/* Returns: */
7113/* 0 for success, positive value for failure. */
7114/****************************************************************************/
7115static int
7116bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
7117{
7118 struct bce_softc *sc = ifp->if_softc;
7119 struct ifreq *ifr = (struct ifreq *) data;
7120 struct mii_data *mii;
7121 int mask, error = 0, reinit;
7122
7123 DBENTER(BCE_VERBOSE_MISC);
7124
7125 switch(command) {
7126
7127 /* Set the interface MTU. */
7128 case SIOCSIFMTU:
7129 /* Check that the MTU setting is supported. */
7130 if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
7131 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
7132 error = EINVAL;
7133 break;
7134 }
7135
7136 DBPRINT(sc, BCE_INFO_MISC,
7137 "SIOCSIFMTU: Changing MTU from %d to %d\n",
7138 (int) ifp->if_mtu, (int) ifr->ifr_mtu);
7139
7140 BCE_LOCK(sc);
7141 ifp->if_mtu = ifr->ifr_mtu;
7142 reinit = 0;
7143 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7144 /*
7145 			 * Because the new MTU changes the RX buffer
7146 			 * allocation size, stop the controller if
7147 			 * it is already running.
7148 */
7149 bce_stop(sc);
7150 reinit = 1;
7151 }
7152#ifdef BCE_JUMBO_HDRSPLIT
7153 /* No buffer allocation size changes are necessary. */
7154#else
7155 /* Recalculate our buffer allocation sizes. */
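		/*
		 * The receive buffer must hold the MTU plus the
		 * Ethernet header, an optional VLAN tag and the CRC.
		 * A standard cluster (MCLBYTES) is used when that
		 * fits; otherwise a 9KB jumbo cluster is required.
		 */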
7156 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
7157 ETHER_CRC_LEN) > MCLBYTES) {
7158 sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
7159 sc->rx_bd_mbuf_align_pad =
7160 roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
7161 sc->rx_bd_mbuf_data_len =
7162 sc->rx_bd_mbuf_alloc_size -
7163 sc->rx_bd_mbuf_align_pad;
7164 } else {
7165 sc->rx_bd_mbuf_alloc_size = MCLBYTES;
7166 sc->rx_bd_mbuf_align_pad =
7167 roundup2(MCLBYTES, 16) - MCLBYTES;
7168 sc->rx_bd_mbuf_data_len =
7169 sc->rx_bd_mbuf_alloc_size -
7170 sc->rx_bd_mbuf_align_pad;
7171 }
7172#endif
7173
7174 if (reinit != 0)
7175 bce_init_locked(sc);
7176 BCE_UNLOCK(sc);
7177 break;
7178
7179 /* Set interface flags. */
7180 case SIOCSIFFLAGS:
7181 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
7182
7183 BCE_LOCK(sc);
7184
7185 /* Check if the interface is up. */
7186 if (ifp->if_flags & IFF_UP) {
7187 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7188 /* Change promiscuous/multicast flags as necessary. */
7189 bce_set_rx_mode(sc);
7190 } else {
7191 /* Start the HW */
7192 bce_init_locked(sc);
7193 }
7194 } else {
7195 /* The interface is down, check if driver is running. */
7196 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7197 bce_stop(sc);
7198
7199 /* If MFW is running, restart the controller a bit. */
7200 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
7201 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
7202 bce_chipinit(sc);
7203 bce_mgmt_init_locked(sc);
7204 }
7205 }
7206 }
7207
7208 BCE_UNLOCK(sc);
7209 break;
7210
7211 /* Add/Delete multicast address */
7212 case SIOCADDMULTI:
7213 case SIOCDELMULTI:
7214 DBPRINT(sc, BCE_VERBOSE_MISC,
7215 "Received SIOCADDMULTI/SIOCDELMULTI\n");
7216
7217 BCE_LOCK(sc);
7218 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
7219 bce_set_rx_mode(sc);
7220 BCE_UNLOCK(sc);
7221
7222 break;
7223
7224 /* Set/Get Interface media */
7225 case SIOCSIFMEDIA:
7226 case SIOCGIFMEDIA:
7227 DBPRINT(sc, BCE_VERBOSE_MISC,
7228 "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
7229
7230 mii = device_get_softc(sc->bce_miibus);
7231 error = ifmedia_ioctl(ifp, ifr,
7232 &mii->mii_media, command);
7233 break;
7234
7235 /* Set interface capability */
7236 case SIOCSIFCAP:
7237 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
7238 DBPRINT(sc, BCE_INFO_MISC,
7239 "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
7240
7241 /* Toggle the TX checksum capabilities enable flag. */
7242 if (mask & IFCAP_TXCSUM &&
7243 ifp->if_capabilities & IFCAP_TXCSUM) {
7244 ifp->if_capenable ^= IFCAP_TXCSUM;
7245 if (IFCAP_TXCSUM & ifp->if_capenable)
7246 ifp->if_hwassist |= BCE_IF_HWASSIST;
7247 else
7248 ifp->if_hwassist &= ~BCE_IF_HWASSIST;
7249 }
7250
7251 /* Toggle the RX checksum capabilities enable flag. */
7252 if (mask & IFCAP_RXCSUM &&
7253 ifp->if_capabilities & IFCAP_RXCSUM)
7254 ifp->if_capenable ^= IFCAP_RXCSUM;
7255
7256 /* Toggle the TSO capabilities enable flag. */
7257 if (bce_tso_enable && (mask & IFCAP_TSO4) &&
7258 ifp->if_capabilities & IFCAP_TSO4) {
7259 ifp->if_capenable ^= IFCAP_TSO4;
7260 if (IFCAP_TSO4 & ifp->if_capenable)
7261 ifp->if_hwassist |= CSUM_TSO;
7262 else
7263 ifp->if_hwassist &= ~CSUM_TSO;
7264 }
7265
7266 if (mask & IFCAP_VLAN_HWCSUM &&
7267 ifp->if_capabilities & IFCAP_VLAN_HWCSUM)
7268 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
7269
7270 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
7271 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
7272 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
7273 /*
7274 * Don't actually disable VLAN tag stripping as
7275 * management firmware (ASF/IPMI/UMP) requires the
7276 		 * feature. If VLAN tag stripping is disabled, the
7277 		 * driver will manually reconstruct the VLAN frame
7278 		 * by appending the stripped VLAN tag.
7279 */
7280 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
7281 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
7282 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
7283 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
7284 == 0)
7285 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
7286 }
7287 VLAN_CAPABILITIES(ifp);
7288 break;
7289 default:
7290 /* We don't know how to handle the IOCTL, pass it on. */
7291 error = ether_ioctl(ifp, command, data);
7292 break;
7293 }
7294
7295 DBEXIT(BCE_VERBOSE_MISC);
7296 return(error);
7297}
7298
7299
7300/****************************************************************************/
7301/* Transmit timeout handler. */
7302/* */
7303/* Returns: */
7304/* Nothing. */
7305/****************************************************************************/
7306static void
7307bce_watchdog(struct bce_softc *sc)
7308{
7309 DBENTER(BCE_EXTREME_SEND);
7310
7311 BCE_LOCK_ASSERT(sc);
7312
7313 /* If the watchdog timer hasn't expired then just exit. */
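	/*
	 * A value of zero means the timer is not armed; otherwise it
	 * is decremented on each call (roughly once a second from
	 * bce_tick()) and the reset logic below only runs once the
	 * count reaches zero.
	 */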
7314 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
7315 goto bce_watchdog_exit;
7316
7317 /* If pause frames are active then don't reset the hardware. */
7318 /* ToDo: Should we reset the timer here? */
7319 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
7320 goto bce_watchdog_exit;
7321
7322 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
7323 __FILE__, __LINE__);
7324
7325 DBRUNMSG(BCE_INFO,
7326 bce_dump_driver_state(sc);
7327 bce_dump_status_block(sc);
7328 bce_dump_stats_block(sc);
7329 bce_dump_ftqs(sc);
7330 bce_dump_txp_state(sc, 0);
7331 bce_dump_rxp_state(sc, 0);
7332 bce_dump_tpat_state(sc, 0);
7333 bce_dump_cp_state(sc, 0);
7334 bce_dump_com_state(sc, 0));
7335
7336 DBRUN(bce_breakpoint(sc));
7337
7338 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
7339
7340 bce_init_locked(sc);
7341 sc->bce_ifp->if_oerrors++;
7342
7343bce_watchdog_exit:
7344 DBEXIT(BCE_EXTREME_SEND);
7345}
7346
7347
7348/*
7349 * Interrupt handler.
7350 */
7351/****************************************************************************/
7352/* Main interrupt entry point. Verifies that the controller generated the */
7353 /* interrupt and then calls a separate routine to handle the various */
7354/* interrupt causes (PHY, TX, RX). */
7355/* */
7356/* Returns: */
7357 /* Nothing. */
7358/****************************************************************************/
7359static void
7360bce_intr(void *xsc)
7361{
7362 struct bce_softc *sc;
7363 struct ifnet *ifp;
7364 u32 status_attn_bits;
7365 u16 hw_rx_cons, hw_tx_cons;
7366
7367 sc = xsc;
7368 ifp = sc->bce_ifp;
7369
7370 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7371 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
7372 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc));
7373
7374 BCE_LOCK(sc);
7375
7376 DBRUN(sc->interrupts_generated++);
7377
7378 	/* Synchronize before we read from the interface's status block */
7379 bus_dmamap_sync(sc->status_tag, sc->status_map,
7380 BUS_DMASYNC_POSTREAD);
7381
7382 /*
7383 * If the hardware status block index
7384 * matches the last value read by the
7385 * driver and we haven't asserted our
7386 * interrupt then there's nothing to do.
7387 */
7388 if ((sc->status_block->status_idx == sc->last_status_idx) &&
7389 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
7390 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
7391 DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
7392 __FUNCTION__);
7393 goto bce_intr_exit;
7394 }
7395
7396 	/* Ack the interrupt and stop others from occurring. */
7397 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
7398 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
7399 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
7400
7401 /* Check if the hardware has finished any work. */
7402 hw_rx_cons = bce_get_hw_rx_cons(sc);
7403 hw_tx_cons = bce_get_hw_tx_cons(sc);
7404
7405 /* Keep processing data as long as there is work to do. */
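	/*
	 * The consumer indices are re-read at the bottom of each pass
	 * so that work arriving while the previous batch is processed
	 * is not missed before interrupts are re-enabled.
	 */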
7406 for (;;) {
7407
7408 status_attn_bits = sc->status_block->status_attn_bits;
7409
7410 DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
7411 BCE_PRINTF("Simulating unexpected status attention "
7412 "bit set.");
7413 sc->unexpected_attention_sim_count++;
7414 status_attn_bits = status_attn_bits |
7415 STATUS_ATTN_BITS_PARITY_ERROR);
7416
7417 /* Was it a link change interrupt? */
7418 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
7419 (sc->status_block->status_attn_bits_ack &
7420 STATUS_ATTN_BITS_LINK_STATE)) {
7421 bce_phy_intr(sc);
7422
7423 /* Clear transient updates during link state change. */
7424 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command |
7425 BCE_HC_COMMAND_COAL_NOW_WO_INT);
7426 REG_RD(sc, BCE_HC_COMMAND);
7427 }
7428
7429 /* If any other attention is asserted, the chip is toast. */
7430 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
7431 (sc->status_block->status_attn_bits_ack &
7432 ~STATUS_ATTN_BITS_LINK_STATE))) {
7433
7434 sc->unexpected_attention_count++;
7435
7436 BCE_PRINTF("%s(%d): Fatal attention detected: "
7437 "0x%08X\n", __FILE__, __LINE__,
7438 sc->status_block->status_attn_bits);
7439
7440 DBRUNMSG(BCE_FATAL,
7441 if (unexpected_attention_sim_control == 0)
7442 bce_breakpoint(sc));
7443
7444 bce_init_locked(sc);
7445 goto bce_intr_exit;
7446 }
7447
7448 /* Check for any completed RX frames. */
7449 if (hw_rx_cons != sc->hw_rx_cons)
7450 bce_rx_intr(sc);
7451
7452 /* Check for any completed TX frames. */
7453 if (hw_tx_cons != sc->hw_tx_cons)
7454 bce_tx_intr(sc);
7455
7456 /* Save status block index value for the next interrupt. */
7457 sc->last_status_idx = sc->status_block->status_idx;
7458
7459 /*
7460 * Prevent speculative reads from getting
7461 * ahead of the status block.
7462 */
7463 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
7464 BUS_SPACE_BARRIER_READ);
7465
7466 /*
7467 * If there's no work left then exit the
7468 * interrupt service routine.
7469 */
7470 hw_rx_cons = bce_get_hw_rx_cons(sc);
7471 hw_tx_cons = bce_get_hw_tx_cons(sc);
7472
7473 if ((hw_rx_cons == sc->hw_rx_cons) &&
7474 (hw_tx_cons == sc->hw_tx_cons))
7475 break;
7476
7477 }
7478
7479 bus_dmamap_sync(sc->status_tag, sc->status_map,
7480 BUS_DMASYNC_PREREAD);
7481
7482 /* Re-enable interrupts. */
7483 bce_enable_intr(sc, 0);
7484
7485 /* Handle any frames that arrived while handling the interrupt. */
7486 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
7487 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
7488 bce_start_locked(ifp);
7489
7490bce_intr_exit:
7491 BCE_UNLOCK(sc);
7492
7493 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7494}
7495
7496
7497/****************************************************************************/
7498/* Programs the various packet receive modes (broadcast and multicast). */
7499/* */
7500/* Returns: */
7501/* Nothing. */
7502/****************************************************************************/
7503static void
7504bce_set_rx_mode(struct bce_softc *sc)
7505{
7506 struct ifnet *ifp;
7507 struct ifmultiaddr *ifma;
7508 u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
7509 u32 rx_mode, sort_mode;
7510 int h, i;
7511
7512 DBENTER(BCE_VERBOSE_MISC);
7513
7514 BCE_LOCK_ASSERT(sc);
7515
7516 ifp = sc->bce_ifp;
7517
7518 /* Initialize receive mode default settings. */
7519 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
7520 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
7521 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
7522
7523 /*
7524 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
7525 	 * be enabled.
7526 */
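	/*
	 * In other words, received VLAN tags are left in the frame
	 * only when the driver was built without IFCAP_VLAN_HWTAGGING
	 * and no management firmware is active; whenever MFW is
	 * present the chip continues to strip tags.
	 */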
7527 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
7528 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
7529 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
7530
7531 /*
7532 * Check for promiscuous, all multicast, or selected
7533 * multicast address filtering.
7534 */
7535 if (ifp->if_flags & IFF_PROMISC) {
7536 DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
7537
7538 /* Enable promiscuous mode. */
7539 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
7540 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
7541 } else if (ifp->if_flags & IFF_ALLMULTI) {
7542 DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
7543
7544 /* Enable all multicast addresses. */
7545 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
7546 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
7547 }
7548 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
7549 } else {
7550 /* Accept one or more multicast(s). */
7551 DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
7552
7553 if_maddr_rlock(ifp);
7554 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
7555 if (ifma->ifma_addr->sa_family != AF_LINK)
7556 continue;
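			/*
			 * The low byte of the little-endian CRC picks
			 * the hash bit: bits 7-5 select one of the 8
			 * hash registers, bits 4-0 select the bit
			 * within that 32-bit register.
			 */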
7557 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
7558 ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
7559 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
7560 }
7561 if_maddr_runlock(ifp);
7562
7563 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
7564 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
7565
7566 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
7567 }
7568
7569 	/* Only make changes if the receive mode has actually changed. */
7570 if (rx_mode != sc->rx_mode) {
7571 DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: "
7572 "0x%08X\n", rx_mode);
7573
7574 sc->rx_mode = rx_mode;
7575 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
7576 }
7577
7578 	/* Disable and clear the existing sort before enabling a new sort. */
7579 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
7580 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
7581 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
7582
7583 DBEXIT(BCE_VERBOSE_MISC);
7584}
7585
7586
7587/****************************************************************************/
7588 /* Called periodically to update statistics from the controller's */
7589 /* statistics block. */
7590/* */
7591/* Returns: */
7592/* Nothing. */
7593/****************************************************************************/
7594static void
7595bce_stats_update(struct bce_softc *sc)
7596{
7597 struct ifnet *ifp;
7598 struct statistics_block *stats;
7599
7600 DBENTER(BCE_EXTREME_MISC);
7601
7602 ifp = sc->bce_ifp;
7603
7604 stats = (struct statistics_block *) sc->stats_block;
7605
7606 /*
7607 * Certain controllers don't report
7608 * carrier sense errors correctly.
7609 * See errata E11_5708CA0_1165.
7610 */
7611 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
7612 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
7613 ifp->if_oerrors +=
7614 (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
7615
7616 /*
7617 * Update the sysctl statistics from the
7618 * hardware statistics.
7619 */
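	/* The chip exports 64-bit counters as two 32-bit halves (hi/lo). */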
7620 sc->stat_IfHCInOctets =
7621 ((u64) stats->stat_IfHCInOctets_hi << 32) +
7622 (u64) stats->stat_IfHCInOctets_lo;
7623
7624 sc->stat_IfHCInBadOctets =
7625 ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
7626 (u64) stats->stat_IfHCInBadOctets_lo;
7627
7628 sc->stat_IfHCOutOctets =
7629 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
7630 (u64) stats->stat_IfHCOutOctets_lo;
7631
7632 sc->stat_IfHCOutBadOctets =
7633 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
7634 (u64) stats->stat_IfHCOutBadOctets_lo;
7635
7636 sc->stat_IfHCInUcastPkts =
7637 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
7638 (u64) stats->stat_IfHCInUcastPkts_lo;
7639
7640 sc->stat_IfHCInMulticastPkts =
7641 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
7642 (u64) stats->stat_IfHCInMulticastPkts_lo;
7643
7644 sc->stat_IfHCInBroadcastPkts =
7645 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
7646 (u64) stats->stat_IfHCInBroadcastPkts_lo;
7647
7648 sc->stat_IfHCOutUcastPkts =
7649 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
7650 (u64) stats->stat_IfHCOutUcastPkts_lo;
7651
7652 sc->stat_IfHCOutMulticastPkts =
7653 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
7654 (u64) stats->stat_IfHCOutMulticastPkts_lo;
7655
7656 sc->stat_IfHCOutBroadcastPkts =
7657 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
7658 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
7659
7660 /* ToDo: Preserve counters beyond 32 bits? */
7661 /* ToDo: Read the statistics from auto-clear regs? */
7662
7663 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
7664 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
7665
7666 sc->stat_Dot3StatsCarrierSenseErrors =
7667 stats->stat_Dot3StatsCarrierSenseErrors;
7668
7669 sc->stat_Dot3StatsFCSErrors =
7670 stats->stat_Dot3StatsFCSErrors;
7671
7672 sc->stat_Dot3StatsAlignmentErrors =
7673 stats->stat_Dot3StatsAlignmentErrors;
7674
7675 sc->stat_Dot3StatsSingleCollisionFrames =
7676 stats->stat_Dot3StatsSingleCollisionFrames;
7677
7678 sc->stat_Dot3StatsMultipleCollisionFrames =
7679 stats->stat_Dot3StatsMultipleCollisionFrames;
7680
7681 sc->stat_Dot3StatsDeferredTransmissions =
7682 stats->stat_Dot3StatsDeferredTransmissions;
7683
7684 sc->stat_Dot3StatsExcessiveCollisions =
7685 stats->stat_Dot3StatsExcessiveCollisions;
7686
7687 sc->stat_Dot3StatsLateCollisions =
7688 stats->stat_Dot3StatsLateCollisions;
7689
7690 sc->stat_EtherStatsCollisions =
7691 stats->stat_EtherStatsCollisions;
7692
7693 sc->stat_EtherStatsFragments =
7694 stats->stat_EtherStatsFragments;
7695
7696 sc->stat_EtherStatsJabbers =
7697 stats->stat_EtherStatsJabbers;
7698
7699 sc->stat_EtherStatsUndersizePkts =
7700 stats->stat_EtherStatsUndersizePkts;
7701
7702 sc->stat_EtherStatsOversizePkts =
7703 stats->stat_EtherStatsOversizePkts;
7704
7705 sc->stat_EtherStatsPktsRx64Octets =
7706 stats->stat_EtherStatsPktsRx64Octets;
7707
7708 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
7709 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
7710
7711 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
7712 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
7713
7714 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
7715 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
7716
7717 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
7718 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
7719
7720 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
7721 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
7722
7723 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
7724 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
7725
7726 sc->stat_EtherStatsPktsTx64Octets =
7727 stats->stat_EtherStatsPktsTx64Octets;
7728
7729 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
7730 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
7731
7732 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
7733 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
7734
7735 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
7736 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
7737
7738 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
7739 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
7740
7741 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
7742 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
7743
7744 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
7745 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
7746
7747 sc->stat_XonPauseFramesReceived =
7748 stats->stat_XonPauseFramesReceived;
7749
7750 sc->stat_XoffPauseFramesReceived =
7751 stats->stat_XoffPauseFramesReceived;
7752
7753 sc->stat_OutXonSent =
7754 stats->stat_OutXonSent;
7755
7756 sc->stat_OutXoffSent =
7757 stats->stat_OutXoffSent;
7758
7759 sc->stat_FlowControlDone =
7760 stats->stat_FlowControlDone;
7761
7762 sc->stat_MacControlFramesReceived =
7763 stats->stat_MacControlFramesReceived;
7764
7765 sc->stat_XoffStateEntered =
7766 stats->stat_XoffStateEntered;
7767
7768 sc->stat_IfInFramesL2FilterDiscards =
7769 stats->stat_IfInFramesL2FilterDiscards;
7770
7771 sc->stat_IfInRuleCheckerDiscards =
7772 stats->stat_IfInRuleCheckerDiscards;
7773
7774 sc->stat_IfInFTQDiscards =
7775 stats->stat_IfInFTQDiscards;
7776
7777 sc->stat_IfInMBUFDiscards =
7778 stats->stat_IfInMBUFDiscards;
7779
7780 sc->stat_IfInRuleCheckerP4Hit =
7781 stats->stat_IfInRuleCheckerP4Hit;
7782
7783 sc->stat_CatchupInRuleCheckerDiscards =
7784 stats->stat_CatchupInRuleCheckerDiscards;
7785
7786 sc->stat_CatchupInFTQDiscards =
7787 stats->stat_CatchupInFTQDiscards;
7788
7789 sc->stat_CatchupInMBUFDiscards =
7790 stats->stat_CatchupInMBUFDiscards;
7791
7792 sc->stat_CatchupInRuleCheckerP4Hit =
7793 stats->stat_CatchupInRuleCheckerP4Hit;
7794
7795 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
7796
7797 /*
7798 * Update the interface statistics from the
7799 * hardware statistics.
7800 */
7801 ifp->if_collisions =
7802 (u_long) sc->stat_EtherStatsCollisions;
7803
7804 /* ToDo: This method loses soft errors. */
7805 ifp->if_ierrors =
7806 (u_long) sc->stat_EtherStatsUndersizePkts +
7807 (u_long) sc->stat_EtherStatsOversizePkts +
7808 (u_long) sc->stat_IfInMBUFDiscards +
7809 (u_long) sc->stat_Dot3StatsAlignmentErrors +
7810 (u_long) sc->stat_Dot3StatsFCSErrors +
7811 (u_long) sc->stat_IfInRuleCheckerDiscards +
7812 (u_long) sc->stat_IfInFTQDiscards +
7813 (u_long) sc->com_no_buffers;
7814
7815 /* ToDo: This method loses soft errors. */
7816 ifp->if_oerrors =
7817 (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
7818 (u_long) sc->stat_Dot3StatsExcessiveCollisions +
7819 (u_long) sc->stat_Dot3StatsLateCollisions;
7820
7821 /* ToDo: Add additional statistics? */
7822
7823 DBEXIT(BCE_EXTREME_MISC);
7824}
7825
7826
7827/****************************************************************************/
7828/* Periodic function to notify the bootcode that the driver is still */
7829/* present. */
7830/* */
7831/* Returns: */
7832/* Nothing. */
7833/****************************************************************************/
7834static void
7835bce_pulse(void *xsc)
7836{
7837 struct bce_softc *sc = xsc;
7838 u32 msg;
7839
7840 DBENTER(BCE_EXTREME_MISC);
7841
7842 BCE_LOCK_ASSERT(sc);
7843
7844 /* Tell the firmware that the driver is still running. */
7845 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
7846 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
7847
7848 /* Update the bootcode condition. */
7849 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
7850
7851 /* Report whether the bootcode still knows the driver is running. */
7852 if (bootverbose) {
7853 if (sc->bce_drv_cardiac_arrest == FALSE) {
7854 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
7855 sc->bce_drv_cardiac_arrest = TRUE;
7856 BCE_PRINTF("%s(): Warning: bootcode "
7857 "thinks driver is absent! "
7858 "(bc_state = 0x%08X)\n",
7859 __FUNCTION__, sc->bc_state);
7860 }
7861 } else {
7862 /*
7863 			 * Not supported by all bootcode versions
7864 			 * (v5.0.11+ and v5.2.1+).  Older bootcode
7865 			 * requires the driver to reset the
7866 			 * controller to clear this condition.
7867 */
7868 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
7869 sc->bce_drv_cardiac_arrest = FALSE;
7870 BCE_PRINTF("%s(): Bootcode found the "
7871 "driver pulse! (bc_state = 0x%08X)\n",
7872 __FUNCTION__, sc->bc_state);
7873 }
7874 }
7875 }
7876
7877
7878 /* Schedule the next pulse. */
7879 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
7880
7881 DBEXIT(BCE_EXTREME_MISC);
7882}
7883
7884
7885/****************************************************************************/
7886/* Periodic function to perform maintenance tasks. */
7887/* */
7888/* Returns: */
7889/* Nothing. */
7890/****************************************************************************/
7891static void
7892bce_tick(void *xsc)
7893{
7894 struct bce_softc *sc = xsc;
7895 struct mii_data *mii;
7896 struct ifnet *ifp;
7897
7898 ifp = sc->bce_ifp;
7899
7900 DBENTER(BCE_EXTREME_MISC);
7901
7902 BCE_LOCK_ASSERT(sc);
7903
7904 /* Schedule the next tick. */
7905 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
7906
7907 /* Update the statistics from the hardware statistics block. */
7908 bce_stats_update(sc);
7909
7910 /* Top off the receive and page chains. */
7911#ifdef BCE_JUMBO_HDRSPLIT
7912 bce_fill_pg_chain(sc);
7913#endif
7914 bce_fill_rx_chain(sc);
7915
7916 	/* Check that the chip hasn't hung. */
7917 bce_watchdog(sc);
7918
7919 	/* If the link is already up then we're done. */
7920 if (sc->bce_link_up == TRUE)
7921 goto bce_tick_exit;
7922
7923 /* Link is down. Check what the PHY's doing. */
7924 mii = device_get_softc(sc->bce_miibus);
7925 mii_tick(mii);
7926
7927 /* Check if the link has come up. */
7928 if ((mii->mii_media_status & IFM_ACTIVE) &&
7929 (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
7930 DBPRINT(sc, BCE_VERBOSE_MISC,
7931 "%s(): Link up!\n", __FUNCTION__);
7932 sc->bce_link_up = TRUE;
7933 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
7934 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
7935 IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
7936 bootverbose)
7937 BCE_PRINTF("Gigabit link up!\n");
7938
7939 /* Now that link is up, handle any outstanding TX traffic. */
7940 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7941 DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found "
7942 "pending TX traffic.\n", __FUNCTION__);
7943 bce_start_locked(ifp);
7944 }
7945 }
7946
7947bce_tick_exit:
7948 DBEXIT(BCE_EXTREME_MISC);
7949 return;
7950}
7951
7952
7953#ifdef BCE_DEBUG
7954/****************************************************************************/
7955/* Allows the driver state to be dumped through the sysctl interface. */
7956/* */
7957/* Returns: */
7958/* 0 for success, positive value for failure. */
7959/****************************************************************************/
7960static int
7961bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
7962{
7963 int error;
7964 int result;
7965 struct bce_softc *sc;
7966
7967 result = -1;
7968 error = sysctl_handle_int(oidp, &result, 0, req);
7969
7970 if (error || !req->newptr)
7971 return (error);
7972
7973 if (result == 1) {
7974 sc = (struct bce_softc *)arg1;
7975 bce_dump_driver_state(sc);
7976 }
7977
7978 return error;
7979}
7980
7981
7982/****************************************************************************/
7983/* Allows the hardware state to be dumped through the sysctl interface. */
7984/* */
7985/* Returns: */
7986/* 0 for success, positive value for failure. */
7987/****************************************************************************/
7988static int
7989bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
7990{
7991 int error;
7992 int result;
7993 struct bce_softc *sc;
7994
7995 result = -1;
7996 error = sysctl_handle_int(oidp, &result, 0, req);
7997
7998 if (error || !req->newptr)
7999 return (error);
8000
8001 if (result == 1) {
8002 sc = (struct bce_softc *)arg1;
8003 bce_dump_hw_state(sc);
8004 }
8005
8006 return error;
8007}
8008
8009
8010/****************************************************************************/
8011/* Allows the status block to be dumped through the sysctl interface. */
8012/* */
8013/* Returns: */
8014/* 0 for success, positive value for failure. */
8015/****************************************************************************/
8016static int
8017bce_sysctl_status_block(SYSCTL_HANDLER_ARGS)
8018{
8019 int error;
8020 int result;
8021 struct bce_softc *sc;
8022
8023 result = -1;
8024 error = sysctl_handle_int(oidp, &result, 0, req);
8025
8026 if (error || !req->newptr)
8027 return (error);
8028
8029 if (result == 1) {
8030 sc = (struct bce_softc *)arg1;
8031 bce_dump_status_block(sc);
8032 }
8033
8034 return error;
8035}
8036
8037
8038/****************************************************************************/
8039/* Allows the stats block to be dumped through the sysctl interface. */
8040/* */
8041/* Returns: */
8042/* 0 for success, positive value for failure. */
8043/****************************************************************************/
8044static int
8045bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS)
8046{
8047 int error;
8048 int result;
8049 struct bce_softc *sc;
8050
8051 result = -1;
8052 error = sysctl_handle_int(oidp, &result, 0, req);
8053
8054 if (error || !req->newptr)
8055 return (error);
8056
8057 if (result == 1) {
8058 sc = (struct bce_softc *)arg1;
8059 bce_dump_stats_block(sc);
8060 }
8061
8062 return error;
8063}
8064
8065
8066/****************************************************************************/
8067/* Allows the stat counters to be cleared without unloading/reloading the */
8068/* driver. */
8069/* */
8070/* Returns: */
8071/* 0 for success, positive value for failure. */
8072/****************************************************************************/
8073static int
8074bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
8075{
8076 int error;
8077 int result;
8078 struct bce_softc *sc;
8079
8080 result = -1;
8081 error = sysctl_handle_int(oidp, &result, 0, req);
8082
8083 if (error || !req->newptr)
8084 return (error);
8085
8086 if (result == 1) {
8087 sc = (struct bce_softc *)arg1;
8088
8089 /* Clear the internal H/W statistics counters. */
8090 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
8091
8092 /* Reset the driver maintained statistics. */
8093 sc->interrupts_rx =
8094 sc->interrupts_tx = 0;
8095 sc->tso_frames_requested =
8096 sc->tso_frames_completed =
8097 sc->tso_frames_failed = 0;
8098 sc->rx_empty_count =
8099 sc->tx_full_count = 0;
8100 sc->rx_low_watermark = USABLE_RX_BD;
8101 sc->tx_hi_watermark = 0;
8102 sc->l2fhdr_error_count =
8103 sc->l2fhdr_error_sim_count = 0;
8104 sc->mbuf_alloc_failed_count =
8105 sc->mbuf_alloc_failed_sim_count = 0;
8106 sc->dma_map_addr_rx_failed_count =
8107 sc->dma_map_addr_tx_failed_count = 0;
8108 sc->mbuf_frag_count = 0;
8109 sc->csum_offload_tcp_udp =
8110 sc->csum_offload_ip = 0;
8111 sc->vlan_tagged_frames_rcvd =
8112 sc->vlan_tagged_frames_stripped = 0;
8113
8114 /* Clear firmware maintained statistics. */
8115 REG_WR_IND(sc, 0x120084, 0);
8116 }
8117
8118 return error;
8119}
8120
8121
8122/****************************************************************************/
8123/* Allows the bootcode state to be dumped through the sysctl interface. */
8124/* */
8125/* Returns: */
8126/* 0 for success, positive value for failure. */
8127/****************************************************************************/
8128static int
8129bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
8130{
8131 int error;
8132 int result;
8133 struct bce_softc *sc;
8134
8135 result = -1;
8136 error = sysctl_handle_int(oidp, &result, 0, req);
8137
8138 if (error || !req->newptr)
8139 return (error);
8140
8141 if (result == 1) {
8142 sc = (struct bce_softc *)arg1;
8143 bce_dump_bc_state(sc);
8144 }
8145
8146 return error;
8147}
8148
8149
8150/****************************************************************************/
8151/* Provides a sysctl interface to allow dumping the RX BD chain. */
8152/* */
8153/* Returns: */
8154/* 0 for success, positive value for failure. */
8155/****************************************************************************/
8156static int
8157bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
8158{
8159 int error;
8160 int result;
8161 struct bce_softc *sc;
8162
8163 result = -1;
8164 error = sysctl_handle_int(oidp, &result, 0, req);
8165
8166 if (error || !req->newptr)
8167 return (error);
8168
8169 if (result == 1) {
8170 sc = (struct bce_softc *)arg1;
8171 bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD);
8172 }
8173
8174 return error;
8175}
8176
8177
8178/****************************************************************************/
8179/* Provides a sysctl interface to allow dumping the RX MBUF chain. */
8180/* */
8181/* Returns: */
8182/* 0 for success, positive value for failure. */
8183/****************************************************************************/
8184static int
8185bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS)
8186{
8187 int error;
8188 int result;
8189 struct bce_softc *sc;
8190
8191 result = -1;
8192 error = sysctl_handle_int(oidp, &result, 0, req);
8193
8194 if (error || !req->newptr)
8195 return (error);
8196
8197 if (result == 1) {
8198 sc = (struct bce_softc *)arg1;
8199 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
8200 }
8201
8202 return error;
8203}
8204
8205
8206/****************************************************************************/
8207/* Provides a sysctl interface to allow dumping the TX chain. */
8208/* */
8209/* Returns: */
8210/* 0 for success, positive value for failure. */
8211/****************************************************************************/
8212static int
8213bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
8214{
8215 int error;
8216 int result;
8217 struct bce_softc *sc;
8218
8219 result = -1;
8220 error = sysctl_handle_int(oidp, &result, 0, req);
8221
8222 if (error || !req->newptr)
8223 return (error);
8224
8225 if (result == 1) {
8226 sc = (struct bce_softc *)arg1;
8227 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
8228 }
8229
8230 return error;
8231}
8232
8233
8234#ifdef BCE_JUMBO_HDRSPLIT
8235/****************************************************************************/
8236/* Provides a sysctl interface to allow dumping the page chain. */
8237/* */
8238/* Returns: */
8239/* 0 for success, positive value for failure. */
8240/****************************************************************************/
8241static int
8242bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
8243{
8244 int error;
8245 int result;
8246 struct bce_softc *sc;
8247
8248 result = -1;
8249 error = sysctl_handle_int(oidp, &result, 0, req);
8250
8251 if (error || !req->newptr)
8252 return (error);
8253
8254 if (result == 1) {
8255 sc = (struct bce_softc *)arg1;
8256 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
8257 }
8258
8259 return error;
8260}
8261#endif
8262
8263/****************************************************************************/
8264/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */
8265/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8266/* */
8267/* Returns: */
8268/* 0 for success, positive value for failure. */
8269/****************************************************************************/
8270static int
8271bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
8272{
8273 struct bce_softc *sc = (struct bce_softc *)arg1;
8274 int error;
8275 u32 result;
8276 u32 val[1];
8277 u8 *data = (u8 *) val;
8278
8279 result = -1;
8280 error = sysctl_handle_int(oidp, &result, 0, req);
8281 if (error || (req->newptr == NULL))
8282 return (error);
8283
8284 bce_nvram_read(sc, result, data, 4);
8285 BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
8286
8287 return (error);
8288}
8289
8290
8291/****************************************************************************/
8292/* Provides a sysctl interface to allow reading arbitrary registers in the */
8293/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8294/* */
8295/* Returns: */
8296/* 0 for success, positive value for failure. */
8297/****************************************************************************/
8298static int
8299bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
8300{
8301 struct bce_softc *sc = (struct bce_softc *)arg1;
8302 int error;
8303 u32 val, result;
8304
8305 result = -1;
8306 error = sysctl_handle_int(oidp, &result, 0, req);
8307 if (error || (req->newptr == NULL))
8308 return (error);
8309
8310 /* Make sure the register is accessible. */
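	/*
	 * Offsets below 0x8000 appear to be directly mapped and are
	 * read with REG_RD(); higher offsets, up to the limit checked
	 * below, go through the indirect register access path.
	 */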
8311 if (result < 0x8000) {
8312 val = REG_RD(sc, result);
8313 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
8314 } else if (result < 0x0280000) {
8315 val = REG_RD_IND(sc, result);
8316 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
8317 }
8318
8319 return (error);
8320}
8321
8322
8323/****************************************************************************/
8324/* Provides a sysctl interface to allow reading arbitrary PHY registers in */
8325/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8326/* */
8327/* Returns: */
8328/* 0 for success, positive value for failure. */
8329/****************************************************************************/
8330static int
8331bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
8332{
8333 struct bce_softc *sc;
8334 device_t dev;
8335 int error, result;
8336 u16 val;
8337
8338 result = -1;
8339 error = sysctl_handle_int(oidp, &result, 0, req);
8340 if (error || (req->newptr == NULL))
8341 return (error);
8342
8343 /* Make sure the register is accessible. */
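	/* Only the standard MII register space (0x00-0x1F) is read here. */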
8344 if (result < 0x20) {
8345 sc = (struct bce_softc *)arg1;
8346 dev = sc->bce_dev;
8347 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
8348 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
8349 }
8350 return (error);
8351}
8352
8353
8354static int
8355sysctl_nvram_dump(SYSCTL_HANDLER_ARGS)
8356{
8357 struct bce_softc *sc = (struct bce_softc *)arg1;
8358 int error, i;
8359
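	/*
	 * The flash contents are only (re)read when the caller supplies
	 * a buffer of exactly bce_flash_size bytes; otherwise whatever
	 * is already cached in nvram_buf is returned.
	 */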
8360 if (sc->nvram_buf == NULL) {
8361 sc->nvram_buf = malloc(sc->bce_flash_size,
8362 M_TEMP, M_ZERO | M_WAITOK);
8363 }
8364 if (sc->nvram_buf == NULL) {
8365 return(ENOMEM);
8366 }
8367 if (req->oldlen == sc->bce_flash_size) {
8368 for (i = 0; i < sc->bce_flash_size; i++) {
8369 bce_nvram_read(sc, i, &sc->nvram_buf[i], 1);
8370 }
8371 }
8372
8373 error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size);
8374
8375 return error;
8376}
8377
8378#ifdef BCE_NVRAM_WRITE_SUPPORT
8379static int
8380sysctl_nvram_write(SYSCTL_HANDLER_ARGS)
8381{
8382 struct bce_softc *sc = (struct bce_softc *)arg1;
8383 int error;
8384
8385 if (sc->nvram_buf == NULL) {
8386 sc->nvram_buf = malloc(sc->bce_flash_size,
8387 M_TEMP, M_ZERO | M_WAITOK);
8388 }
8389 if (sc->nvram_buf == NULL) {
8390 return(ENOMEM);
8391 }
8392 bzero(sc->nvram_buf, sc->bce_flash_size);
8393 error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size);
8394
8395 if (req->newlen == sc->bce_flash_size) {
8396 bce_nvram_write(sc, 0, sc->nvram_buf , sc->bce_flash_size);
8397 }
8398
8399
8400 return error;
8401}
8402#endif
8403
8404
8405/****************************************************************************/
8406/* Provides a sysctl interface to allow reading a CID. */
8407/* */
8408/* Returns: */
8409/* 0 for success, positive value for failure. */
8410/****************************************************************************/
8411static int
8412bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
8413{
8414 struct bce_softc *sc;
8415 int error, result;
8416
8417 result = -1;
8418 error = sysctl_handle_int(oidp, &result, 0, req);
8419 if (error || (req->newptr == NULL))
8420 return (error);
8421
8422 /* Make sure the register is accessible. */
8423 if (result <= TX_CID) {
8424 sc = (struct bce_softc *)arg1;
8425 bce_dump_ctx(sc, result);
8426 }
8427
8428 return (error);
8429}
8430
8431
8432 /****************************************************************************/
8433 /* Provides a sysctl interface to force the driver to dump state and */
8434/* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8435/* */
8436/* Returns: */
8437/* 0 for success, positive value for failure. */
8438/****************************************************************************/
8439static int
8440bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
8441{
8442 int error;
8443 int result;
8444 struct bce_softc *sc;
8445
8446 result = -1;
8447 error = sysctl_handle_int(oidp, &result, 0, req);
8448
8449 if (error || !req->newptr)
8450 return (error);
8451
8452 if (result == 1) {
8453 sc = (struct bce_softc *)arg1;
8454 bce_breakpoint(sc);
8455 }
8456
8457 return error;
8458}
8459#endif
8460
8461
8462/****************************************************************************/
8463/* Adds any sysctl parameters for tuning or debugging purposes. */
8464/* */
8465/* Returns: */
8466 /* Nothing. */
8467/****************************************************************************/
8468static void
8469bce_add_sysctls(struct bce_softc *sc)
8470{
8471 struct sysctl_ctx_list *ctx;
8472 struct sysctl_oid_list *children;
8473
8474 DBENTER(BCE_VERBOSE_MISC);
8475
8476 ctx = device_get_sysctl_ctx(sc->bce_dev);
8477 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
8478
8479#ifdef BCE_DEBUG
8480 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8481 "l2fhdr_error_sim_control",
8482 CTLFLAG_RW, &l2fhdr_error_sim_control,
8483 0, "Debug control to force l2fhdr errors");
8484
8485 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8486 "l2fhdr_error_sim_count",
8487 CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
8488 0, "Number of simulated l2_fhdr errors");
8489#endif
8490
8491 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8492 "l2fhdr_error_count",
8493 CTLFLAG_RD, &sc->l2fhdr_error_count,
8494 0, "Number of l2_fhdr errors");
8495
8496#ifdef BCE_DEBUG
8497 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8498 "mbuf_alloc_failed_sim_control",
8499 CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
8500 0, "Debug control to force mbuf allocation failures");
8501
8502 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8503 "mbuf_alloc_failed_sim_count",
8504 CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
8505 0, "Number of simulated mbuf cluster allocation failures");
8506#endif
8507
8508 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8509 "mbuf_alloc_failed_count",
8510 CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
8511 0, "Number of mbuf allocation failures");
8512
8513 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8514 "mbuf_frag_count",
8515 CTLFLAG_RD, &sc->mbuf_frag_count,
8516 0, "Number of fragmented mbufs");
8517
8518#ifdef BCE_DEBUG
8519 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8520 "dma_map_addr_failed_sim_control",
8521 CTLFLAG_RW, &dma_map_addr_failed_sim_control,
8522 0, "Debug control to force DMA mapping failures");
8523
8524 /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
8525 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8526 "dma_map_addr_failed_sim_count",
8527 CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
8528 0, "Number of simulated DMA mapping failures");
8529
8530#endif
8531
8532 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8533 "dma_map_addr_rx_failed_count",
8534 CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
8535 0, "Number of RX DMA mapping failures");
8536
8537 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8538 "dma_map_addr_tx_failed_count",
8539 CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
8540 0, "Number of TX DMA mapping failures");
8541
8542#ifdef BCE_DEBUG
8543 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8544 "unexpected_attention_sim_control",
8545 CTLFLAG_RW, &unexpected_attention_sim_control,
8546 0, "Debug control to simulate unexpected attentions");
8547
8548 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8549 "unexpected_attention_sim_count",
8550 CTLFLAG_RW, &sc->unexpected_attention_sim_count,
8551 0, "Number of simulated unexpected attentions");
8552#endif
8553
8554 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8555 "unexpected_attention_count",
8556 CTLFLAG_RW, &sc->unexpected_attention_count,
8557 0, "Number of unexpected attentions");
8558
8559#ifdef BCE_DEBUG
8560 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8561 "debug_bootcode_running_failure",
8562 CTLFLAG_RW, &bootcode_running_failure_sim_control,
8563 0, "Debug control to force bootcode running failures");
8564
8565 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8566 "rx_low_watermark",
8567 CTLFLAG_RD, &sc->rx_low_watermark,
8568 0, "Lowest level of free rx_bd's");
8569
8570 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8571 "rx_empty_count",
8572 CTLFLAG_RD, &sc->rx_empty_count,
8573 0, "Number of times the RX chain was empty");
8574
8575 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8576 "tx_hi_watermark",
8577 CTLFLAG_RD, &sc->tx_hi_watermark,
8578 0, "Highest level of used tx_bd's");
8579
8580 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8581 "tx_full_count",
8582 CTLFLAG_RD, &sc->tx_full_count,
8583 0, "Number of times the TX chain was full");
8584
8585 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8586 "tso_frames_requested",
8587 CTLFLAG_RD, &sc->tso_frames_requested,
8588 0, "Number of TSO frames requested");
8589
8590 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8591 "tso_frames_completed",
8592 CTLFLAG_RD, &sc->tso_frames_completed,
8593 0, "Number of TSO frames completed");
8594
8595 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8596 "tso_frames_failed",
8597 CTLFLAG_RD, &sc->tso_frames_failed,
8598 0, "Number of TSO frames failed");
8599
8600 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8601 "csum_offload_ip",
8602 CTLFLAG_RD, &sc->csum_offload_ip,
8603 0, "Number of IP checksum offload frames");
8604
8605 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8606 "csum_offload_tcp_udp",
8607 CTLFLAG_RD, &sc->csum_offload_tcp_udp,
8608 0, "Number of TCP/UDP checksum offload frames");
8609
8610 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8611 "vlan_tagged_frames_rcvd",
8612 CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd,
8613 0, "Number of VLAN tagged frames received");
8614
8615 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8616 "vlan_tagged_frames_stripped",
8617 CTLFLAG_RD, &sc->vlan_tagged_frames_stripped,
8618 0, "Number of VLAN tagged frames stripped");
8619
8620 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8621 "interrupts_rx",
8622 CTLFLAG_RD, &sc->interrupts_rx,
8623 0, "Number of RX interrupts");
8624
8625 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8626 "interrupts_tx",
8627 CTLFLAG_RD, &sc->interrupts_tx,
8628 0, "Number of TX interrupts");
8629 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8630 "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD,
8631 (void *)sc, 0,
8632 sysctl_nvram_dump, "S", "");
8633#ifdef BCE_NVRAM_WRITE_SUPPORT
8634 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8635 "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR,
8636 (void *)sc, 0,
8637 sysctl_nvram_write, "S", "");
8638#endif
8639#endif
8640
8641 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8642 "stat_IfHcInOctets",
8643 CTLFLAG_RD, &sc->stat_IfHCInOctets,
8644 "Bytes received");
8645
8646 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8647 "stat_IfHCInBadOctets",
8648 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
8649 "Bad bytes received");
8650
8651 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8652 "stat_IfHCOutOctets",
8653 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
8654 "Bytes sent");
8655
8656 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8657 "stat_IfHCOutBadOctets",
8658 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
8659 "Bad bytes sent");
8660
8661 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8662 "stat_IfHCInUcastPkts",
8663 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
8664 "Unicast packets received");
8665
8666 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8667 "stat_IfHCInMulticastPkts",
8668 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
8669 "Multicast packets received");
8670
8671 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8672 "stat_IfHCInBroadcastPkts",
8673 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
8674 "Broadcast packets received");
8675
8676 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8677 "stat_IfHCOutUcastPkts",
8678 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
8679 "Unicast packets sent");
8680
8681 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8682 "stat_IfHCOutMulticastPkts",
8683 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
8684 "Multicast packets sent");
8685
8686 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8687 "stat_IfHCOutBroadcastPkts",
8688 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
8689 "Broadcast packets sent");
8690
8691 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8692 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
8693 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
8694 0, "Internal MAC transmit errors");
8695
8696 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8697 "stat_Dot3StatsCarrierSenseErrors",
8698 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
8699 0, "Carrier sense errors");
8700
8701 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8702 "stat_Dot3StatsFCSErrors",
8703 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
8704 0, "Frame check sequence errors");
8705
8706 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8707 "stat_Dot3StatsAlignmentErrors",
8708 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
8709 0, "Alignment errors");
8710
8711 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8712 "stat_Dot3StatsSingleCollisionFrames",
8713 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
8714 0, "Single Collision Frames");
8715
8716 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8717 "stat_Dot3StatsMultipleCollisionFrames",
8718 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
8719 0, "Multiple Collision Frames");
8720
8721 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8722 "stat_Dot3StatsDeferredTransmissions",
8723 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
8724 0, "Deferred Transmissions");
8725
8726 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8727 "stat_Dot3StatsExcessiveCollisions",
8728 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
8729 0, "Excessive Collisions");
8730
8731 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8732 "stat_Dot3StatsLateCollisions",
8733 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
8734 0, "Late Collisions");
8735
8736 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8737 "stat_EtherStatsCollisions",
8738 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
8739 0, "Collisions");
8740
8741 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8742 "stat_EtherStatsFragments",
8743 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
8744 0, "Fragments");
8745
8746 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8747 "stat_EtherStatsJabbers",
8748 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
8749 0, "Jabbers");
8750
8751 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8752 "stat_EtherStatsUndersizePkts",
8753 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
8754 0, "Undersize packets");
8755
8756 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8757 "stat_EtherStatsOversizePkts",
8758 CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts,
8759 0, "stat_EtherStatsOversizePkts");
8760
8761 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8762 "stat_EtherStatsPktsRx64Octets",
8763 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
8764 	    0, "64 byte packets received");
8765
8766 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8767 "stat_EtherStatsPktsRx65Octetsto127Octets",
8768 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
8769 	    0, "65 to 127 byte packets received");
8770
8771 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8772 "stat_EtherStatsPktsRx128Octetsto255Octets",
8773 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
8774 	    0, "128 to 255 byte packets received");
8775
8776 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8777 "stat_EtherStatsPktsRx256Octetsto511Octets",
8778 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
8779 	    0, "256 to 511 byte packets received");
8780
8781 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8782 "stat_EtherStatsPktsRx512Octetsto1023Octets",
8783 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
8784 	    0, "512 to 1023 byte packets received");
8785
8786 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8787 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
8788 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
8789 	    0, "1024 to 1522 byte packets received");
8790
8791 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8792 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
8793 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
8794 	    0, "1523 to 9022 byte packets received");
8795
8796 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8797 "stat_EtherStatsPktsTx64Octets",
8798 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
8799 	    0, "64 byte packets sent");
8800
8801 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8802 "stat_EtherStatsPktsTx65Octetsto127Octets",
8803 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
8804 	    0, "65 to 127 byte packets sent");
8805
8806 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8807 "stat_EtherStatsPktsTx128Octetsto255Octets",
8808 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
8809 	    0, "128 to 255 byte packets sent");
8810
8811 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8812 "stat_EtherStatsPktsTx256Octetsto511Octets",
8813 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
8814 	    0, "256 to 511 byte packets sent");
8815
8816 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8817 "stat_EtherStatsPktsTx512Octetsto1023Octets",
8818 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
8819 	    0, "512 to 1023 byte packets sent");
8820
8821 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8822 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
8823 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
8824 	    0, "1024 to 1522 byte packets sent");
8825
8826 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8827 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
8828 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
8829 	    0, "1523 to 9022 byte packets sent");
8830
8831 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8832 "stat_XonPauseFramesReceived",
8833 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
8834 	    0, "XON pause frames received");
8835
8836 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8837 "stat_XoffPauseFramesReceived",
8838 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
8839 0, "XOFF pause frames received");
8840
8841 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8842 "stat_OutXonSent",
8843 CTLFLAG_RD, &sc->stat_OutXonSent,
8844 0, "XON pause frames sent");
8845
8846 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8847 "stat_OutXoffSent",
8848 CTLFLAG_RD, &sc->stat_OutXoffSent,
8849 0, "XOFF pause frames sent");
8850
8851 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8852 "stat_FlowControlDone",
8853 CTLFLAG_RD, &sc->stat_FlowControlDone,
8854 0, "Flow control done");
8855
8856 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8857 "stat_MacControlFramesReceived",
8858 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
8859 0, "MAC control frames received");
8860
8861 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8862 "stat_XoffStateEntered",
8863 CTLFLAG_RD, &sc->stat_XoffStateEntered,
8864 0, "XOFF state entered");
8865
8866 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8867 "stat_IfInFramesL2FilterDiscards",
8868 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
8869 0, "Received L2 packets discarded");
8870
8871 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8872 "stat_IfInRuleCheckerDiscards",
8873 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
8874 0, "Received packets discarded by rule");
8875
8876 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8877 "stat_IfInFTQDiscards",
8878 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
8879 0, "Received packet FTQ discards");
8880
8881 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8882 "stat_IfInMBUFDiscards",
8883 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
8884 0, "Received packets discarded due to lack "
8885 "of controller buffer memory");
8886
8887 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8888 "stat_IfInRuleCheckerP4Hit",
8889 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
8890 0, "Received packets rule checker hits");
8891
8892 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8893 "stat_CatchupInRuleCheckerDiscards",
8894 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
8895 0, "Received packets discarded in Catchup path");
8896
8897 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8898 "stat_CatchupInFTQDiscards",
8899 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
8900 0, "Received packets discarded in FTQ in Catchup path");
8901
8902 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8903 "stat_CatchupInMBUFDiscards",
8904 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
8905 0, "Received packets discarded in controller "
8906 "buffer memory in Catchup path");
8907
8908 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8909 "stat_CatchupInRuleCheckerP4Hit",
8910 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
8911 0, "Received packets rule checker hits in Catchup path");
8912
8913 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8914 "com_no_buffers",
8915 CTLFLAG_RD, &sc->com_no_buffers,
8916 0, "Valid packets received but no RX buffers available");
8917
8918#ifdef BCE_DEBUG
8919 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8920 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
8921 (void *)sc, 0,
8922 	    bce_sysctl_driver_state, "I", "Driver state information");
8923
8924 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8925 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
8926 (void *)sc, 0,
8927 bce_sysctl_hw_state, "I", "Hardware state information");
8928
8929 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8930 "status_block", CTLTYPE_INT | CTLFLAG_RW,
8931 (void *)sc, 0,
8932 bce_sysctl_status_block, "I", "Dump status block");
8933
8934 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8935 "stats_block", CTLTYPE_INT | CTLFLAG_RW,
8936 (void *)sc, 0,
8937 bce_sysctl_stats_block, "I", "Dump statistics block");
8938
8939 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8940 "stats_clear", CTLTYPE_INT | CTLFLAG_RW,
8941 (void *)sc, 0,
8942 bce_sysctl_stats_clear, "I", "Clear statistics block");
8943
8944 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8945 "bc_state", CTLTYPE_INT | CTLFLAG_RW,
8946 (void *)sc, 0,
8947 bce_sysctl_bc_state, "I", "Bootcode state information");
8948
8949 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8950 "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW,
8951 (void *)sc, 0,
8952 bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain");
8953
8954 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8955 "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW,
8956 (void *)sc, 0,
8957 bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain");
8958
8959 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8960 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
8961 (void *)sc, 0,
8962 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
8963
8964#ifdef BCE_JUMBO_HDRSPLIT
8965 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8966 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
8967 (void *)sc, 0,
8968 bce_sysctl_dump_pg_chain, "I", "Dump page chain");
8969#endif
8970 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8971 "dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
8972 (void *)sc, 0,
8973 bce_sysctl_dump_ctx, "I", "Dump context memory");
8974
8975 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8976 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
8977 (void *)sc, 0,
8978 bce_sysctl_breakpoint, "I", "Driver breakpoint");
8979
8980 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8981 "reg_read", CTLTYPE_INT | CTLFLAG_RW,
8982 (void *)sc, 0,
8983 bce_sysctl_reg_read, "I", "Register read");
8984
8985 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8986 "nvram_read", CTLTYPE_INT | CTLFLAG_RW,
8987 (void *)sc, 0,
8988 bce_sysctl_nvram_read, "I", "NVRAM read");
8989
8990 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8991 "phy_read", CTLTYPE_INT | CTLFLAG_RW,
8992 (void *)sc, 0,
8993 bce_sysctl_phy_read, "I", "PHY register read");
8994
8995#endif
8996
8997 DBEXIT(BCE_VERBOSE_MISC);
8998}
8999
9000
9001/****************************************************************************/
9002/* BCE Debug Routines */
9003/****************************************************************************/
9004#ifdef BCE_DEBUG
9005
9006/****************************************************************************/
9007/* Freezes the controller to allow for a cohesive state dump. */
9008/* */
9009/* Returns: */
9010/* Nothing. */
9011/****************************************************************************/
9012static __attribute__ ((noinline)) void
9013bce_freeze_controller(struct bce_softc *sc)
9014{
9015 u32 val;
9016 val = REG_RD(sc, BCE_MISC_COMMAND);
9017 val |= BCE_MISC_COMMAND_DISABLE_ALL;
9018 REG_WR(sc, BCE_MISC_COMMAND, val);
9019}
9020
9021
9022/****************************************************************************/
9023/* Unfreezes the controller after a freeze operation. This may not always */
9024/* work and the controller will require a reset! */
9025/* */
9026/* Returns: */
9027/* Nothing. */
9028/****************************************************************************/
9029static __attribute__ ((noinline)) void
9030bce_unfreeze_controller(struct bce_softc *sc)
9031{
9032 u32 val;
9033 val = REG_RD(sc, BCE_MISC_COMMAND);
9034 val |= BCE_MISC_COMMAND_ENABLE_ALL;
9035 REG_WR(sc, BCE_MISC_COMMAND, val);
9036}
9037
9038
9039/****************************************************************************/
9040/* Prints out Ethernet frame information from an mbuf. */
9041/* */
9042/* Partially decode an Ethernet frame to look at some important headers. */
9043/* */
9044/* Returns: */
9045/* Nothing. */
9046/****************************************************************************/
9047static __attribute__ ((noinline)) void
9048bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
9049{
9050 struct ether_vlan_header *eh;
9051 u16 etype;
9052 int ehlen;
9053 struct ip *ip;
9054 struct tcphdr *th;
9055 struct udphdr *uh;
9056 struct arphdr *ah;
9057
9058 BCE_PRINTF(
9059 "-----------------------------"
9060 " Frame Decode "
9061 "-----------------------------\n");
9062
9063 eh = mtod(m, struct ether_vlan_header *);
9064
9065 /* Handle VLAN encapsulation if present. */
9066 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
9067 etype = ntohs(eh->evl_proto);
9068 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9069 } else {
9070 etype = ntohs(eh->evl_encap_proto);
9071 ehlen = ETHER_HDR_LEN;
9072 }
9073
9074 /* ToDo: Add VLAN output. */
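	/* Note: the kernel printf(9) %6D conversion prints six bytes     */
	/* separated by the given string, formatting the MAC addresses.   */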
9075 BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
9076 eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);
9077
9078 switch (etype) {
9079 case ETHERTYPE_IP:
9080 ip = (struct ip *)(m->m_data + ehlen);
9081 		BCE_PRINTF("--ip: dest = 0x%08X, src = 0x%08X, "
9082 "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n",
9083 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
9084 ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));
9085
9086 switch (ip->ip_p) {
9087 case IPPROTO_TCP:
9088 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
9089 BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = "
9090 "%d bytes, flags = 0x%b, csum = 0x%04X\n",
9091 ntohs(th->th_dport), ntohs(th->th_sport),
9092 (th->th_off << 2), th->th_flags,
9093 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST"
9094 "\02SYN\01FIN", ntohs(th->th_sum));
9095 break;
9096 case IPPROTO_UDP:
9097 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
9098 BCE_PRINTF("-udp: dest = %d, src = %d, len = %d "
9099 "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport),
9100 ntohs(uh->uh_sport), ntohs(uh->uh_ulen),
9101 ntohs(uh->uh_sum));
9102 break;
9103 case IPPROTO_ICMP:
9104 BCE_PRINTF("icmp:\n");
9105 break;
9106 default:
9107 BCE_PRINTF("----: Other IP protocol.\n");
9108 }
9109 break;
9110 case ETHERTYPE_IPV6:
9111 BCE_PRINTF("ipv6: No decode supported.\n");
9112 break;
9113 case ETHERTYPE_ARP:
9114 BCE_PRINTF("-arp: ");
9115 ah = (struct arphdr *) (m->m_data + ehlen);
9116 switch (ntohs(ah->ar_op)) {
9117 case ARPOP_REVREQUEST:
9118 printf("reverse ARP request\n");
9119 break;
9120 case ARPOP_REVREPLY:
9121 printf("reverse ARP reply\n");
9122 break;
9123 case ARPOP_REQUEST:
9124 printf("ARP request\n");
9125 break;
9126 case ARPOP_REPLY:
9127 printf("ARP reply\n");
9128 break;
9129 default:
9130 printf("other ARP operation\n");
9131 }
9132 break;
9133 default:
9134 BCE_PRINTF("----: Other protocol.\n");
9135 }
9136
9137 BCE_PRINTF(
9138 "-----------------------------"
9139 "--------------"
9140 "-----------------------------\n");
9141}
9142
9143
9144/****************************************************************************/
9145/* Prints out information about an mbuf. */
9146/* */
9147/* Returns: */
9148/* Nothing. */
9149/****************************************************************************/
9150static __attribute__ ((noinline)) void
9151bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
9152{
9153 struct mbuf *mp = m;
9154
9155 if (m == NULL) {
9156 BCE_PRINTF("mbuf: null pointer\n");
9157 return;
9158 }
9159
9160 while (mp) {
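		/* The %b conversion decodes a bit mask: the leading \20 selects */
		/* hexadecimal output and each \<bit>NAME pair labels one bit.   */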
9161 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, "
9162 "m_data = %p\n", mp, mp->m_len, mp->m_flags,
9163 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data);
9164
9165 if (mp->m_flags & M_PKTHDR) {
9166 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, "
9167 "csum_flags = %b\n", mp->m_pkthdr.len,
9168 mp->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG"
9169 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
9170 "\22M_PROMISC\23M_NOFREE",
9171 mp->m_pkthdr.csum_flags,
9172 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
9173 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
9174 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
9175 "\14CSUM_PSEUDO_HDR");
9176 }
9177
9178 if (mp->m_flags & M_EXT) {
9179 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
9180 mp->m_ext.ext_buf, mp->m_ext.ext_size);
9181 switch (mp->m_ext.ext_type) {
9182 case EXT_CLUSTER:
9183 printf("EXT_CLUSTER\n"); break;
9184 case EXT_SFBUF:
9185 printf("EXT_SFBUF\n"); break;
9186 case EXT_JUMBO9:
9187 printf("EXT_JUMBO9\n"); break;
9188 case EXT_JUMBO16:
9189 printf("EXT_JUMBO16\n"); break;
9190 case EXT_PACKET:
9191 printf("EXT_PACKET\n"); break;
9192 case EXT_MBUF:
9193 printf("EXT_MBUF\n"); break;
9194 case EXT_NET_DRV:
9195 printf("EXT_NET_DRV\n"); break;
9196 case EXT_MOD_TYPE:
9197 				printf("EXT_MOD_TYPE\n"); break;
9198 case EXT_DISPOSABLE:
9199 printf("EXT_DISPOSABLE\n"); break;
9200 case EXT_EXTREF:
9201 printf("EXT_EXTREF\n"); break;
9202 default:
9203 printf("UNKNOWN\n");
9204 }
9205 }
9206
9207 mp = mp->m_next;
9208 }
9209}
9210
9211
9212/****************************************************************************/
9213/* Prints out the mbufs in the TX mbuf chain. */
9214/* */
9215/* Returns: */
9216/* Nothing. */
9217/****************************************************************************/
9218static __attribute__ ((noinline)) void
9219bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
9220{
9221 struct mbuf *m;
9222
9223 BCE_PRINTF(
9224 "----------------------------"
9225 " tx mbuf data "
9226 "----------------------------\n");
9227
9228 for (int i = 0; i < count; i++) {
9229 m = sc->tx_mbuf_ptr[chain_prod];
9230 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
9231 bce_dump_mbuf(sc, m);
9232 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
9233 }
9234
9235 BCE_PRINTF(
9236 "----------------------------"
9237 "----------------"
9238 "----------------------------\n");
9239}
9240
9241
9242/****************************************************************************/
9243/* Prints out the mbufs in the RX mbuf chain. */
9244/* */
9245/* Returns: */
9246/* Nothing. */
9247/****************************************************************************/
9248static __attribute__ ((noinline)) void
9249bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
9250{
9251 struct mbuf *m;
9252
9253 BCE_PRINTF(
9254 "----------------------------"
9255 " rx mbuf data "
9256 "----------------------------\n");
9257
9258 for (int i = 0; i < count; i++) {
9259 m = sc->rx_mbuf_ptr[chain_prod];
9260 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
9261 bce_dump_mbuf(sc, m);
9262 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
9263 }
9264
9265
9266 BCE_PRINTF(
9267 "----------------------------"
9268 "----------------"
9269 "----------------------------\n");
9270}
9271
9272
9273#ifdef BCE_JUMBO_HDRSPLIT
9274/****************************************************************************/
9275/* Prints out the mbufs in the mbuf page chain. */
9276/* */
9277/* Returns: */
9278/* Nothing. */
9279/****************************************************************************/
9280static __attribute__ ((noinline)) void
9281bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
9282{
9283 struct mbuf *m;
9284
9285 BCE_PRINTF(
9286 "----------------------------"
9287 " pg mbuf data "
9288 "----------------------------\n");
9289
9290 for (int i = 0; i < count; i++) {
9291 m = sc->pg_mbuf_ptr[chain_prod];
9292 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
9293 bce_dump_mbuf(sc, m);
9294 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
9295 }
9296
9297
9298 BCE_PRINTF(
9299 "----------------------------"
9300 "----------------"
9301 "----------------------------\n");
9302}
9303#endif
9304
9305
9306/****************************************************************************/
9307/* Prints out a tx_bd structure. */
9308/* */
9309/* Returns: */
9310/* Nothing. */
9311/****************************************************************************/
9312static __attribute__ ((noinline)) void
9313bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
9314{
9315 int i = 0;
9316
9317 if (idx > MAX_TX_BD)
9318 /* Index out of range. */
9319 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
9320 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
9321 /* TX Chain page pointer. */
9322 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
9323 "pointer\n", idx, txbd->tx_bd_haddr_hi,
9324 txbd->tx_bd_haddr_lo);
9325 else {
9326 /* Normal tx_bd entry. */
9327 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
9328 "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = "
9329 "0x%04X (", idx, txbd->tx_bd_haddr_hi,
9330 txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes,
9331 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
9332
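		/* Decode the flag bits by name; i counts the names already */
		/* printed so that a '|' separator is placed between them.  */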
9333 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) {
9334 if (i>0)
9335 printf("|");
9336 printf("CONN_FAULT");
9337 i++;
9338 }
9339
9340 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) {
9341 if (i>0)
9342 printf("|");
9343 printf("TCP_UDP_CKSUM");
9344 i++;
9345 }
9346
9347 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) {
9348 if (i>0)
9349 printf("|");
9350 printf("IP_CKSUM");
9351 i++;
9352 }
9353
9354 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) {
9355 if (i>0)
9356 printf("|");
9357 printf("VLAN");
9358 i++;
9359 }
9360
9361 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) {
9362 if (i>0)
9363 printf("|");
9364 printf("COAL_NOW");
9365 i++;
9366 }
9367
9368 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) {
9369 if (i>0)
9370 printf("|");
9371 printf("DONT_GEN_CRC");
9372 i++;
9373 }
9374
9375 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) {
9376 if (i>0)
9377 printf("|");
9378 printf("START");
9379 i++;
9380 }
9381
9382 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) {
9383 if (i>0)
9384 printf("|");
9385 printf("END");
9386 i++;
9387 }
9388
9389 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) {
9390 if (i>0)
9391 printf("|");
9392 printf("LSO");
9393 i++;
9394 }
9395
9396 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) {
9397 if (i>0)
9398 printf("|");
9399 			printf("SW_OPTION=%d", ((txbd->tx_bd_flags &
9400 				TX_BD_FLAGS_SW_OPTION_WORD) >> 8));
			i++;
9401 }
9402
9403 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) {
9404 if (i>0)
9405 printf("|");
9406 printf("SW_FLAGS");
9407 i++;
9408 }
9409
9410 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) {
9411 if (i>0)
9412 printf("|");
9413 			printf("SNAP)\n");
9414 } else {
9415 printf(")\n");
9416 }
9417 }
9418}
9419
9420
9421/****************************************************************************/
9422/* Prints out a rx_bd structure. */
9423/* */
9424/* Returns: */
9425/* Nothing. */
9426/****************************************************************************/
9427static __attribute__ ((noinline)) void
9428bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
9429{
9430 if (idx > MAX_RX_BD)
9431 /* Index out of range. */
9432 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
9433 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
9434 /* RX Chain page pointer. */
9435 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
9436 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
9437 rxbd->rx_bd_haddr_lo);
9438 else
9439 /* Normal rx_bd entry. */
9440 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
9441 "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi,
9442 rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len,
9443 rxbd->rx_bd_flags);
9444}
9445
9446
9447#ifdef BCE_JUMBO_HDRSPLIT
9448/****************************************************************************/
9449/* Prints out a rx_bd structure in the page chain. */
9450/* */
9451/* Returns: */
9452/* Nothing. */
9453/****************************************************************************/
9454static __attribute__ ((noinline)) void
9455bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
9456{
9457 if (idx > MAX_PG_BD)
9458 /* Index out of range. */
9459 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
9460 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
9461 /* Page Chain page pointer. */
9462 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
9463 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
9464 else
9465 /* Normal rx_bd entry. */
9466 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
9467 "flags = 0x%08X\n", idx,
9468 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
9469 pgbd->rx_bd_len, pgbd->rx_bd_flags);
9470}
9471#endif
9472
9473
9474/****************************************************************************/
9475/* Prints out a l2_fhdr structure. */
9476/* */
9477/* Returns: */
9478/* Nothing. */
9479/****************************************************************************/
9480static __attribute__ ((noinline)) void
9481bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
9482{
9483 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
9484 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
9485 "tcp_udp_xsum = 0x%04X\n", idx,
9486 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
9487 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
9488 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
9489}
9490
9491
9492/****************************************************************************/
9493/* Prints out context memory info. (Only useful for CID 0 to 16.) */
9494/* */
9495/* Returns: */
9496/* Nothing. */
9497/****************************************************************************/
9498static __attribute__ ((noinline)) void
9499bce_dump_ctx(struct bce_softc *sc, u16 cid)
9500{
9501 if (cid > TX_CID) {
9502 BCE_PRINTF(" Unknown CID\n");
9503 return;
9504 }
9505
9506 BCE_PRINTF(
9507 "----------------------------"
9508 " CTX Data "
9509 "----------------------------\n");
9510
9511 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid);
9512
9513 if (cid == RX_CID) {
9514 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
9515 "producer index\n",
9516 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
9517 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host "
9518 "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
9519 BCE_L2CTX_RX_HOST_BSEQ));
9520 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
9521 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
9522 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
9523 "descriptor address\n",
9524 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
9525 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
9526 "descriptor address\n",
9527 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
9528 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer "
9529 "index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9530 BCE_L2CTX_RX_NX_BDIDX));
9531 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
9532 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9533 BCE_L2CTX_RX_HOST_PG_BDIDX));
9534 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
9535 "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid),
9536 BCE_L2CTX_RX_PG_BUF_SIZE));
9537 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
9538 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid),
9539 BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
9540 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
9541 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid),
9542 BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
9543 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
9544 "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9545 BCE_L2CTX_RX_NX_PG_BDIDX));
9546 } else if (cid == TX_CID) {
9547 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9548 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9549 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
9550 CTX_RD(sc, GET_CID_ADDR(cid),
9551 BCE_L2CTX_TX_TYPE_XI));
9552 			BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE_XI) ctx "
9553 "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid),
9554 BCE_L2CTX_TX_CMD_TYPE_XI));
9555 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) "
9556 "h/w buffer descriptor address\n",
9557 CTX_RD(sc, GET_CID_ADDR(cid),
9558 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
9559 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) "
9560 "h/w buffer descriptor address\n",
9561 CTX_RD(sc, GET_CID_ADDR(cid),
9562 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
9563 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) "
9564 "host producer index\n",
9565 CTX_RD(sc, GET_CID_ADDR(cid),
9566 BCE_L2CTX_TX_HOST_BIDX_XI));
9567 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) "
9568 "host byte sequence\n",
9569 CTX_RD(sc, GET_CID_ADDR(cid),
9570 BCE_L2CTX_TX_HOST_BSEQ_XI));
9571 } else {
9572 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
9573 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
9574 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
9575 CTX_RD(sc, GET_CID_ADDR(cid),
9576 BCE_L2CTX_TX_CMD_TYPE));
9577 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) "
9578 "h/w buffer descriptor address\n",
9579 CTX_RD(sc, GET_CID_ADDR(cid),
9580 BCE_L2CTX_TX_TBDR_BHADDR_HI));
9581 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) "
9582 "h/w buffer descriptor address\n",
9583 CTX_RD(sc, GET_CID_ADDR(cid),
9584 BCE_L2CTX_TX_TBDR_BHADDR_LO));
9585 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host "
9586 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9587 BCE_L2CTX_TX_HOST_BIDX));
9588 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
9589 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
9590 BCE_L2CTX_TX_HOST_BSEQ));
9591 }
9592 }
9593
9594 BCE_PRINTF(
9595 "----------------------------"
9596 " Raw CTX "
9597 "----------------------------\n");
9598
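	/* Dump the first 0x300 bytes of raw context memory, 16 bytes per line. */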
9599 for (int i = 0x0; i < 0x300; i += 0x10) {
9600 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
9601 CTX_RD(sc, GET_CID_ADDR(cid), i),
9602 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
9603 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
9604 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
9605 }
9606
9607
9608 BCE_PRINTF(
9609 "----------------------------"
9610 "----------------"
9611 "----------------------------\n");
9612}
9613
9614
9615/****************************************************************************/
9616/* Prints out the FTQ data. */
9617/* */
9618/* Returns: */
9619/* Nothing. */
9620/****************************************************************************/
9621static __attribute__ ((noinline)) void
9622bce_dump_ftqs(struct bce_softc *sc)
9623{
9624 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;
9625
9626 BCE_PRINTF(
9627 "----------------------------"
9628 " FTQ Data "
9629 "----------------------------\n");
9630
9631 BCE_PRINTF(" FTQ Command Control Depth_Now "
9632 "Max_Depth Valid_Cnt \n");
9633 BCE_PRINTF(" ------- ---------- ---------- ---------- "
9634 "---------- ----------\n");
9635
9636 /* Setup the generic statistic counters for the FTQ valid count. */
9637 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
9638 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) |
9639 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) |
9640 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
9641 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9642
9643 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) |
9644 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) |
9645 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) |
9646 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
9647 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);
9648
9649 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) |
9650 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) |
9651 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) |
9652 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
9653 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);
9654
9655 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) |
9656 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) |
9657 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) |
9658 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
9659 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);
9660
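	/* Each FTQ control register packs the current depth in bits 31:22  */
	/* and the maximum depth in bits 21:12, hence the masks and shifts   */
	/* applied below.                                                    */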
9661 /* Input queue to the Receive Lookup state machine */
9662 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
9663 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
9664 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
9665 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
9666 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9667 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9668 cmd, ctl, cur_depth, max_depth, valid_cnt);
9669
9670 /* Input queue to the Receive Processor */
9671 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
9672 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
9673 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
9674 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
9675 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9676 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9677 cmd, ctl, cur_depth, max_depth, valid_cnt);
9678
9679 	/* Input queue to the Receive Processor */
9680 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
9681 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
9682 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
9683 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
9684 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9685 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9686 cmd, ctl, cur_depth, max_depth, valid_cnt);
9687
9688 /* Input queue to the Receive Virtual to Physical state machine */
9689 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
9690 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
9691 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
9692 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
9693 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9694 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9695 cmd, ctl, cur_depth, max_depth, valid_cnt);
9696
9697 	/* Input queue to the Receive Virtual to Physical state machine */
9698 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
9699 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
9700 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
9701 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
9702 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
9703 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9704 cmd, ctl, cur_depth, max_depth, valid_cnt);
9705
9706 /* Input queue to the Receive Virtual to Physical state machine */
9707 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
9708 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
9709 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
9710 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
9711 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
9712 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9713 cmd, ctl, cur_depth, max_depth, valid_cnt);
9714
9715 /* Input queue to the Receive DMA state machine */
9716 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
9717 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
9718 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9719 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9720 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
9721 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9722 cmd, ctl, cur_depth, max_depth, valid_cnt);
9723
9724 /* Input queue to the Transmit Scheduler state machine */
9725 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
9726 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
9727 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
9728 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
9729 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
9730 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9731 cmd, ctl, cur_depth, max_depth, valid_cnt);
9732
9733 /* Input queue to the Transmit Buffer Descriptor state machine */
9734 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
9735 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
9736 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
9737 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
9738 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
9739 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9740 cmd, ctl, cur_depth, max_depth, valid_cnt);
9741
9742 /* Input queue to the Transmit Processor */
9743 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
9744 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
9745 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
9746 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
9747 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
9748 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9749 cmd, ctl, cur_depth, max_depth, valid_cnt);
9750
9751 /* Input queue to the Transmit DMA state machine */
9752 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
9753 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
9754 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9755 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9756 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
9757 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9758 cmd, ctl, cur_depth, max_depth, valid_cnt);
9759
9760 /* Input queue to the Transmit Patch-Up Processor */
9761 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
9762 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
9763 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
9764 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
9765 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
9766 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9767 cmd, ctl, cur_depth, max_depth, valid_cnt);
9768
9769 /* Input queue to the Transmit Assembler state machine */
9770 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
9771 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
9772 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
9773 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
9774 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
9775 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9776 cmd, ctl, cur_depth, max_depth, valid_cnt);
9777
9778 /* Input queue to the Completion Processor */
9779 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
9780 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
9781 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
9782 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
9783 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
9784 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9785 cmd, ctl, cur_depth, max_depth, valid_cnt);
9786
9787 /* Input queue to the Completion Processor */
9788 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
9789 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
9790 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
9791 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
9792 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
9793 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9794 cmd, ctl, cur_depth, max_depth, valid_cnt);
9795
9796 /* Input queue to the Completion Processor */
9797 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
9798 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
9799 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
9800 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
9801 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
9802 	BCE_PRINTF("  COM  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9803 cmd, ctl, cur_depth, max_depth, valid_cnt);
9804
9805 /* Setup the generic statistic counters for the FTQ valid count. */
9806 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) |
9807 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) |
9808 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
9809
9810 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9811 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
9812 val = val |
9813 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI <<
9814 24);
9815 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9816
9817 /* Input queue to the Management Control Processor */
9818 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
9819 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
9820 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9821 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9822 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9823 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9824 cmd, ctl, cur_depth, max_depth, valid_cnt);
9825
9826 /* Input queue to the Command Processor */
9827 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
9828 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
9829 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9830 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9831 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9832 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9833 cmd, ctl, cur_depth, max_depth, valid_cnt);
9834
9835 /* Input queue to the Completion Scheduler state machine */
9836 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
9837 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
9838 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
9839 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
9840 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9841 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9842 cmd, ctl, cur_depth, max_depth, valid_cnt);
9843
9844 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9845 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9846 /* Input queue to the RV2P Command Scheduler */
9847 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
9848 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
9849 cur_depth = (ctl & 0xFFC00000) >> 22;
9850 max_depth = (ctl & 0x003FF000) >> 12;
9851 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9852 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9853 cmd, ctl, cur_depth, max_depth, valid_cnt);
9854 }
9855
9856 BCE_PRINTF(
9857 "----------------------------"
9858 "----------------"
9859 "----------------------------\n");
9860}
9861
9862
9863/****************************************************************************/
9864/* Prints out the TX chain. */
9865/* */
9866/* Returns: */
9867/* Nothing. */
9868/****************************************************************************/
9869static __attribute__ ((noinline)) void
9870bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
9871{
9872 struct tx_bd *txbd;
9873
9874 /* First some info about the tx_bd chain structure. */
9875 BCE_PRINTF(
9876 "----------------------------"
9877 " tx_bd chain "
9878 "----------------------------\n");
9879
9880 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n",
9881 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
9882 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
9883 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
9884 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);
9885
9886 BCE_PRINTF(
9887 "----------------------------"
9888 " tx_bd data "
9889 "----------------------------\n");
9890
9891 /* Now print out a decoded list of TX buffer descriptors. */
9892 for (int i = 0; i < count; i++) {
9893 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
9894 bce_dump_txbd(sc, tx_prod, txbd);
9895 tx_prod++;
9896 }
9897
9898 BCE_PRINTF(
9899 "----------------------------"
9900 "----------------"
9901 "----------------------------\n");
9902}
9903
9904
9905/****************************************************************************/
9906/* Prints out the RX chain. */
9907/* */
9908/* Returns: */
9909/* Nothing. */
9910/****************************************************************************/
9911static __attribute__ ((noinline)) void
9912bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count)
9913{
9914 struct rx_bd *rxbd;
9915
9916 /* First some info about the rx_bd chain structure. */
9917 BCE_PRINTF(
9918 "----------------------------"
9919 " rx_bd chain "
9920 "----------------------------\n");
9921
9922 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n",
9923 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
9924
9925 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9926 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
9927
9928 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);
9929
9930 BCE_PRINTF(
9931 "----------------------------"
9932 " rx_bd data "
9933 "----------------------------\n");
9934
9935 /* Now print out the rx_bd's themselves. */
9936 for (int i = 0; i < count; i++) {
9937 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
9938 bce_dump_rxbd(sc, rx_prod, rxbd);
9939 rx_prod = RX_CHAIN_IDX(rx_prod + 1);
9940 }
9941
9942 BCE_PRINTF(
9943 "----------------------------"
9944 "----------------"
9945 "----------------------------\n");
9946}
9947
9948
9949#ifdef BCE_JUMBO_HDRSPLIT
9950/****************************************************************************/
9951/* Prints out the page chain. */
9952/* */
9953/* Returns: */
9954/* Nothing. */
9955/****************************************************************************/
9956static __attribute__ ((noinline)) void
9957bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
9958{
9959 struct rx_bd *pgbd;
9960
9961 /* First some info about the page chain structure. */
9962 BCE_PRINTF(
9963 "----------------------------"
9964 " page chain "
9965 "----------------------------\n");
9966
9967 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n",
9968 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
9969
9970 	BCE_PRINTF("pg_bd per page = 0x%08X, usable pg_bd per page = 0x%08X\n",
9971 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
9972
9973 	BCE_PRINTF("total pg_bd = 0x%08X, max_pg_bd = 0x%08X\n",
9974 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
9975
9976 BCE_PRINTF(
9977 "----------------------------"
9978 " page data "
9979 "----------------------------\n");
9980
9981 /* Now print out the rx_bd's themselves. */
9982 for (int i = 0; i < count; i++) {
9983 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
9984 bce_dump_pgbd(sc, pg_prod, pgbd);
9985 pg_prod = PG_CHAIN_IDX(pg_prod + 1);
9986 }
9987
9988 BCE_PRINTF(
9989 "----------------------------"
9990 "----------------"
9991 "----------------------------\n");
9992}
9993#endif
9994
9995
9996#define BCE_PRINT_RX_CONS(arg) \
9997if (sblk->status_rx_quick_consumer_index##arg) \
9998 BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \
9999 sblk->status_rx_quick_consumer_index##arg, (u16) \
10000 RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \
10001 arg);
10002
10003
10004#define BCE_PRINT_TX_CONS(arg) \
10005if (sblk->status_tx_quick_consumer_index##arg) \
10006 BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \
10007 sblk->status_tx_quick_consumer_index##arg, (u16) \
10008 TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \
10009 arg);
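/* Each macro prints a consumer index only when it is non-zero, which keeps */
/* the status block dump compact.                                           */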
10010
10011/****************************************************************************/
10012/* Prints out the status block from host memory. */
10013/* */
10014/* Returns: */
10015/* Nothing. */
10016/****************************************************************************/
10017static __attribute__ ((noinline)) void
10018bce_dump_status_block(struct bce_softc *sc)
10019{
10020 struct status_block *sblk;
10021
10022 sblk = sc->status_block;
10023
10024 BCE_PRINTF(
10025 "----------------------------"
10026 " Status Block "
10027 "----------------------------\n");
10028
10029 	/* These indices are used for normal L2 drivers. */
10030 BCE_PRINTF(" 0x%08X - attn_bits\n",
10031 sblk->status_attn_bits);
10032
10033 BCE_PRINTF(" 0x%08X - attn_bits_ack\n",
10034 sblk->status_attn_bits_ack);
10035
10036 BCE_PRINT_RX_CONS(0);
10037 BCE_PRINT_TX_CONS(0)
10038
10039 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx);
10040
10041 	/* These indices are not used for normal L2 drivers. */
10042 BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3);
10043 BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6);
10044 BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9);
10045 BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12);
10046 BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15);
10047
10048 BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3);
10049
10050 if (sblk->status_completion_producer_index ||
10051 sblk->status_cmd_consumer_index)
10052 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n",
10053 sblk->status_completion_producer_index,
10054 sblk->status_cmd_consumer_index);
10055
10056 BCE_PRINTF(
10057 "----------------------------"
10058 "----------------"
10059 "----------------------------\n");
10060}
10061
10062
10063#define BCE_PRINT_64BIT_STAT(arg) \
10064if (sblk->arg##_lo || sblk->arg##_hi) \
10065 BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \
10066 sblk->arg##_lo, #arg);
10067
10068#define BCE_PRINT_32BIT_STAT(arg) \
10069if (sblk->arg) \
10070 BCE_PRINTF(" 0x%08X : %s\n", \
10071 sblk->arg, #arg);
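/* Both macros skip zero-valued counters, so only non-zero statistics appear */
/* in the dump below.                                                        */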
10072
10073/****************************************************************************/
10074/* Prints out the statistics block from host memory. */
10075/* */
10076/* Returns: */
10077/* Nothing. */
10078/****************************************************************************/
10079static __attribute__ ((noinline)) void
10080bce_dump_stats_block(struct bce_softc *sc)
10081{
10082 struct statistics_block *sblk;
10083
10084 sblk = sc->stats_block;
10085
10086 BCE_PRINTF(
10087 "---------------"
10088 " Stats Block (All Stats Not Shown Are 0) "
10089 "---------------\n");
10090
10091 BCE_PRINT_64BIT_STAT(stat_IfHCInOctets);
10092 BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets);
10093 BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets);
10094 BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets);
10095 BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts);
10096 BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts);
10097 BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts);
10098 BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts);
10099 BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts);
10100 BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts);
10101 BCE_PRINT_32BIT_STAT(
10102 stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
10103 BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors);
10104 BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors);
10105 BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors);
10106 BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames);
10107 BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames);
10108 BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions);
10109 BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions);
10110 BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions);
10111 BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions);
10112 BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments);
10113 BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers);
10114 BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts);
10115 BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts);
10116 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets);
10117 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets);
10118 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets);
10119 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets);
10120 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets);
10121 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets);
10122 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets);
10123 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets);
10124 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets);
10125 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets);
10126 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets);
10127 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets);
10128 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets);
10129 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets);
10130 BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived);
10131 BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived);
10132 BCE_PRINT_32BIT_STAT(stat_OutXonSent);
10133 BCE_PRINT_32BIT_STAT(stat_OutXoffSent);
10134 BCE_PRINT_32BIT_STAT(stat_FlowControlDone);
10135 BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived);
10136 BCE_PRINT_32BIT_STAT(stat_XoffStateEntered);
10137 BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards);
10138 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards);
10139 BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards);
10140 BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards);
10141 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit);
10142 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards);
10143 BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards);
10144 BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards);
10145 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit);
10146
10147 BCE_PRINTF(
10148 "----------------------------"
10149 "----------------"
10150 "----------------------------\n");
10151}
10152
10153
10154/****************************************************************************/
10155/* Prints out a summary of the driver state. */
10156/* */
10157/* Returns: */
10158/* Nothing. */
10159/****************************************************************************/
10160static __attribute__ ((noinline)) void
10161bce_dump_driver_state(struct bce_softc *sc)
10162{
10163 u32 val_hi, val_lo;
10164
10165 BCE_PRINTF(
10166 "-----------------------------"
10167 " Driver State "
10168 "-----------------------------\n");
10169
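	/* Print each virtual address as high:low 32-bit halves via BCE_ADDR_HI/LO. */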
10170 val_hi = BCE_ADDR_HI(sc);
10171 val_lo = BCE_ADDR_LO(sc);
10172 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual "
10173 "address\n", val_hi, val_lo);
10174
10175 val_hi = BCE_ADDR_HI(sc->bce_vhandle);
10176 val_lo = BCE_ADDR_LO(sc->bce_vhandle);
10177 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual "
10178 "address\n", val_hi, val_lo);
10179
10180 val_hi = BCE_ADDR_HI(sc->status_block);
10181 val_lo = BCE_ADDR_LO(sc->status_block);
10182 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block "
10183 "virtual address\n", val_hi, val_lo);
10184
10185 val_hi = BCE_ADDR_HI(sc->stats_block);
10186 val_lo = BCE_ADDR_LO(sc->stats_block);
10187 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block "
10188 "virtual address\n", val_hi, val_lo);
10189
10190 val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
10191 val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
10192 BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
10193 	    "virtual address\n", val_hi, val_lo);
10194
10195 val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
10196 val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
10197 BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
10198 "virtual address\n", val_hi, val_lo);
10199
10200#ifdef BCE_JUMBO_HDRSPLIT
10201 val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
10202 val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
10203 BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain "
10204 "virtual address\n", val_hi, val_lo);
10205#endif
10206
10207 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
10208 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
10209 BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
10210 "virtual address\n", val_hi, val_lo);
10211
10212 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
10213 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
10214 BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
10215 "virtual address\n", val_hi, val_lo);
10216
10217#ifdef BCE_JUMBO_HDRSPLIT
10218 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
10219 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
10220 BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain "
10221 "virtual address\n", val_hi, val_lo);
10222#endif
10223
10224 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) "
10225 "h/w intrs\n", sc->interrupts_generated);
10226
10227 BCE_PRINTF(" 0x%08X - (sc->interrupts_rx) "
10228 "rx interrupts handled\n", sc->interrupts_rx);
10229
10230 BCE_PRINTF(" 0x%08X - (sc->interrupts_tx) "
10231 "tx interrupts handled\n", sc->interrupts_tx);
10232
10233 BCE_PRINTF(" 0x%08X - (sc->phy_interrupts) "
10234 "phy interrupts handled\n", sc->phy_interrupts);
10235
10236 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) "
10237 "status block index\n", sc->last_status_idx);
10238
10239 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer "
10240 "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
10241
10242 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer "
10243 "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
10244
10245 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer "
10246 "byte seq index\n", sc->tx_prod_bseq);
10247
10248 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx "
10249 "mbufs allocated\n", sc->debug_tx_mbuf_alloc);
10250
10251 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used "
10252 "tx_bd's\n", sc->used_tx_bd);
10253
10254 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi "
10255 "watermark\n", sc->tx_hi_watermark, sc->max_tx_bd);
10256
10257 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer "
10258 "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
10259
10260 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer "
10261 "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
10262
10263 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer "
10264 "byte seq index\n", sc->rx_prod_bseq);
10265
10266 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx "
10267 "mbufs allocated\n", sc->debug_rx_mbuf_alloc);
10268
10269 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free "
10270 "rx_bd's\n", sc->free_rx_bd);
10271
10272#ifdef BCE_JUMBO_HDRSPLIT
10273 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer "
10274 "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
10275
10276 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer "
10277 "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
10278
10279 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page "
10280 "mbufs allocated\n", sc->debug_pg_mbuf_alloc);
10281
10282 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page "
10283 "rx_bd's\n", sc->free_pg_bd);
10284
10285 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low "
10286 "watermark\n", sc->pg_low_watermark, sc->max_pg_bd);
10287#endif
10288
10289 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) "
10290 "mbuf alloc failures\n", sc->mbuf_alloc_failed_count);
10291
10292 BCE_PRINTF(" 0x%08X - (sc->bce_flags) "
10293 "bce mac flags\n", sc->bce_flags);
10294
10295 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) "
10296 "bce phy flags\n", sc->bce_phy_flags);
10297
10298 BCE_PRINTF(
10299 "----------------------------"
10300 "----------------"
10301 "----------------------------\n");
10302}
10303
10304
10305/****************************************************************************/
10306 /* Prints out the hardware state through a summary of important registers, */
10307/* followed by a complete register dump. */
10308/* */
10309/* Returns: */
10310/* Nothing. */
10311/****************************************************************************/
10312static __attribute__ ((noinline)) void
10313bce_dump_hw_state(struct bce_softc *sc)
10314{
10315 u32 val;
10316
10317 BCE_PRINTF(
10318 "----------------------------"
10319 " Hardware State "
10320 "----------------------------\n");
10321
10322 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10323
10324 val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
10325 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
10326 val, BCE_MISC_ENABLE_STATUS_BITS);
10327
10328 val = REG_RD(sc, BCE_DMA_STATUS);
10329 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n",
10330 val, BCE_DMA_STATUS);
10331
10332 val = REG_RD(sc, BCE_CTX_STATUS);
10333 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n",
10334 val, BCE_CTX_STATUS);
10335
10336 val = REG_RD(sc, BCE_EMAC_STATUS);
10337 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n",
10338 val, BCE_EMAC_STATUS);
10339
10340 val = REG_RD(sc, BCE_RPM_STATUS);
10341 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n",
10342 val, BCE_RPM_STATUS);
10343
10344 /* ToDo: Create a #define for this constant. */
10345 val = REG_RD(sc, 0x2004);
10346 BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n",
10347 val, 0x2004);
10348
10349 val = REG_RD(sc, BCE_RV2P_STATUS);
10350 BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n",
10351 val, BCE_RV2P_STATUS);
10352
10353 /* ToDo: Create a #define for this constant. */
10354 val = REG_RD(sc, 0x2c04);
10355 BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n",
10356 val, 0x2c04);
10357
10358 val = REG_RD(sc, BCE_TBDR_STATUS);
10359 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n",
10360 val, BCE_TBDR_STATUS);
10361
10362 val = REG_RD(sc, BCE_TDMA_STATUS);
10363 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n",
10364 val, BCE_TDMA_STATUS);
10365
10366 val = REG_RD(sc, BCE_HC_STATUS);
10367 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n",
10368 val, BCE_HC_STATUS);
10369
10370 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10371 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
10372 val, BCE_TXP_CPU_STATE);
10373
10374 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10375 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
10376 val, BCE_TPAT_CPU_STATE);
10377
10378 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10379 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
10380 val, BCE_RXP_CPU_STATE);
10381
10382 val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10383 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
10384 val, BCE_COM_CPU_STATE);
10385
10386 val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
10387 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n",
10388 val, BCE_MCP_CPU_STATE);
10389
10390 val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10391 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
10392 val, BCE_CP_CPU_STATE);
10393
10394 BCE_PRINTF(
10395 "----------------------------"
10396 "----------------"
10397 "----------------------------\n");
10398
10399 BCE_PRINTF(
10400 "----------------------------"
10401 " Register Dump "
10402 "----------------------------\n");
10403
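	/* Walk the register space from 0x400 to 0x7FFC, 16 bytes per line. */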
10404 for (int i = 0x400; i < 0x8000; i += 0x10) {
10405 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10406 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10407 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10408 }
10409
10410 BCE_PRINTF(
10411 "----------------------------"
10412 "----------------"
10413 "----------------------------\n");
10414}
10415
10416
10417/****************************************************************************/
10418/* Prints out the mailbox queue registers. */
10419/* */
10420/* Returns: */
10421/* Nothing. */
10422/****************************************************************************/
10423static __attribute__ ((noinline)) void
10424bce_dump_mq_regs(struct bce_softc *sc)
10425{
10426 BCE_PRINTF(
10427 "----------------------------"
10428 " MQ Regs "
10429 "----------------------------\n");
10430
10431 BCE_PRINTF(
10432 "----------------------------"
10433 "----------------"
10434 "----------------------------\n");
10435
10436 for (int i = 0x3c00; i < 0x4000; i += 0x10) {
10437 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10438 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10439 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10440 }
10441
10442 BCE_PRINTF(
10443 "----------------------------"
10444 "----------------"
10445 "----------------------------\n");
10446}
10447
10448
10449/****************************************************************************/
10450/* Prints out the bootcode state. */
10451/* */
10452/* Returns: */
10453/* Nothing. */
10454/****************************************************************************/
10455static __attribute__ ((noinline)) void
10456bce_dump_bc_state(struct bce_softc *sc)
10457{
10458 u32 val;
10459
10460 BCE_PRINTF(
10461 "----------------------------"
10462 " Bootcode State "
10463 "----------------------------\n");
10464
10465 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10466
10467 val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE);
10468 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
10469 val, BCE_BC_RESET_TYPE);
10470
10471 val = bce_shmem_rd(sc, BCE_BC_STATE);
10472 BCE_PRINTF("0x%08X - (0x%06X) state\n",
10473 val, BCE_BC_STATE);
10474
10475 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
10476 BCE_PRINTF("0x%08X - (0x%06X) condition\n",
10477 val, BCE_BC_STATE_CONDITION);
10478
10479 val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD);
10480 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
10481 val, BCE_BC_STATE_DEBUG_CMD);
10482
10483 BCE_PRINTF(
10484 "----------------------------"
10485 "----------------"
10486 "----------------------------\n");
10487}
10488
10489
10490/****************************************************************************/
10491/* Prints out the TXP processor state. */
10492/* */
10493/* Returns: */
10494/* Nothing. */
10495/****************************************************************************/
10496static __attribute__ ((noinline)) void
10497bce_dump_txp_state(struct bce_softc *sc, int regs)
10498{
10499 u32 val;
10500 u32 fw_version[3];
10501
10502 BCE_PRINTF(
10503 "----------------------------"
10504 " TXP State "
10505 "----------------------------\n");
10506
10507 for (int i = 0; i < 3; i++)
10508 fw_version[i] = htonl(REG_RD_IND(sc,
10509 (BCE_TXP_SCRATCH + 0x10 + i * 4)));
10510 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10511
10512 val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
10513 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n",
10514 val, BCE_TXP_CPU_MODE);
10515
10516 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10517 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
10518 val, BCE_TXP_CPU_STATE);
10519
10520 val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
10521 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n",
10522 val, BCE_TXP_CPU_EVENT_MASK);
10523
10524 if (regs) {
10525 BCE_PRINTF(
10526 "----------------------------"
10527 " Register Dump "
10528 "----------------------------\n");
10529
10530 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
10531 /* Skip the big blank spaces */
10532 if (i < 0x454000 && i > 0x5ffff)
10533 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10534 "0x%08X 0x%08X\n", i,
10535 REG_RD_IND(sc, i),
10536 REG_RD_IND(sc, i + 0x4),
10537 REG_RD_IND(sc, i + 0x8),
10538 REG_RD_IND(sc, i + 0xC));
10539 }
10540 }
10541
10542 BCE_PRINTF(
10543 "----------------------------"
10544 "----------------"
10545 "----------------------------\n");
10546}
10547
10548
10549/****************************************************************************/
10550/* Prints out the RXP processor state. */
10551/* */
10552/* Returns: */
10553/* Nothing. */
10554/****************************************************************************/
10555static __attribute__ ((noinline)) void
10556bce_dump_rxp_state(struct bce_softc *sc, int regs)
10557{
10558 u32 val;
10559 u32 fw_version[3];
10560
10561 BCE_PRINTF(
10562 "----------------------------"
10563 " RXP State "
10564 "----------------------------\n");
10565
10566 for (int i = 0; i < 3; i++)
10567 fw_version[i] = htonl(REG_RD_IND(sc,
10568 (BCE_RXP_SCRATCH + 0x10 + i * 4)));
10569
10570 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10571
10572 val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
10573 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n",
10574 val, BCE_RXP_CPU_MODE);
10575
10576 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10577 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
10578 val, BCE_RXP_CPU_STATE);
10579
10580 val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
10581 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n",
10582 val, BCE_RXP_CPU_EVENT_MASK);
10583
10584 if (regs) {
10585 BCE_PRINTF(
10586 "----------------------------"
10587 " Register Dump "
10588 "----------------------------\n");
10589
10590 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
10591			/* Skip the big blank spaces */
10592 if (i < 0xc5400 && i > 0xdffff)
10593 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10594 "0x%08X 0x%08X\n", i,
10595 REG_RD_IND(sc, i),
10596 REG_RD_IND(sc, i + 0x4),
10597 REG_RD_IND(sc, i + 0x8),
10598 REG_RD_IND(sc, i + 0xC));
10599 }
10600 }
10601
10602 BCE_PRINTF(
10603 "----------------------------"
10604 "----------------"
10605 "----------------------------\n");
10606}
10607
10608
10609/****************************************************************************/
10610/* Prints out the TPAT processor state. */
10611/* */
10612/* Returns: */
10613/* Nothing. */
10614/****************************************************************************/
10615static __attribute__ ((noinline)) void
10616bce_dump_tpat_state(struct bce_softc *sc, int regs)
10617{
10618 u32 val;
10619 u32 fw_version[3];
10620
10621 BCE_PRINTF(
10622 "----------------------------"
10623 " TPAT State "
10624 "----------------------------\n");
10625
10626 for (int i = 0; i < 3; i++)
10627 fw_version[i] = htonl(REG_RD_IND(sc,
10628 (BCE_TPAT_SCRATCH + 0x410 + i * 4)));
10629
10630 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10631
10632 val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
10633 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n",
10634 val, BCE_TPAT_CPU_MODE);
10635
10636 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10637 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
10638 val, BCE_TPAT_CPU_STATE);
10639
10640 val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
10641 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n",
10642 val, BCE_TPAT_CPU_EVENT_MASK);
10643
10644 if (regs) {
10645 BCE_PRINTF(
10646 "----------------------------"
10647 " Register Dump "
10648 "----------------------------\n");
10649
10650 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
10651 /* Skip the big blank spaces */
10652 if (i < 0x854000 && i > 0x9ffff)
10653 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10654 "0x%08X 0x%08X\n", i,
10655 REG_RD_IND(sc, i),
10656 REG_RD_IND(sc, i + 0x4),
10657 REG_RD_IND(sc, i + 0x8),
10658 REG_RD_IND(sc, i + 0xC));
10659 }
10660 }
10661
10662 BCE_PRINTF(
10663 "----------------------------"
10664 "----------------"
10665 "----------------------------\n");
10666}
10667
10668
10669/****************************************************************************/
10670/* Prints out the Command Processor (CP) state.                            */
10671/* */
10672/* Returns: */
10673/* Nothing. */
10674/****************************************************************************/
10675static __attribute__ ((noinline)) void
10676bce_dump_cp_state(struct bce_softc *sc, int regs)
10677{
10678 u32 val;
10679 u32 fw_version[3];
10680
10681 BCE_PRINTF(
10682 "----------------------------"
10683 " CP State "
10684 "----------------------------\n");
10685
10686 for (int i = 0; i < 3; i++)
10687 fw_version[i] = htonl(REG_RD_IND(sc,
10688 (BCE_CP_SCRATCH + 0x10 + i * 4)));
10689
10690 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10691
10692 val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
10693 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n",
10694 val, BCE_CP_CPU_MODE);
10695
10696 val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10697 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
10698 val, BCE_CP_CPU_STATE);
10699
10700 val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
10701 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
10702 BCE_CP_CPU_EVENT_MASK);
10703
10704 if (regs) {
10705 BCE_PRINTF(
10706 "----------------------------"
10707 " Register Dump "
10708 "----------------------------\n");
10709
10710 for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
10711 /* Skip the big blank spaces */
10712 if (i < 0x185400 && i > 0x19ffff)
10713 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10714 "0x%08X 0x%08X\n", i,
10715 REG_RD_IND(sc, i),
10716 REG_RD_IND(sc, i + 0x4),
10717 REG_RD_IND(sc, i + 0x8),
10718 REG_RD_IND(sc, i + 0xC));
10719 }
10720 }
10721
10722 BCE_PRINTF(
10723 "----------------------------"
10724 "----------------"
10725 "----------------------------\n");
10726}
10727
10728
10729/****************************************************************************/
10730/* Prints out the Completion Processor (COM) state.                        */
10731/* */
10732/* Returns: */
10733/* Nothing. */
10734/****************************************************************************/
10735static __attribute__ ((noinline)) void
10736bce_dump_com_state(struct bce_softc *sc, int regs)
10737{
10738 u32 val;
10739 u32 fw_version[4];
10740
10741 BCE_PRINTF(
10742 "----------------------------"
10743 " COM State "
10744 "----------------------------\n");
10745
10746 for (int i = 0; i < 3; i++)
10747 fw_version[i] = htonl(REG_RD_IND(sc,
10748 (BCE_COM_SCRATCH + 0x10 + i * 4)));
10749
10750 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10751
10752 val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
10753 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n",
10754 val, BCE_COM_CPU_MODE);
10755
10756 val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10757 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
10758 val, BCE_COM_CPU_STATE);
10759
10760 val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
10761 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
10762 BCE_COM_CPU_EVENT_MASK);
10763
10764 if (regs) {
10765 BCE_PRINTF(
10766 "----------------------------"
10767 " Register Dump "
10768 "----------------------------\n");
10769
10770 for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
10771 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10772 "0x%08X 0x%08X\n", i,
10773 REG_RD_IND(sc, i),
10774 REG_RD_IND(sc, i + 0x4),
10775 REG_RD_IND(sc, i + 0x8),
10776 REG_RD_IND(sc, i + 0xC));
10777 }
10778 }
10779
10780 BCE_PRINTF(
10781 "----------------------------"
10782 "----------------"
10783 "----------------------------\n");
10784}
10785
10786
10787/****************************************************************************/
10788/* Prints out the Receive Virtual 2 Physical (RV2P) state. */
10789/* */
10790/* Returns: */
10791/* Nothing. */
10792/****************************************************************************/
10793static __attribute__ ((noinline)) void
10794bce_dump_rv2p_state(struct bce_softc *sc)
10795{
10796 u32 val, pc1, pc2, fw_ver_high, fw_ver_low;
10797
10798 BCE_PRINTF(
10799 "----------------------------"
10800 " RV2P State "
10801 "----------------------------\n");
10802
10803 /* Stall the RV2P processors. */
10804 val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
10805 val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2;
10806 REG_WR_IND(sc, BCE_RV2P_CONFIG, val);
10807
10808 /* Read the firmware version. */
10809 val = 0x00000001;
10810 REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
10811 fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
10812 fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
10813 BCE_RV2P_INSTR_HIGH_HIGH;
10814 BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n",
10815 fw_ver_high, fw_ver_low);
10816
10817 val = 0x00000001;
10818 REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
10819 fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
10820 fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
10821 BCE_RV2P_INSTR_HIGH_HIGH;
10822 BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n",
10823 fw_ver_high, fw_ver_low);
10824
10825 /* Resume the RV2P processors. */
10826 val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
10827 val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2);
10828 REG_WR_IND(sc, BCE_RV2P_CONFIG, val);
10829
10830 /* Fetch the program counter value. */
10831 val = 0x68007800;
10832 REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
10833 val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
10834 pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
10835 pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
10836 BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1);
10837 BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2);
10838
10839 /* Fetch the program counter value again to see if it is advancing. */
10840 val = 0x68007800;
10841 REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
10842 val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
10843 pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
10844 pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
10845 BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1);
10846 BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2);
10847
10848 BCE_PRINTF(
10849 "----------------------------"
10850 "----------------"
10851 "----------------------------\n");
10852}
10853
10854
10855/****************************************************************************/
10856/* Prints out the driver state and then enters the debugger. */
10857/* */
10858/* Returns: */
10859/* Nothing. */
10860/****************************************************************************/
10861static __attribute__ ((noinline)) void
10862bce_breakpoint(struct bce_softc *sc)
10863{
10864
10865 /*
10866 * Unreachable code to silence compiler warnings
10867 * about unused functions.
10868 */
10869 if (0) {
10870 bce_freeze_controller(sc);
10871 bce_unfreeze_controller(sc);
10872 bce_dump_enet(sc, NULL);
10873 bce_dump_txbd(sc, 0, NULL);
10874 bce_dump_rxbd(sc, 0, NULL);
10875 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
10876 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
10877 bce_dump_l2fhdr(sc, 0, NULL);
10878 bce_dump_ctx(sc, RX_CID);
10879 bce_dump_ftqs(sc);
10880 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
10881 bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD);
10882 bce_dump_status_block(sc);
10883 bce_dump_stats_block(sc);
10884 bce_dump_driver_state(sc);
10885 bce_dump_hw_state(sc);
10886 bce_dump_bc_state(sc);
10887 bce_dump_txp_state(sc, 0);
10888 bce_dump_rxp_state(sc, 0);
10889 bce_dump_tpat_state(sc, 0);
10890 bce_dump_cp_state(sc, 0);
10891 bce_dump_com_state(sc, 0);
10892 bce_dump_rv2p_state(sc);
10893
10894#ifdef BCE_JUMBO_HDRSPLIT
10895 bce_dump_pgbd(sc, 0, NULL);
10896 bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
10897 bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
10898#endif
10899 }
10900
10901 bce_dump_status_block(sc);
10902 bce_dump_driver_state(sc);
10903
10904 /* Call the debugger. */
10905 breakpoint();
10906
10907 return;
10908}
10909#endif
10910
1783 DBPRINT(sc, BCE_INFO_PHY,
1784 "%s(): Enabling TX flow control.\n", __FUNCTION__);
1785 BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1786 sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
1787 } else {
1788 DBPRINT(sc, BCE_INFO_PHY,
1789 "%s(): Disabling TX flow control.\n", __FUNCTION__);
1790 BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1791 sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
1792 }
1793
1794 /* ToDo: Update watermarks in bce_init_rx_context(). */
1795
1796 DBEXIT(BCE_VERBOSE_PHY);
1797}
1798
1799
1800/****************************************************************************/
1801/* Acquire NVRAM lock. */
1802/* */
1803/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1804/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
1805/* reserved for use by the driver.                                          */
1806/* */
1807/* Returns: */
1808/* 0 on success, positive value on failure. */
1809/****************************************************************************/
1810static int
1811bce_acquire_nvram_lock(struct bce_softc *sc)
1812{
1813 u32 val;
1814 int j, rc = 0;
1815
1816 DBENTER(BCE_VERBOSE_NVRAM);
1817
1818 /* Request access to the flash interface. */
1819 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1820 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1821 val = REG_RD(sc, BCE_NVM_SW_ARB);
1822 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1823 break;
1824
1825 DELAY(5);
1826 }
1827
1828 if (j >= NVRAM_TIMEOUT_COUNT) {
1829 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1830 rc = EBUSY;
1831 }
1832
1833 DBEXIT(BCE_VERBOSE_NVRAM);
1834 return (rc);
1835}
1836
1837
1838/****************************************************************************/
1839/* Release NVRAM lock. */
1840/* */
1841/* When the caller is finished accessing NVRAM the lock must be released. */
1842/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
1843/* reserved for use by the driver.                                          */
1844/* */
1845/* Returns: */
1846/* 0 on success, positive value on failure. */
1847/****************************************************************************/
1848static int
1849bce_release_nvram_lock(struct bce_softc *sc)
1850{
1851 u32 val;
1852 int j, rc = 0;
1853
1854 DBENTER(BCE_VERBOSE_NVRAM);
1855
1856 /*
1857	 * Relinquish the NVRAM interface.
1858 */
1859 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1860
1861 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1862 val = REG_RD(sc, BCE_NVM_SW_ARB);
1863 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1864 break;
1865
1866 DELAY(5);
1867 }
1868
1869 if (j >= NVRAM_TIMEOUT_COUNT) {
1870 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1871 rc = EBUSY;
1872 }
1873
1874 DBEXIT(BCE_VERBOSE_NVRAM);
1875 return (rc);
1876}
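/*
 * Illustrative sketch only (not part of the driver control flow): how the
 * NVRAM lock and access helpers are expected to be paired by a caller such
 * as bce_nvram_read() below.  Error handling is abbreviated.
 *
 *	if (bce_acquire_nvram_lock(sc) == 0) {
 *		bce_enable_nvram_access(sc);
 *		... bce_nvram_read_dword() / bce_nvram_write_dword() ...
 *		bce_disable_nvram_access(sc);
 *		bce_release_nvram_lock(sc);
 *	}
 */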
1877
1878
1879#ifdef BCE_NVRAM_WRITE_SUPPORT
1880/****************************************************************************/
1881/* Enable NVRAM write access. */
1882/* */
1883/* Before writing to NVRAM the caller must enable NVRAM writes. */
1884/* */
1885/* Returns: */
1886/* 0 on success, positive value on failure. */
1887/****************************************************************************/
1888static int
1889bce_enable_nvram_write(struct bce_softc *sc)
1890{
1891 u32 val;
1892 int rc = 0;
1893
1894 DBENTER(BCE_VERBOSE_NVRAM);
1895
1896 val = REG_RD(sc, BCE_MISC_CFG);
1897 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1898
1899 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
1900 int j;
1901
1902 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1903 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1904
1905 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1906 DELAY(5);
1907
1908 val = REG_RD(sc, BCE_NVM_COMMAND);
1909 if (val & BCE_NVM_COMMAND_DONE)
1910 break;
1911 }
1912
1913 if (j >= NVRAM_TIMEOUT_COUNT) {
1914 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1915 rc = EBUSY;
1916 }
1917 }
1918
1919	DBEXIT(BCE_VERBOSE_NVRAM);
1920 return (rc);
1921}
1922
1923
1924/****************************************************************************/
1925/* Disable NVRAM write access. */
1926/* */
1927/* When the caller is finished writing to NVRAM write access must be */
1928/* disabled. */
1929/* */
1930/* Returns: */
1931/* Nothing. */
1932/****************************************************************************/
1933static void
1934bce_disable_nvram_write(struct bce_softc *sc)
1935{
1936 u32 val;
1937
1938 DBENTER(BCE_VERBOSE_NVRAM);
1939
1940 val = REG_RD(sc, BCE_MISC_CFG);
1941 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1942
1943 DBEXIT(BCE_VERBOSE_NVRAM);
1944
1945}
1946#endif
1947
1948
1949/****************************************************************************/
1950/* Enable NVRAM access. */
1951/* */
1952/* Before accessing NVRAM for read or write operations the caller must */
1953/* enable NVRAM access.                                                     */
1954/* */
1955/* Returns: */
1956/* Nothing. */
1957/****************************************************************************/
1958static void
1959bce_enable_nvram_access(struct bce_softc *sc)
1960{
1961 u32 val;
1962
1963 DBENTER(BCE_VERBOSE_NVRAM);
1964
1965 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1966 /* Enable both bits, even on read. */
1967 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val |
1968 BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1969
1970 DBEXIT(BCE_VERBOSE_NVRAM);
1971}
1972
1973
1974/****************************************************************************/
1975/* Disable NVRAM access. */
1976/* */
1977/* When the caller is finished accessing NVRAM access must be disabled. */
1978/* */
1979/* Returns: */
1980/* Nothing. */
1981/****************************************************************************/
1982static void
1983bce_disable_nvram_access(struct bce_softc *sc)
1984{
1985 u32 val;
1986
1987 DBENTER(BCE_VERBOSE_NVRAM);
1988
1989 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1990
1991 /* Disable both bits, even after read. */
1992 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val &
1993 ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1994
1995 DBEXIT(BCE_VERBOSE_NVRAM);
1996}
1997
1998
1999#ifdef BCE_NVRAM_WRITE_SUPPORT
2000/****************************************************************************/
2001/* Erase NVRAM page before writing. */
2002/* */
2003/* Non-buffered flash parts require that a page be erased before it is */
2004/* written. */
2005/* */
2006/* Returns: */
2007/* 0 on success, positive value on failure. */
2008/****************************************************************************/
2009static int
2010bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
2011{
2012 u32 cmd;
2013 int j, rc = 0;
2014
2015 DBENTER(BCE_VERBOSE_NVRAM);
2016
2017 /* Buffered flash doesn't require an erase. */
2018 if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
2019 goto bce_nvram_erase_page_exit;
2020
2021 /* Build an erase command. */
2022 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
2023 BCE_NVM_COMMAND_DOIT;
2024
2025 /*
2026	 * Clear the DONE bit separately, set the NVRAM address to erase,
2027 * and issue the erase command.
2028 */
2029 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2030 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2031 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2032
2033 /* Wait for completion. */
2034 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2035 u32 val;
2036
2037 DELAY(5);
2038
2039 val = REG_RD(sc, BCE_NVM_COMMAND);
2040 if (val & BCE_NVM_COMMAND_DONE)
2041 break;
2042 }
2043
2044 if (j >= NVRAM_TIMEOUT_COUNT) {
2045 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
2046 rc = EBUSY;
2047 }
2048
2049bce_nvram_erase_page_exit:
2050 DBEXIT(BCE_VERBOSE_NVRAM);
2051 return (rc);
2052}
2053#endif /* BCE_NVRAM_WRITE_SUPPORT */
2054
2055
2056/****************************************************************************/
2057/* Read a dword (32 bits) from NVRAM. */
2058/* */
2059/* Read a 32 bit word from NVRAM. The caller is assumed to have already */
2060/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
2061/* */
2062/* Returns: */
2063/* 0 on success and the 32 bit value read, positive value on failure. */
2064/****************************************************************************/
2065static int
2066bce_nvram_read_dword(struct bce_softc *sc,
2067 u32 offset, u8 *ret_val, u32 cmd_flags)
2068{
2069 u32 cmd;
2070 int i, rc = 0;
2071
2072 DBENTER(BCE_EXTREME_NVRAM);
2073
2074 /* Build the command word. */
2075 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
2076
2077 /* Calculate the offset for buffered flash if translation is used. */
2078 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2079 offset = ((offset / sc->bce_flash_info->page_size) <<
2080 sc->bce_flash_info->page_bits) +
2081 (offset % sc->bce_flash_info->page_size);
2082 }
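	/*
	 * Worked example of the translation above, using purely
	 * illustrative values (not taken from flash_table): with
	 * page_size = 264 and page_bits = 9, a linear offset of 300
	 * maps to ((300 / 264) << 9) + (300 % 264) = 512 + 36 = 548.
	 */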
2083
2084 /*
2085 * Clear the DONE bit separately, set the address to read,
2086 * and issue the read.
2087 */
2088 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2089 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2090 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2091
2092 /* Wait for completion. */
2093 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
2094 u32 val;
2095
2096 DELAY(5);
2097
2098 val = REG_RD(sc, BCE_NVM_COMMAND);
2099 if (val & BCE_NVM_COMMAND_DONE) {
2100 val = REG_RD(sc, BCE_NVM_READ);
2101
2102 val = bce_be32toh(val);
2103 memcpy(ret_val, &val, 4);
2104 break;
2105 }
2106 }
2107
2108 /* Check for errors. */
2109 if (i >= NVRAM_TIMEOUT_COUNT) {
2110 BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
2111 "offset 0x%08X!\n", __FILE__, __LINE__, offset);
2112 rc = EBUSY;
2113 }
2114
2115 DBEXIT(BCE_EXTREME_NVRAM);
2116 return(rc);
2117}
2118
2119
2120#ifdef BCE_NVRAM_WRITE_SUPPORT
2121/****************************************************************************/
2122/* Write a dword (32 bits) to NVRAM. */
2123/* */
2124/* Write a 32 bit word to NVRAM. The caller is assumed to have already */
2125/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
2126/* enabled NVRAM write access. */
2127/* */
2128/* Returns: */
2129/* 0 on success, positive value on failure. */
2130/****************************************************************************/
2131static int
2132bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
2133 u32 cmd_flags)
2134{
2135 u32 cmd, val32;
2136 int j, rc = 0;
2137
2138 DBENTER(BCE_VERBOSE_NVRAM);
2139
2140 /* Build the command word. */
2141 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
2142
2143 /* Calculate the offset for buffered flash if translation is used. */
2144 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2145 offset = ((offset / sc->bce_flash_info->page_size) <<
2146 sc->bce_flash_info->page_bits) +
2147 (offset % sc->bce_flash_info->page_size);
2148 }
2149
2150 /*
2151 * Clear the DONE bit separately, convert NVRAM data to big-endian,
2152 * set the NVRAM address to write, and issue the write command
2153 */
2154 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2155 memcpy(&val32, val, 4);
2156 val32 = htobe32(val32);
2157 REG_WR(sc, BCE_NVM_WRITE, val32);
2158 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2159 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2160
2161 /* Wait for completion. */
2162 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2163 DELAY(5);
2164
2165 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
2166 break;
2167 }
2168 if (j >= NVRAM_TIMEOUT_COUNT) {
2169 BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
2170 "offset 0x%08X\n", __FILE__, __LINE__, offset);
2171 rc = EBUSY;
2172 }
2173
2174 DBEXIT(BCE_VERBOSE_NVRAM);
2175 return (rc);
2176}
2177#endif /* BCE_NVRAM_WRITE_SUPPORT */
2178
2179
2180/****************************************************************************/
2181/* Initialize NVRAM access. */
2182/* */
2183/* Identify the NVRAM device in use and prepare the NVRAM interface to */
2184/* access that device. */
2185/* */
2186/* Returns: */
2187/* 0 on success, positive value on failure. */
2188/****************************************************************************/
2189static int
2190bce_init_nvram(struct bce_softc *sc)
2191{
2192 u32 val;
2193 int j, entry_count, rc = 0;
2194 struct flash_spec *flash;
2195
2196 DBENTER(BCE_VERBOSE_NVRAM);
2197
2198 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2199 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2200 sc->bce_flash_info = &flash_5709;
2201 goto bce_init_nvram_get_flash_size;
2202 }
2203
2204 /* Determine the selected interface. */
2205 val = REG_RD(sc, BCE_NVM_CFG1);
2206
2207 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2208
2209 /*
2210 * Flash reconfiguration is required to support additional
2211 * NVRAM devices not directly supported in hardware.
2212 * Check if the flash interface was reconfigured
2213 * by the bootcode.
2214 */
2215
2216 if (val & 0x40000000) {
2217 /* Flash interface reconfigured by bootcode. */
2218
2219 DBPRINT(sc,BCE_INFO_LOAD,
2220 "bce_init_nvram(): Flash WAS reconfigured.\n");
2221
2222 for (j = 0, flash = &flash_table[0]; j < entry_count;
2223 j++, flash++) {
2224 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2225 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2226 sc->bce_flash_info = flash;
2227 break;
2228 }
2229 }
2230 } else {
2231 /* Flash interface not yet reconfigured. */
2232 u32 mask;
2233
2234 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2235 __FUNCTION__);
2236
2237 if (val & (1 << 23))
2238 mask = FLASH_BACKUP_STRAP_MASK;
2239 else
2240 mask = FLASH_STRAP_MASK;
2241
2242 /* Look for the matching NVRAM device configuration data. */
2243 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2244
2245 /* Check if the device matches any of the known devices. */
2246 if ((val & mask) == (flash->strapping & mask)) {
2247 /* Found a device match. */
2248 sc->bce_flash_info = flash;
2249
2250 /* Request access to the flash interface. */
2251 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2252 return rc;
2253
2254 /* Reconfigure the flash interface. */
2255 bce_enable_nvram_access(sc);
2256 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2257 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2258 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2259 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2260 bce_disable_nvram_access(sc);
2261 bce_release_nvram_lock(sc);
2262
2263 break;
2264 }
2265 }
2266 }
2267
2268 /* Check if a matching device was found. */
2269 if (j == entry_count) {
2270 sc->bce_flash_info = NULL;
2271 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2272 __FILE__, __LINE__);
2273 DBEXIT(BCE_VERBOSE_NVRAM);
2274 return (ENODEV);
2275 }
2276
2277bce_init_nvram_get_flash_size:
2278 /* Write the flash config data to the shared memory interface. */
2279 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
2280 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2281 if (val)
2282 sc->bce_flash_size = val;
2283 else
2284 sc->bce_flash_size = sc->bce_flash_info->total_size;
2285
2286 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2287 __FUNCTION__, sc->bce_flash_info->name,
2288 sc->bce_flash_info->total_size);
2289
2290 DBEXIT(BCE_VERBOSE_NVRAM);
2291 return rc;
2292}
2293
2294
2295/****************************************************************************/
2296/* Read an arbitrary range of data from NVRAM. */
2297/* */
2298/* Prepares the NVRAM interface for access and reads the requested data */
2299/* into the supplied buffer. */
2300/* */
2301/* Returns: */
2302/* 0 on success and the data read, positive value on failure. */
2303/****************************************************************************/
2304static int
2305bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2306 int buf_size)
2307{
2308 int rc = 0;
2309 u32 cmd_flags, offset32, len32, extra;
2310
2311 DBENTER(BCE_VERBOSE_NVRAM);
2312
2313 if (buf_size == 0)
2314 goto bce_nvram_read_exit;
2315
2316 /* Request access to the flash interface. */
2317 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2318 goto bce_nvram_read_exit;
2319
2320 /* Enable access to flash interface */
2321 bce_enable_nvram_access(sc);
2322
2323 len32 = buf_size;
2324 offset32 = offset;
2325 extra = 0;
2326
2327 cmd_flags = 0;
2328
2329 if (offset32 & 3) {
2330 u8 buf[4];
2331 u32 pre_len;
2332
2333 offset32 &= ~3;
2334 pre_len = 4 - (offset & 3);
2335
2336 if (pre_len >= len32) {
2337 pre_len = len32;
2338 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2339 }
2340 else {
2341 cmd_flags = BCE_NVM_COMMAND_FIRST;
2342 }
2343
2344 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2345
2346 if (rc)
2347 return rc;
2348
2349 memcpy(ret_buf, buf + (offset & 3), pre_len);
2350
2351 offset32 += 4;
2352 ret_buf += pre_len;
2353 len32 -= pre_len;
2354 }
2355
2356 if (len32 & 3) {
2357 extra = 4 - (len32 & 3);
2358 len32 = (len32 + 4) & ~3;
2359 }
2360
2361 if (len32 == 4) {
2362 u8 buf[4];
2363
2364 if (cmd_flags)
2365 cmd_flags = BCE_NVM_COMMAND_LAST;
2366 else
2367 cmd_flags = BCE_NVM_COMMAND_FIRST |
2368 BCE_NVM_COMMAND_LAST;
2369
2370 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2371
2372 memcpy(ret_buf, buf, 4 - extra);
2373 }
2374 else if (len32 > 0) {
2375 u8 buf[4];
2376
2377 /* Read the first word. */
2378 if (cmd_flags)
2379 cmd_flags = 0;
2380 else
2381 cmd_flags = BCE_NVM_COMMAND_FIRST;
2382
2383 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2384
2385 /* Advance to the next dword. */
2386 offset32 += 4;
2387 ret_buf += 4;
2388 len32 -= 4;
2389
2390 while (len32 > 4 && rc == 0) {
2391 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2392
2393 /* Advance to the next dword. */
2394 offset32 += 4;
2395 ret_buf += 4;
2396 len32 -= 4;
2397 }
2398
2399 if (rc)
2400 goto bce_nvram_read_locked_exit;
2401
2402 cmd_flags = BCE_NVM_COMMAND_LAST;
2403 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2404
2405 memcpy(ret_buf, buf, 4 - extra);
2406 }
2407
2408bce_nvram_read_locked_exit:
2409 /* Disable access to flash interface and release the lock. */
2410 bce_disable_nvram_access(sc);
2411 bce_release_nvram_lock(sc);
2412
2413bce_nvram_read_exit:
2414 DBEXIT(BCE_VERBOSE_NVRAM);
2415 return rc;
2416}
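/*
 * Illustrative sketch only: a typical caller of bce_nvram_read(), similar
 * to what bce_nvram_test() does further below to fetch the NVRAM magic
 * value.  Error handling is abbreviated.
 *
 *	u8 magic[4];
 *
 *	if (bce_nvram_read(sc, 0, magic, sizeof(magic)) == 0)
 *		... magic[] now holds the first dword of NVRAM ...
 */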
2417
2418
2419#ifdef BCE_NVRAM_WRITE_SUPPORT
2420/****************************************************************************/
2421/* Write an arbitrary range of data to NVRAM.                               */
2422/* */
2423/* Prepares the NVRAM interface for write access and writes the requested */
2424/* data from the supplied buffer. The caller is responsible for */
2425/* calculating any appropriate CRCs. */
2426/* */
2427/* Returns: */
2428/* 0 on success, positive value on failure. */
2429/****************************************************************************/
2430static int
2431bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2432 int buf_size)
2433{
2434 u32 written, offset32, len32;
2435 u8 *buf, start[4], end[4];
2436 int rc = 0;
2437 int align_start, align_end;
2438
2439 DBENTER(BCE_VERBOSE_NVRAM);
2440
2441 buf = data_buf;
2442 offset32 = offset;
2443 len32 = buf_size;
2444 align_start = align_end = 0;
2445
2446 if ((align_start = (offset32 & 3))) {
2447 offset32 &= ~3;
2448 len32 += align_start;
2449 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2450 goto bce_nvram_write_exit;
2451 }
2452
2453 if (len32 & 3) {
2454 if ((len32 > 4) || !align_start) {
2455 align_end = 4 - (len32 & 3);
2456 len32 += align_end;
2457 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2458 end, 4))) {
2459 goto bce_nvram_write_exit;
2460 }
2461 }
2462 }
2463
2464 if (align_start || align_end) {
2465 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2466		if (buf == NULL) {
2467 rc = ENOMEM;
2468 goto bce_nvram_write_exit;
2469 }
2470
2471 if (align_start) {
2472 memcpy(buf, start, 4);
2473 }
2474
2475 if (align_end) {
2476 memcpy(buf + len32 - 4, end, 4);
2477 }
2478 memcpy(buf + align_start, data_buf, buf_size);
2479 }
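	/*
	 * Worked example of the alignment fix-up above (hypothetical
	 * values): for offset = 6 and buf_size = 5, align_start = 2,
	 * offset32 becomes 4, and len32 grows from 7 to 8 with
	 * align_end = 1.  The staging buffer then holds two dwords:
	 * bytes 0-1 come from the dword read at offset 4, bytes 2-6 are
	 * the caller's data, and byte 7 comes from the dword read at
	 * offset 8.
	 */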
2480
2481 written = 0;
2482 while ((written < len32) && (rc == 0)) {
2483 u32 page_start, page_end, data_start, data_end;
2484 u32 addr, cmd_flags;
2485 int i;
2486 u8 flash_buffer[264];
2487
2488 /* Find the page_start addr */
2489 page_start = offset32 + written;
2490 page_start -= (page_start % sc->bce_flash_info->page_size);
2491 /* Find the page_end addr */
2492 page_end = page_start + sc->bce_flash_info->page_size;
2493 /* Find the data_start addr */
2494 data_start = (written == 0) ? offset32 : page_start;
2495 /* Find the data_end addr */
2496 data_end = (page_end > offset32 + len32) ?
2497 (offset32 + len32) : page_end;
2498
2499 /* Request access to the flash interface. */
2500 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2501 goto bce_nvram_write_exit;
2502
2503 /* Enable access to flash interface */
2504 bce_enable_nvram_access(sc);
2505
2506 cmd_flags = BCE_NVM_COMMAND_FIRST;
2507 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2508 int j;
2509
2510 /* Read the whole page into the buffer
2511			 * (non-buffered flash only) */
2512 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2513 if (j == (sc->bce_flash_info->page_size - 4)) {
2514 cmd_flags |= BCE_NVM_COMMAND_LAST;
2515 }
2516 rc = bce_nvram_read_dword(sc,
2517 page_start + j,
2518 &flash_buffer[j],
2519 cmd_flags);
2520
2521 if (rc)
2522 goto bce_nvram_write_locked_exit;
2523
2524 cmd_flags = 0;
2525 }
2526 }
2527
2528 /* Enable writes to flash interface (unlock write-protect) */
2529 if ((rc = bce_enable_nvram_write(sc)) != 0)
2530 goto bce_nvram_write_locked_exit;
2531
2532 /* Erase the page */
2533 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2534 goto bce_nvram_write_locked_exit;
2535
2536 /* Re-enable the write again for the actual write */
2537 bce_enable_nvram_write(sc);
2538
2539 /* Loop to write back the buffer data from page_start to
2540 * data_start */
2541 i = 0;
2542 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2543 for (addr = page_start; addr < data_start;
2544 addr += 4, i += 4) {
2545
2546 rc = bce_nvram_write_dword(sc, addr,
2547 &flash_buffer[i], cmd_flags);
2548
2549 if (rc != 0)
2550 goto bce_nvram_write_locked_exit;
2551
2552 cmd_flags = 0;
2553 }
2554 }
2555
2556 /* Loop to write the new data from data_start to data_end */
2557 for (addr = data_start; addr < data_end; addr += 4, i++) {
2558 if ((addr == page_end - 4) ||
2559 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2560 (addr == data_end - 4))) {
2561
2562 cmd_flags |= BCE_NVM_COMMAND_LAST;
2563 }
2564 rc = bce_nvram_write_dword(sc, addr, buf,
2565 cmd_flags);
2566
2567 if (rc != 0)
2568 goto bce_nvram_write_locked_exit;
2569
2570 cmd_flags = 0;
2571 buf += 4;
2572 }
2573
2574 /* Loop to write back the buffer data from data_end
2575 * to page_end */
2576 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2577 for (addr = data_end; addr < page_end;
2578 addr += 4, i += 4) {
2579
2580 if (addr == page_end-4) {
2581 cmd_flags = BCE_NVM_COMMAND_LAST;
2582 }
2583 rc = bce_nvram_write_dword(sc, addr,
2584 &flash_buffer[i], cmd_flags);
2585
2586 if (rc != 0)
2587 goto bce_nvram_write_locked_exit;
2588
2589 cmd_flags = 0;
2590 }
2591 }
2592
2593 /* Disable writes to flash interface (lock write-protect) */
2594 bce_disable_nvram_write(sc);
2595
2596 /* Disable access to flash interface */
2597 bce_disable_nvram_access(sc);
2598 bce_release_nvram_lock(sc);
2599
2600 /* Increment written */
2601 written += data_end - data_start;
2602 }
2603
2604 goto bce_nvram_write_exit;
2605
2606bce_nvram_write_locked_exit:
2607 bce_disable_nvram_write(sc);
2608 bce_disable_nvram_access(sc);
2609 bce_release_nvram_lock(sc);
2610
2611bce_nvram_write_exit:
2612 if (align_start || align_end)
2613 free(buf, M_DEVBUF);
2614
2615 DBEXIT(BCE_VERBOSE_NVRAM);
2616 return (rc);
2617}
2618#endif /* BCE_NVRAM_WRITE_SUPPORT */
2619
2620
2621/****************************************************************************/
2622/* Verifies that NVRAM is accessible and contains valid data. */
2623/* */
2624/* Reads the configuration data from NVRAM and verifies that the CRC is */
2625/* correct. */
2626/* */
2627/* Returns: */
2628/* 0 on success, positive value on failure. */
2629/****************************************************************************/
2630static int
2631bce_nvram_test(struct bce_softc *sc)
2632{
2633 u32 buf[BCE_NVRAM_SIZE / 4];
2634 u8 *data = (u8 *) buf;
2635 int rc = 0;
2636 u32 magic, csum;
2637
2638 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2639
2640 /*
2641 * Check that the device NVRAM is valid by reading
2642 * the magic value at offset 0.
2643 */
2644 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2645 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n",
2646 __FILE__, __LINE__);
2647 goto bce_nvram_test_exit;
2648 }
2649
2650 /*
2651 * Verify that offset 0 of the NVRAM contains
2652 * a valid magic number.
2653 */
2654 magic = bce_be32toh(buf[0]);
2655 if (magic != BCE_NVRAM_MAGIC) {
2656 rc = ENODEV;
2657 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! "
2658 "Expected: 0x%08X, Found: 0x%08X\n",
2659 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2660 goto bce_nvram_test_exit;
2661 }
2662
2663 /*
2664 * Verify that the device NVRAM includes valid
2665 * configuration data.
2666 */
2667 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2668 BCE_PRINTF("%s(%d): Unable to read manufacturing "
2669		    "information from NVRAM!\n", __FILE__, __LINE__);
2670 goto bce_nvram_test_exit;
2671 }
2672
2673 csum = ether_crc32_le(data, 0x100);
2674 if (csum != BCE_CRC32_RESIDUAL) {
2675 rc = ENODEV;
2676 BCE_PRINTF("%s(%d): Invalid manufacturing information "
2677 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2678 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2679 goto bce_nvram_test_exit;
2680 }
2681
2682 csum = ether_crc32_le(data + 0x100, 0x100);
2683 if (csum != BCE_CRC32_RESIDUAL) {
2684 rc = ENODEV;
2685 BCE_PRINTF("%s(%d): Invalid feature configuration "
2686 "information NVRAM CRC! Expected: 0x%08X, "
2687		    "Found: 0x%08X\n", __FILE__, __LINE__,
2688 BCE_CRC32_RESIDUAL, csum);
2689 }
2690
2691bce_nvram_test_exit:
2692 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2693 return rc;
2694}
2695
2696
2697/****************************************************************************/
2698/* Identifies the current media type of the controller and sets the PHY */
2699/* address. */
2700/* */
2701/* Returns: */
2702/* Nothing. */
2703/****************************************************************************/
2704static void
2705bce_get_media(struct bce_softc *sc)
2706{
2707 u32 val;
2708
2709 DBENTER(BCE_VERBOSE_PHY);
2710
2711 /* Assume PHY address for copper controllers. */
2712 sc->bce_phy_addr = 1;
2713
2714 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2715 u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
2716 u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2717 u32 strap;
2718
2719 /*
2720 * The BCM5709S is software configurable
2721 * for Copper or SerDes operation.
2722 */
2723 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2724 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
2725 "for copper.\n");
2726 goto bce_get_media_exit;
2727 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2728 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
2729 "for dual media.\n");
2730 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2731 goto bce_get_media_exit;
2732 }
2733
2734 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2735 strap = (val &
2736 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2737 else
2738 strap = (val &
2739 BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
2740
2741 if (pci_get_function(sc->bce_dev) == 0) {
2742 switch (strap) {
2743 case 0x4:
2744 case 0x5:
2745 case 0x6:
2746 DBPRINT(sc, BCE_INFO_LOAD,
2747 "BCM5709 s/w configured for SerDes.\n");
2748 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2749 break;
2750 default:
2751 DBPRINT(sc, BCE_INFO_LOAD,
2752 "BCM5709 s/w configured for Copper.\n");
2753 break;
2754 }
2755 } else {
2756 switch (strap) {
2757 case 0x1:
2758 case 0x2:
2759 case 0x4:
2760 DBPRINT(sc, BCE_INFO_LOAD,
2761 "BCM5709 s/w configured for SerDes.\n");
2762 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2763 break;
2764 default:
2765 DBPRINT(sc, BCE_INFO_LOAD,
2766 "BCM5709 s/w configured for Copper.\n");
2767 break;
2768 }
2769 }
2770
2771 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
2772 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2773
2774 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
2775
2776 sc->bce_flags |= BCE_NO_WOL_FLAG;
2777
2778 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
2779 sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG;
2780
2781 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2782 /* 5708S/09S/16S use a separate PHY for SerDes. */
2783 sc->bce_phy_addr = 2;
2784
2785 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
2786 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
2787 sc->bce_phy_flags |=
2788 BCE_PHY_2_5G_CAPABLE_FLAG;
2789 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb "
2790 "capable adapter\n");
2791 }
2792 }
2793 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
2794 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
2795 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
2796
2797bce_get_media_exit:
2798 DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
2799 "Using PHY address %d.\n", sc->bce_phy_addr);
2800
2801 DBEXIT(BCE_VERBOSE_PHY);
2802}
2803
2804
2805/****************************************************************************/
2806/* Performs PHY initialization required before MII drivers access the */
2807/* device. */
2808/* */
2809/* Returns: */
2810/* Nothing. */
2811/****************************************************************************/
2812static void
2813bce_init_media(struct bce_softc *sc)
2814{
2815 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
2816 /*
2817 * Configure 5709S/5716S PHYs to use traditional IEEE
2818 * Clause 22 method. Otherwise we have no way to attach
2819 * the PHY in mii(4) layer. PHY specific configuration
2820 * is done in mii layer.
2821 */
2822
2823 /* Select auto-negotiation MMD of the PHY. */
2824 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
2825 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2826 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
2827 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2828
2829 /* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
2830 bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
2831 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2832 }
2833}
2834
2835
2836/****************************************************************************/
2837/* Free any DMA memory owned by the driver. */
2838/* */
2839/* Scans through each data structure that requires DMA memory and frees     */
2840/* the memory if allocated. */
2841/* */
2842/* Returns: */
2843/* Nothing. */
2844/****************************************************************************/
2845static void
2846bce_dma_free(struct bce_softc *sc)
2847{
2848 int i;
2849
2850 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2851
2852 /* Free, unmap, and destroy the status block. */
2853 if (sc->status_block != NULL) {
2854 bus_dmamem_free(
2855 sc->status_tag,
2856 sc->status_block,
2857 sc->status_map);
2858 sc->status_block = NULL;
2859 }
2860
2861 if (sc->status_map != NULL) {
2862 bus_dmamap_unload(
2863 sc->status_tag,
2864 sc->status_map);
2865 bus_dmamap_destroy(sc->status_tag,
2866 sc->status_map);
2867 sc->status_map = NULL;
2868 }
2869
2870 if (sc->status_tag != NULL) {
2871 bus_dma_tag_destroy(sc->status_tag);
2872 sc->status_tag = NULL;
2873 }
2874
2875
2876 /* Free, unmap, and destroy the statistics block. */
2877 if (sc->stats_block != NULL) {
2878 bus_dmamem_free(
2879 sc->stats_tag,
2880 sc->stats_block,
2881 sc->stats_map);
2882 sc->stats_block = NULL;
2883 }
2884
2885 if (sc->stats_map != NULL) {
2886 bus_dmamap_unload(
2887 sc->stats_tag,
2888 sc->stats_map);
2889 bus_dmamap_destroy(sc->stats_tag,
2890 sc->stats_map);
2891 sc->stats_map = NULL;
2892 }
2893
2894 if (sc->stats_tag != NULL) {
2895 bus_dma_tag_destroy(sc->stats_tag);
2896 sc->stats_tag = NULL;
2897 }
2898
2899
2900 /* Free, unmap and destroy all context memory pages. */
2901 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2902 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2903 for (i = 0; i < sc->ctx_pages; i++ ) {
2904 if (sc->ctx_block[i] != NULL) {
2905 bus_dmamem_free(
2906 sc->ctx_tag,
2907 sc->ctx_block[i],
2908 sc->ctx_map[i]);
2909 sc->ctx_block[i] = NULL;
2910 }
2911
2912 if (sc->ctx_map[i] != NULL) {
2913 bus_dmamap_unload(
2914 sc->ctx_tag,
2915 sc->ctx_map[i]);
2916 bus_dmamap_destroy(
2917 sc->ctx_tag,
2918 sc->ctx_map[i]);
2919 sc->ctx_map[i] = NULL;
2920 }
2921 }
2922
2923 /* Destroy the context memory tag. */
2924 if (sc->ctx_tag != NULL) {
2925 bus_dma_tag_destroy(sc->ctx_tag);
2926 sc->ctx_tag = NULL;
2927 }
2928 }
2929
2930
2931 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2932 for (i = 0; i < TX_PAGES; i++ ) {
2933 if (sc->tx_bd_chain[i] != NULL) {
2934 bus_dmamem_free(
2935 sc->tx_bd_chain_tag,
2936 sc->tx_bd_chain[i],
2937 sc->tx_bd_chain_map[i]);
2938 sc->tx_bd_chain[i] = NULL;
2939 }
2940
2941 if (sc->tx_bd_chain_map[i] != NULL) {
2942 bus_dmamap_unload(
2943 sc->tx_bd_chain_tag,
2944 sc->tx_bd_chain_map[i]);
2945 bus_dmamap_destroy(
2946 sc->tx_bd_chain_tag,
2947 sc->tx_bd_chain_map[i]);
2948 sc->tx_bd_chain_map[i] = NULL;
2949 }
2950 }
2951
2952 /* Destroy the TX buffer descriptor tag. */
2953 if (sc->tx_bd_chain_tag != NULL) {
2954 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2955 sc->tx_bd_chain_tag = NULL;
2956 }
2957
2958
2959 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2960 for (i = 0; i < RX_PAGES; i++ ) {
2961 if (sc->rx_bd_chain[i] != NULL) {
2962 bus_dmamem_free(
2963 sc->rx_bd_chain_tag,
2964 sc->rx_bd_chain[i],
2965 sc->rx_bd_chain_map[i]);
2966 sc->rx_bd_chain[i] = NULL;
2967 }
2968
2969 if (sc->rx_bd_chain_map[i] != NULL) {
2970 bus_dmamap_unload(
2971 sc->rx_bd_chain_tag,
2972 sc->rx_bd_chain_map[i]);
2973 bus_dmamap_destroy(
2974 sc->rx_bd_chain_tag,
2975 sc->rx_bd_chain_map[i]);
2976 sc->rx_bd_chain_map[i] = NULL;
2977 }
2978 }
2979
2980 /* Destroy the RX buffer descriptor tag. */
2981 if (sc->rx_bd_chain_tag != NULL) {
2982 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2983 sc->rx_bd_chain_tag = NULL;
2984 }
2985
2986
2987#ifdef BCE_JUMBO_HDRSPLIT
2988 /* Free, unmap and destroy all page buffer descriptor chain pages. */
2989 for (i = 0; i < PG_PAGES; i++ ) {
2990 if (sc->pg_bd_chain[i] != NULL) {
2991 bus_dmamem_free(
2992 sc->pg_bd_chain_tag,
2993 sc->pg_bd_chain[i],
2994 sc->pg_bd_chain_map[i]);
2995 sc->pg_bd_chain[i] = NULL;
2996 }
2997
2998 if (sc->pg_bd_chain_map[i] != NULL) {
2999 bus_dmamap_unload(
3000 sc->pg_bd_chain_tag,
3001 sc->pg_bd_chain_map[i]);
3002 bus_dmamap_destroy(
3003 sc->pg_bd_chain_tag,
3004 sc->pg_bd_chain_map[i]);
3005 sc->pg_bd_chain_map[i] = NULL;
3006 }
3007 }
3008
3009 /* Destroy the page buffer descriptor tag. */
3010 if (sc->pg_bd_chain_tag != NULL) {
3011 bus_dma_tag_destroy(sc->pg_bd_chain_tag);
3012 sc->pg_bd_chain_tag = NULL;
3013 }
3014#endif
3015
3016
3017 /* Unload and destroy the TX mbuf maps. */
3018 for (i = 0; i < TOTAL_TX_BD; i++) {
3019 if (sc->tx_mbuf_map[i] != NULL) {
3020 bus_dmamap_unload(sc->tx_mbuf_tag,
3021 sc->tx_mbuf_map[i]);
3022 bus_dmamap_destroy(sc->tx_mbuf_tag,
3023 sc->tx_mbuf_map[i]);
3024 sc->tx_mbuf_map[i] = NULL;
3025 }
3026 }
3027
3028 /* Destroy the TX mbuf tag. */
3029 if (sc->tx_mbuf_tag != NULL) {
3030 bus_dma_tag_destroy(sc->tx_mbuf_tag);
3031 sc->tx_mbuf_tag = NULL;
3032 }
3033
3034 /* Unload and destroy the RX mbuf maps. */
3035 for (i = 0; i < TOTAL_RX_BD; i++) {
3036 if (sc->rx_mbuf_map[i] != NULL) {
3037 bus_dmamap_unload(sc->rx_mbuf_tag,
3038 sc->rx_mbuf_map[i]);
3039 bus_dmamap_destroy(sc->rx_mbuf_tag,
3040 sc->rx_mbuf_map[i]);
3041 sc->rx_mbuf_map[i] = NULL;
3042 }
3043 }
3044
3045 /* Destroy the RX mbuf tag. */
3046 if (sc->rx_mbuf_tag != NULL) {
3047 bus_dma_tag_destroy(sc->rx_mbuf_tag);
3048 sc->rx_mbuf_tag = NULL;
3049 }
3050
3051#ifdef BCE_JUMBO_HDRSPLIT
3052 /* Unload and destroy the page mbuf maps. */
3053 for (i = 0; i < TOTAL_PG_BD; i++) {
3054 if (sc->pg_mbuf_map[i] != NULL) {
3055 bus_dmamap_unload(sc->pg_mbuf_tag,
3056 sc->pg_mbuf_map[i]);
3057 bus_dmamap_destroy(sc->pg_mbuf_tag,
3058 sc->pg_mbuf_map[i]);
3059 sc->pg_mbuf_map[i] = NULL;
3060 }
3061 }
3062
3063 /* Destroy the page mbuf tag. */
3064 if (sc->pg_mbuf_tag != NULL) {
3065 bus_dma_tag_destroy(sc->pg_mbuf_tag);
3066 sc->pg_mbuf_tag = NULL;
3067 }
3068#endif
3069
3070 /* Destroy the parent tag */
3071 if (sc->parent_tag != NULL) {
3072 bus_dma_tag_destroy(sc->parent_tag);
3073 sc->parent_tag = NULL;
3074 }
3075
3076 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
3077}
3078
3079
3080/****************************************************************************/
3081/* Get DMA memory from the OS. */
3082/* */
3083/* Validates that the OS has provided DMA buffers in response to a */
3084/* bus_dmamap_load() call and saves the physical address of those buffers. */
3085/* When the callback is used the OS will return 0 for the mapping function  */
3086/* (bus_dmamap_load()), so the callback signals any failure back to the     */
3087/* caller by setting the returned bus address to 0.                         */
3088/* */
3089/* Returns: */
3090/* Nothing. */
3091/****************************************************************************/
3092static void
3093bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3094{
3095 bus_addr_t *busaddr = arg;
3096
3097 KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!",
3098 __FUNCTION__, nseg));
3099 /* Simulate a mapping failure. */
3100 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control),
3101 error = ENOMEM);
3102
3103 /* ToDo: How to increment debug sim_count variable here? */
3104
3105 /* Check for an error and signal the caller that an error occurred. */
3106 if (error) {
3107 *busaddr = 0;
3108 } else {
3109 *busaddr = segs->ds_addr;
3110 }
3111
3112 return;
3113}
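/*
 * Illustrative sketch only: the bus_dmamap_load()/callback pattern that
 * this helper supports, as used repeatedly in bce_dma_alloc() below.  On
 * success the callback stores the single segment's bus address through
 * the supplied bus_addr_t pointer; a stored value of 0 indicates that
 * the mapping failed.
 *
 *	error = bus_dmamap_load(tag, map, vaddr, size, bce_dma_map_addr,
 *	    &paddr, BUS_DMA_NOWAIT);
 *	if (error != 0 || paddr == 0)
 *		... handle the mapping failure ...
 */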
3114
3115
3116/****************************************************************************/
3117/* Allocate any DMA memory needed by the driver. */
3118/* */
3119/* Allocates DMA memory needed for the various global structures needed by */
3120/* hardware. */
3121/* */
3122/* Memory alignment requirements: */
3123/* +-----------------+----------+----------+----------+----------+ */
3124/* | | 5706 | 5708 | 5709 | 5716 | */
3125/* +-----------------+----------+----------+----------+----------+ */
3126/* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
3127/* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
3128/* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
3129/* |PG Buffers | none | none | none | none | */
3130/* |TX Buffers | none | none | none | none | */
3131/* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
3132/* |Context Memory | | | | | */
3133/* +-----------------+----------+----------+----------+----------+ */
3134/* */
3135/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
3136/* */
3137/* Returns: */
3138/* 0 for success, positive value for failure. */
3139/****************************************************************************/
3140static int
3141bce_dma_alloc(device_t dev)
3142{
3143 struct bce_softc *sc;
3144 int i, error, rc = 0;
3145 bus_size_t max_size, max_seg_size;
3146 int max_segments;
3147
3148 sc = device_get_softc(dev);
3149
3150 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3151
3152 /*
3153 * Allocate the parent bus DMA tag appropriate for PCI.
3154 */
3155 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY,
3156 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3157 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
3158 &sc->parent_tag)) {
3159 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
3160 __FILE__, __LINE__);
3161 rc = ENOMEM;
3162 goto bce_dma_alloc_exit;
3163 }
3164
3165 /*
3166 * Create a DMA tag for the status block, allocate and clear the
3167 * memory, map the memory into DMA space, and fetch the physical
3168 * address of the block.
3169 */
3170 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
3171 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
3172 NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
3173 0, NULL, NULL, &sc->status_tag)) {
3174 BCE_PRINTF("%s(%d): Could not allocate status block "
3175 "DMA tag!\n", __FILE__, __LINE__);
3176 rc = ENOMEM;
3177 goto bce_dma_alloc_exit;
3178 }
3179
3180 if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
3181 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3182 &sc->status_map)) {
3183 BCE_PRINTF("%s(%d): Could not allocate status block "
3184 "DMA memory!\n", __FILE__, __LINE__);
3185 rc = ENOMEM;
3186 goto bce_dma_alloc_exit;
3187 }
3188
3189 error = bus_dmamap_load(sc->status_tag, sc->status_map,
3190 sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr,
3191 &sc->status_block_paddr, BUS_DMA_NOWAIT);
3192
3193 if (error) {
3194 BCE_PRINTF("%s(%d): Could not map status block "
3195 "DMA memory!\n", __FILE__, __LINE__);
3196 rc = ENOMEM;
3197 goto bce_dma_alloc_exit;
3198 }
3199
3200 DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n",
3201 __FUNCTION__, (uintmax_t) sc->status_block_paddr);
3202
3203 /*
3204 * Create a DMA tag for the statistics block, allocate and clear the
3205 * memory, map the memory into DMA space, and fetch the physical
3206 * address of the block.
3207 */
3208 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
3209 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
3210 NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
3211 0, NULL, NULL, &sc->stats_tag)) {
3212 BCE_PRINTF("%s(%d): Could not allocate statistics block "
3213 "DMA tag!\n", __FILE__, __LINE__);
3214 rc = ENOMEM;
3215 goto bce_dma_alloc_exit;
3216 }
3217
3218 if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
3219 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) {
3220 BCE_PRINTF("%s(%d): Could not allocate statistics block "
3221 "DMA memory!\n", __FILE__, __LINE__);
3222 rc = ENOMEM;
3223 goto bce_dma_alloc_exit;
3224 }
3225
3226 error = bus_dmamap_load(sc->stats_tag, sc->stats_map,
3227 sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr,
3228 &sc->stats_block_paddr, BUS_DMA_NOWAIT);
3229
3230 if(error) {
3231 BCE_PRINTF("%s(%d): Could not map statistics block "
3232 "DMA memory!\n", __FILE__, __LINE__);
3233 rc = ENOMEM;
3234 goto bce_dma_alloc_exit;
3235 }
3236
3237 DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n",
3238 __FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3239
3240 /* BCM5709 uses host memory as cache for context memory. */
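	/* 0x2000 bytes of host memory are set aside for the context cache, */
	/* split across as many CPU-sized pages as needed (at least one). */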
3241 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3242 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3243 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
3244 if (sc->ctx_pages == 0)
3245 sc->ctx_pages = 1;
3246
3247 DBRUNIF((sc->ctx_pages > 512),
3248 BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3249 __FILE__, __LINE__, sc->ctx_pages));
3250
3251 /*
3252 * Create a DMA tag for the context pages,
3253 * allocate and clear the memory, map the
3254 * memory into DMA space, and fetch the
3255 * physical address of the block.
3256 */
3257 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3258 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
3259 NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
3260 0, NULL, NULL, &sc->ctx_tag)) {
3261 BCE_PRINTF("%s(%d): Could not allocate CTX "
3262 "DMA tag!\n", __FILE__, __LINE__);
3263 rc = ENOMEM;
3264 goto bce_dma_alloc_exit;
3265 }
3266
3267 for (i = 0; i < sc->ctx_pages; i++) {
3268
3269 if (bus_dmamem_alloc(sc->ctx_tag,
3270 (void **)&sc->ctx_block[i],
3271 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3272 &sc->ctx_map[i])) {
3273 BCE_PRINTF("%s(%d): Could not allocate CTX "
3274 "DMA memory!\n", __FILE__, __LINE__);
3275 rc = ENOMEM;
3276 goto bce_dma_alloc_exit;
3277 }
3278
3279 error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
3280 sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr,
3281 &sc->ctx_paddr[i], BUS_DMA_NOWAIT);
3282
3283 if (error) {
3284 BCE_PRINTF("%s(%d): Could not map CTX "
3285 "DMA memory!\n", __FILE__, __LINE__);
3286 rc = ENOMEM;
3287 goto bce_dma_alloc_exit;
3288 }
3289
3290 DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] "
3291 "= 0x%jX\n", __FUNCTION__, i,
3292 (uintmax_t) sc->ctx_paddr[i]);
3293 }
3294 }
3295
3296 /*
3297 * Create a DMA tag for the TX buffer descriptor chain,
3298 * allocate and clear the memory, and fetch the
3299 * physical address of the block.
3300 */
3301 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
3302 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3303 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0,
3304 NULL, NULL, &sc->tx_bd_chain_tag)) {
3305 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3306 "chain DMA tag!\n", __FILE__, __LINE__);
3307 rc = ENOMEM;
3308 goto bce_dma_alloc_exit;
3309 }
3310
3311 for (i = 0; i < TX_PAGES; i++) {
3312
3313 if (bus_dmamem_alloc(sc->tx_bd_chain_tag,
3314 (void **)&sc->tx_bd_chain[i],
3315 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3316 &sc->tx_bd_chain_map[i])) {
3317 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3318 "chain DMA memory!\n", __FILE__, __LINE__);
3319 rc = ENOMEM;
3320 goto bce_dma_alloc_exit;
3321 }
3322
3323 error = bus_dmamap_load(sc->tx_bd_chain_tag,
3324 sc->tx_bd_chain_map[i], sc->tx_bd_chain[i],
3325 BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr,
3326 &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3327
3328 if (error) {
3329 BCE_PRINTF("%s(%d): Could not map TX descriptor "
3330 "chain DMA memory!\n", __FILE__, __LINE__);
3331 rc = ENOMEM;
3332 goto bce_dma_alloc_exit;
3333 }
3334
3335 DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = "
3336 "0x%jX\n", __FUNCTION__, i,
3337 (uintmax_t) sc->tx_bd_chain_paddr[i]);
3338 }
3339
3340 /* Check the required size before mapping to conserve resources. */
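	/* With TSO the TX mbuf tag must cover a frame of up to */
	/* BCE_TSO_MAX_SIZE bytes in BCE_MAX_SEGMENTS segments; without TSO */
	/* each frame is limited to BCE_MAX_SEGMENTS standard mbuf clusters. */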
3341 if (bce_tso_enable) {
3342 max_size = BCE_TSO_MAX_SIZE;
3343 max_segments = BCE_MAX_SEGMENTS;
3344 max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3345 } else {
3346 max_size = MCLBYTES * BCE_MAX_SEGMENTS;
3347 max_segments = BCE_MAX_SEGMENTS;
3348 max_seg_size = MCLBYTES;
3349 }
3350
3351 /* Create a DMA tag for TX mbufs. */
3352 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
3353 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size,
3354 max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) {
3355 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3356 __FILE__, __LINE__);
3357 rc = ENOMEM;
3358 goto bce_dma_alloc_exit;
3359 }
3360
3361 /* Create DMA maps for the TX mbuf clusters. */
3362 for (i = 0; i < TOTAL_TX_BD; i++) {
3363 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3364 &sc->tx_mbuf_map[i])) {
3365 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA "
3366 "map!\n", __FILE__, __LINE__);
3367 rc = ENOMEM;
3368 goto bce_dma_alloc_exit;
3369 }
3370 }
3371
3372 /*
3373 * Create a DMA tag for the RX buffer descriptor chain,
3374 * allocate and clear the memory, and fetch the physical
3375 * address of the blocks.
3376 */
3377 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3378 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
3379 sc->max_bus_addr, NULL, NULL,
3380 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
3381 0, NULL, NULL, &sc->rx_bd_chain_tag)) {
3382 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3383 "DMA tag!\n", __FILE__, __LINE__);
3384 rc = ENOMEM;
3385 goto bce_dma_alloc_exit;
3386 }
3387
3388 for (i = 0; i < RX_PAGES; i++) {
3389
3390 if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3391 (void **)&sc->rx_bd_chain[i],
3392 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3393 &sc->rx_bd_chain_map[i])) {
3394 BCE_PRINTF("%s(%d): Could not allocate RX descriptor "
3395 "chain DMA memory!\n", __FILE__, __LINE__);
3396 rc = ENOMEM;
3397 goto bce_dma_alloc_exit;
3398 }
3399
3400 error = bus_dmamap_load(sc->rx_bd_chain_tag,
3401 sc->rx_bd_chain_map[i], sc->rx_bd_chain[i],
3402 BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr,
3403 &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3404
3405 if (error) {
3406 BCE_PRINTF("%s(%d): Could not map RX descriptor "
3407 "chain DMA memory!\n", __FILE__, __LINE__);
3408 rc = ENOMEM;
3409 goto bce_dma_alloc_exit;
3410 }
3411
3412 DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = "
3413 "0x%jX\n", __FUNCTION__, i,
3414 (uintmax_t) sc->rx_bd_chain_paddr[i]);
3415 }
3416
3417 /*
3418 * Create a DMA tag for RX mbufs.
3419 */
3420#ifdef BCE_JUMBO_HDRSPLIT
3421 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3422 MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3423#else
3424 max_size = max_seg_size = MJUM9BYTES;
3425#endif
3426 max_segments = 1;
3427
3428 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag "
3429 "(max size = 0x%jX max segments = %d, max segment "
3430 "size = 0x%jX)\n", __FUNCTION__, (uintmax_t) max_size,
3431 max_segments, (uintmax_t) max_seg_size);
3432
3433 if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN,
3434 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3435 max_size, max_segments, max_seg_size, 0, NULL, NULL,
3436 &sc->rx_mbuf_tag)) {
3437 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3438 __FILE__, __LINE__);
3439 rc = ENOMEM;
3440 goto bce_dma_alloc_exit;
3441 }
3442
3443 /* Create DMA maps for the RX mbuf clusters. */
3444 for (i = 0; i < TOTAL_RX_BD; i++) {
3445 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3446 &sc->rx_mbuf_map[i])) {
3447 BCE_PRINTF("%s(%d): Unable to create RX mbuf "
3448 "DMA map!\n", __FILE__, __LINE__);
3449 rc = ENOMEM;
3450 goto bce_dma_alloc_exit;
3451 }
3452 }
3453
3454#ifdef BCE_JUMBO_HDRSPLIT
3455 /*
3456 * Create a DMA tag for the page buffer descriptor chain,
3457 * allocate and clear the memory, and fetch the physical
3458 * address of the blocks.
3459 */
3460 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3461 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr,
3462 NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ,
3463 0, NULL, NULL, &sc->pg_bd_chain_tag)) {
3464 BCE_PRINTF("%s(%d): Could not allocate page descriptor "
3465 "chain DMA tag!\n", __FILE__, __LINE__);
3466 rc = ENOMEM;
3467 goto bce_dma_alloc_exit;
3468 }
3469
3470 for (i = 0; i < PG_PAGES; i++) {
3471
3472 if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3473 (void **)&sc->pg_bd_chain[i],
3474 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3475 &sc->pg_bd_chain_map[i])) {
3476 BCE_PRINTF("%s(%d): Could not allocate page "
3477 "descriptor chain DMA memory!\n",
3478 __FILE__, __LINE__);
3479 rc = ENOMEM;
3480 goto bce_dma_alloc_exit;
3481 }
3482
3483 error = bus_dmamap_load(sc->pg_bd_chain_tag,
3484 sc->pg_bd_chain_map[i], sc->pg_bd_chain[i],
3485 BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
3486 &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3487
3488 if (error) {
3489 BCE_PRINTF("%s(%d): Could not map page descriptor "
3490 "chain DMA memory!\n", __FILE__, __LINE__);
3491 rc = ENOMEM;
3492 goto bce_dma_alloc_exit;
3493 }
3494
3495 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = "
3496 "0x%jX\n", __FUNCTION__, i,
3497 (uintmax_t) sc->pg_bd_chain_paddr[i]);
3498 }
3499
3500 /*
3501 * Create a DMA tag for page mbufs.
3502 */
3503 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
3504 MCLBYTES : sc->pg_bd_mbuf_alloc_size);
3505
3506 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
3507 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3508 max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
3509 BCE_PRINTF("%s(%d): Could not allocate page mbuf "
3510 "DMA tag!\n", __FILE__, __LINE__);
3511 rc = ENOMEM;
3512 goto bce_dma_alloc_exit;
3513 }
3514
3515 /* Create DMA maps for the page mbuf clusters. */
3516 for (i = 0; i < TOTAL_PG_BD; i++) {
3517 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3518 &sc->pg_mbuf_map[i])) {
3519 BCE_PRINTF("%s(%d): Unable to create page mbuf "
3520 "DMA map!\n", __FILE__, __LINE__);
3521 rc = ENOMEM;
3522 goto bce_dma_alloc_exit;
3523 }
3524 }
3525#endif
3526
3527bce_dma_alloc_exit:
3528 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3529 return(rc);
3530}
3531
3532
3533/****************************************************************************/
3534/* Release all resources used by the driver. */
3535/* */
3536/* Releases all resources acquired by the driver including interrupts, */
3537/* interrupt handler, interfaces, mutexes, and DMA memory. */
3538/* */
3539/* Returns: */
3540/* Nothing. */
3541/****************************************************************************/
3542static void
3543bce_release_resources(struct bce_softc *sc)
3544{
3545 device_t dev;
3546
3547 DBENTER(BCE_VERBOSE_RESET);
3548
3549 dev = sc->bce_dev;
3550
3551 bce_dma_free(sc);
3552
3553 if (sc->bce_intrhand != NULL) {
3554 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3555 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3556 }
3557
3558 if (sc->bce_res_irq != NULL) {
3559 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3560 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
3561 sc->bce_res_irq);
3562 }
3563
3564 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3565 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3566 pci_release_msi(dev);
3567 }
3568
3569 if (sc->bce_res_mem != NULL) {
3570 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
3571 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3572 sc->bce_res_mem);
3573 }
3574
3575 if (sc->bce_ifp != NULL) {
3576 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3577 if_free(sc->bce_ifp);
3578 }
3579
3580 if (mtx_initialized(&sc->bce_mtx))
3581 BCE_LOCK_DESTROY(sc);
3582
3583 DBEXIT(BCE_VERBOSE_RESET);
3584}
3585
3586
3587/****************************************************************************/
3588/* Firmware synchronization. */
3589/* */
3590/* Before performing certain events such as a chip reset, synchronize with */
3591/* the firmware first. */
3592/* */
3593/* Returns: */
3594/* 0 for success, positive value for failure. */
3595/****************************************************************************/
3596static int
3597bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3598{
3599 int i, rc = 0;
3600 u32 val;
3601
3602 DBENTER(BCE_VERBOSE_RESET);
3603
3604 /* Don't waste any time if we've timed out before. */
3605 if (sc->bce_fw_timed_out == TRUE) {
3606 rc = EBUSY;
3607 goto bce_fw_sync_exit;
3608 }
3609
3610 /* Increment the message sequence number. */
3611 sc->bce_fw_wr_seq++;
3612 msg_data |= sc->bce_fw_wr_seq;
3613
3614 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = "
3615 "0x%08X\n", msg_data);
3616
3617 /* Send the message to the bootcode driver mailbox. */
3618 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3619
3620 /* Wait for the bootcode to acknowledge the message. */
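	/* Poll the firmware mailbox once per millisecond, for up to */
	/* FW_ACK_TIME_OUT_MS milliseconds. */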
3621 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3622 /* Check for a response in the bootcode firmware mailbox. */
3623 val = bce_shmem_rd(sc, BCE_FW_MB);
3624 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3625 break;
3626 DELAY(1000);
3627 }
3628
3629 /* If we've timed out, tell bootcode that we've stopped waiting. */
3630 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3631 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3632
3633 BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3634 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
3635
3636 msg_data &= ~BCE_DRV_MSG_CODE;
3637 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3638
3639 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3640
3641 sc->bce_fw_timed_out = TRUE;
3642 rc = EBUSY;
3643 }
3644
3645bce_fw_sync_exit:
3646 DBEXIT(BCE_VERBOSE_RESET);
3647 return (rc);
3648}
3649
3650
3651/****************************************************************************/
3652/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
3653/* */
3654/* Returns: */
3655/* Nothing. */
3656/****************************************************************************/
3657static void
3658bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
3659 u32 rv2p_code_len, u32 rv2p_proc)
3660{
3661 int i;
3662 u32 val;
3663
3664 DBENTER(BCE_VERBOSE_RESET);
3665
3666 /* Set the page size used by RV2P. */
3667 if (rv2p_proc == RV2P_PROC2) {
3668 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3669 }
3670
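	/* Each RV2P instruction is 64 bits wide: write the high and low */
	/* 32-bit words, then commit the instruction with an address command. */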
3671 for (i = 0; i < rv2p_code_len; i += 8) {
3672 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3673 rv2p_code++;
3674 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3675 rv2p_code++;
3676
3677 if (rv2p_proc == RV2P_PROC1) {
3678 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3679 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3680 }
3681 else {
3682 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3683 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3684 }
3685 }
3686
3687 /* Reset the processor; the un-stall is done later. */
3688 if (rv2p_proc == RV2P_PROC1) {
3689 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3690 }
3691 else {
3692 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3693 }
3694
3695 DBEXIT(BCE_VERBOSE_RESET);
3696}
3697
3698
3699/****************************************************************************/
3700/* Load RISC processor firmware. */
3701/* */
3702/* Loads firmware from the file if_bcefw.h into the scratchpad memory */
3703/* associated with a particular processor. */
3704/* */
3705/* Returns: */
3706/* Nothing. */
3707/****************************************************************************/
3708static void
3709bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3710 struct fw_info *fw)
3711{
3712 u32 offset;
3713
3714 DBENTER(BCE_VERBOSE_RESET);
3715
3716 bce_halt_cpu(sc, cpu_reg);
3717
3718 /* Load the Text area. */
3719 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3720 if (fw->text) {
3721 int j;
3722
3723 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3724 REG_WR_IND(sc, offset, fw->text[j]);
3725 }
3726 }
3727
3728 /* Load the Data area. */
3729 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3730 if (fw->data) {
3731 int j;
3732
3733 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3734 REG_WR_IND(sc, offset, fw->data[j]);
3735 }
3736 }
3737
3738 /* Load the SBSS area. */
3739 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3740 if (fw->sbss) {
3741 int j;
3742
3743 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3744 REG_WR_IND(sc, offset, fw->sbss[j]);
3745 }
3746 }
3747
3748 /* Load the BSS area. */
3749 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3750 if (fw->bss) {
3751 int j;
3752
3753 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3754 REG_WR_IND(sc, offset, fw->bss[j]);
3755 }
3756 }
3757
3758 /* Load the Read-Only area. */
3759 offset = cpu_reg->spad_base +
3760 (fw->rodata_addr - cpu_reg->mips_view_base);
3761 if (fw->rodata) {
3762 int j;
3763
3764 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3765 REG_WR_IND(sc, offset, fw->rodata[j]);
3766 }
3767 }
3768
3769 /* Clear the pre-fetch instruction and set the FW start address. */
3770 REG_WR_IND(sc, cpu_reg->inst, 0);
3771 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3772
3773 DBEXIT(BCE_VERBOSE_RESET);
3774}
3775
3776
3777/****************************************************************************/
3778/* Starts the RISC processor. */
3779/* */
3780/* Assumes the CPU starting address has already been set. */
3781/* */
3782/* Returns: */
3783/* Nothing. */
3784/****************************************************************************/
3785static void
3786bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
3787{
3788 u32 val;
3789
3790 DBENTER(BCE_VERBOSE_RESET);
3791
3792 /* Start the CPU. */
3793 val = REG_RD_IND(sc, cpu_reg->mode);
3794 val &= ~cpu_reg->mode_value_halt;
3795 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3796 REG_WR_IND(sc, cpu_reg->mode, val);
3797
3798 DBEXIT(BCE_VERBOSE_RESET);
3799}
3800
3801
3802/****************************************************************************/
3803/* Halts the RISC processor. */
3804/* */
3805/* Returns: */
3806/* Nothing. */
3807/****************************************************************************/
3808static void
3809bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
3810{
3811 u32 val;
3812
3813 DBENTER(BCE_VERBOSE_RESET);
3814
3815 /* Halt the CPU. */
3816 val = REG_RD_IND(sc, cpu_reg->mode);
3817 val |= cpu_reg->mode_value_halt;
3818 REG_WR_IND(sc, cpu_reg->mode, val);
3819 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3820
3821 DBEXIT(BCE_VERBOSE_RESET);
3822}
3823
3824
3825/****************************************************************************/
3826/* Starts the RX CPU. */
3827/* */
3828/* Returns: */
3829/* Nothing. */
3830/****************************************************************************/
3831static void
3832bce_start_rxp_cpu(struct bce_softc *sc)
3833{
3834 struct cpu_reg cpu_reg;
3835
3836 DBENTER(BCE_VERBOSE_RESET);
3837
3838 cpu_reg.mode = BCE_RXP_CPU_MODE;
3839 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3840 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3841 cpu_reg.state = BCE_RXP_CPU_STATE;
3842 cpu_reg.state_value_clear = 0xffffff;
3843 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3844 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3845 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3846 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3847 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3848 cpu_reg.spad_base = BCE_RXP_SCRATCH;
3849 cpu_reg.mips_view_base = 0x8000000;
3850
3851 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n");
3852 bce_start_cpu(sc, &cpu_reg);
3853
3854 DBEXIT(BCE_VERBOSE_RESET);
3855}
3856
3857
3858/****************************************************************************/
3859/* Initialize the RX CPU. */
3860/* */
3861/* Returns: */
3862/* Nothing. */
3863/****************************************************************************/
3864static void
3865bce_init_rxp_cpu(struct bce_softc *sc)
3866{
3867 struct cpu_reg cpu_reg;
3868 struct fw_info fw;
3869
3870 DBENTER(BCE_VERBOSE_RESET);
3871
3872 cpu_reg.mode = BCE_RXP_CPU_MODE;
3873 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3874 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3875 cpu_reg.state = BCE_RXP_CPU_STATE;
3876 cpu_reg.state_value_clear = 0xffffff;
3877 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3878 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3879 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3880 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3881 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3882 cpu_reg.spad_base = BCE_RXP_SCRATCH;
3883 cpu_reg.mips_view_base = 0x8000000;
3884
3885 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3886 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3887 fw.ver_major = bce_RXP_b09FwReleaseMajor;
3888 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
3889 fw.ver_fix = bce_RXP_b09FwReleaseFix;
3890 fw.start_addr = bce_RXP_b09FwStartAddr;
3891
3892 fw.text_addr = bce_RXP_b09FwTextAddr;
3893 fw.text_len = bce_RXP_b09FwTextLen;
3894 fw.text_index = 0;
3895 fw.text = bce_RXP_b09FwText;
3896
3897 fw.data_addr = bce_RXP_b09FwDataAddr;
3898 fw.data_len = bce_RXP_b09FwDataLen;
3899 fw.data_index = 0;
3900 fw.data = bce_RXP_b09FwData;
3901
3902 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
3903 fw.sbss_len = bce_RXP_b09FwSbssLen;
3904 fw.sbss_index = 0;
3905 fw.sbss = bce_RXP_b09FwSbss;
3906
3907 fw.bss_addr = bce_RXP_b09FwBssAddr;
3908 fw.bss_len = bce_RXP_b09FwBssLen;
3909 fw.bss_index = 0;
3910 fw.bss = bce_RXP_b09FwBss;
3911
3912 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
3913 fw.rodata_len = bce_RXP_b09FwRodataLen;
3914 fw.rodata_index = 0;
3915 fw.rodata = bce_RXP_b09FwRodata;
3916 } else {
3917 fw.ver_major = bce_RXP_b06FwReleaseMajor;
3918 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3919 fw.ver_fix = bce_RXP_b06FwReleaseFix;
3920 fw.start_addr = bce_RXP_b06FwStartAddr;
3921
3922 fw.text_addr = bce_RXP_b06FwTextAddr;
3923 fw.text_len = bce_RXP_b06FwTextLen;
3924 fw.text_index = 0;
3925 fw.text = bce_RXP_b06FwText;
3926
3927 fw.data_addr = bce_RXP_b06FwDataAddr;
3928 fw.data_len = bce_RXP_b06FwDataLen;
3929 fw.data_index = 0;
3930 fw.data = bce_RXP_b06FwData;
3931
3932 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3933 fw.sbss_len = bce_RXP_b06FwSbssLen;
3934 fw.sbss_index = 0;
3935 fw.sbss = bce_RXP_b06FwSbss;
3936
3937 fw.bss_addr = bce_RXP_b06FwBssAddr;
3938 fw.bss_len = bce_RXP_b06FwBssLen;
3939 fw.bss_index = 0;
3940 fw.bss = bce_RXP_b06FwBss;
3941
3942 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3943 fw.rodata_len = bce_RXP_b06FwRodataLen;
3944 fw.rodata_index = 0;
3945 fw.rodata = bce_RXP_b06FwRodata;
3946 }
3947
3948 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3949 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3950
3951 /* Delay RXP start until initialization is complete. */
3952
3953 DBEXIT(BCE_VERBOSE_RESET);
3954}
3955
3956
3957/****************************************************************************/
3958/* Initialize the TX CPU. */
3959/* */
3960/* Returns: */
3961/* Nothing. */
3962/****************************************************************************/
3963static void
3964bce_init_txp_cpu(struct bce_softc *sc)
3965{
3966 struct cpu_reg cpu_reg;
3967 struct fw_info fw;
3968
3969 DBENTER(BCE_VERBOSE_RESET);
3970
3971 cpu_reg.mode = BCE_TXP_CPU_MODE;
3972 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3973 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3974 cpu_reg.state = BCE_TXP_CPU_STATE;
3975 cpu_reg.state_value_clear = 0xffffff;
3976 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3977 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3978 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3979 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3980 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3981 cpu_reg.spad_base = BCE_TXP_SCRATCH;
3982 cpu_reg.mips_view_base = 0x8000000;
3983
3984 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3985 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3986 fw.ver_major = bce_TXP_b09FwReleaseMajor;
3987 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
3988 fw.ver_fix = bce_TXP_b09FwReleaseFix;
3989 fw.start_addr = bce_TXP_b09FwStartAddr;
3990
3991 fw.text_addr = bce_TXP_b09FwTextAddr;
3992 fw.text_len = bce_TXP_b09FwTextLen;
3993 fw.text_index = 0;
3994 fw.text = bce_TXP_b09FwText;
3995
3996 fw.data_addr = bce_TXP_b09FwDataAddr;
3997 fw.data_len = bce_TXP_b09FwDataLen;
3998 fw.data_index = 0;
3999 fw.data = bce_TXP_b09FwData;
4000
4001 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
4002 fw.sbss_len = bce_TXP_b09FwSbssLen;
4003 fw.sbss_index = 0;
4004 fw.sbss = bce_TXP_b09FwSbss;
4005
4006 fw.bss_addr = bce_TXP_b09FwBssAddr;
4007 fw.bss_len = bce_TXP_b09FwBssLen;
4008 fw.bss_index = 0;
4009 fw.bss = bce_TXP_b09FwBss;
4010
4011 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
4012 fw.rodata_len = bce_TXP_b09FwRodataLen;
4013 fw.rodata_index = 0;
4014 fw.rodata = bce_TXP_b09FwRodata;
4015 } else {
4016 fw.ver_major = bce_TXP_b06FwReleaseMajor;
4017 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
4018 fw.ver_fix = bce_TXP_b06FwReleaseFix;
4019 fw.start_addr = bce_TXP_b06FwStartAddr;
4020
4021 fw.text_addr = bce_TXP_b06FwTextAddr;
4022 fw.text_len = bce_TXP_b06FwTextLen;
4023 fw.text_index = 0;
4024 fw.text = bce_TXP_b06FwText;
4025
4026 fw.data_addr = bce_TXP_b06FwDataAddr;
4027 fw.data_len = bce_TXP_b06FwDataLen;
4028 fw.data_index = 0;
4029 fw.data = bce_TXP_b06FwData;
4030
4031 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
4032 fw.sbss_len = bce_TXP_b06FwSbssLen;
4033 fw.sbss_index = 0;
4034 fw.sbss = bce_TXP_b06FwSbss;
4035
4036 fw.bss_addr = bce_TXP_b06FwBssAddr;
4037 fw.bss_len = bce_TXP_b06FwBssLen;
4038 fw.bss_index = 0;
4039 fw.bss = bce_TXP_b06FwBss;
4040
4041 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
4042 fw.rodata_len = bce_TXP_b06FwRodataLen;
4043 fw.rodata_index = 0;
4044 fw.rodata = bce_TXP_b06FwRodata;
4045 }
4046
4047 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
4048 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4049 bce_start_cpu(sc, &cpu_reg);
4050
4051 DBEXIT(BCE_VERBOSE_RESET);
4052}
4053
4054
4055/****************************************************************************/
4056/* Initialize the TPAT CPU. */
4057/* */
4058/* Returns: */
4059/* Nothing. */
4060/****************************************************************************/
4061static void
4062bce_init_tpat_cpu(struct bce_softc *sc)
4063{
4064 struct cpu_reg cpu_reg;
4065 struct fw_info fw;
4066
4067 DBENTER(BCE_VERBOSE_RESET);
4068
4069 cpu_reg.mode = BCE_TPAT_CPU_MODE;
4070 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
4071 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
4072 cpu_reg.state = BCE_TPAT_CPU_STATE;
4073 cpu_reg.state_value_clear = 0xffffff;
4074 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
4075 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
4076 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
4077 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
4078 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
4079 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
4080 cpu_reg.mips_view_base = 0x8000000;
4081
4082 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4083 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4084 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
4085 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
4086 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
4087 fw.start_addr = bce_TPAT_b09FwStartAddr;
4088
4089 fw.text_addr = bce_TPAT_b09FwTextAddr;
4090 fw.text_len = bce_TPAT_b09FwTextLen;
4091 fw.text_index = 0;
4092 fw.text = bce_TPAT_b09FwText;
4093
4094 fw.data_addr = bce_TPAT_b09FwDataAddr;
4095 fw.data_len = bce_TPAT_b09FwDataLen;
4096 fw.data_index = 0;
4097 fw.data = bce_TPAT_b09FwData;
4098
4099 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
4100 fw.sbss_len = bce_TPAT_b09FwSbssLen;
4101 fw.sbss_index = 0;
4102 fw.sbss = bce_TPAT_b09FwSbss;
4103
4104 fw.bss_addr = bce_TPAT_b09FwBssAddr;
4105 fw.bss_len = bce_TPAT_b09FwBssLen;
4106 fw.bss_index = 0;
4107 fw.bss = bce_TPAT_b09FwBss;
4108
4109 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
4110 fw.rodata_len = bce_TPAT_b09FwRodataLen;
4111 fw.rodata_index = 0;
4112 fw.rodata = bce_TPAT_b09FwRodata;
4113 } else {
4114 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
4115 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
4116 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
4117 fw.start_addr = bce_TPAT_b06FwStartAddr;
4118
4119 fw.text_addr = bce_TPAT_b06FwTextAddr;
4120 fw.text_len = bce_TPAT_b06FwTextLen;
4121 fw.text_index = 0;
4122 fw.text = bce_TPAT_b06FwText;
4123
4124 fw.data_addr = bce_TPAT_b06FwDataAddr;
4125 fw.data_len = bce_TPAT_b06FwDataLen;
4126 fw.data_index = 0;
4127 fw.data = bce_TPAT_b06FwData;
4128
4129 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
4130 fw.sbss_len = bce_TPAT_b06FwSbssLen;
4131 fw.sbss_index = 0;
4132 fw.sbss = bce_TPAT_b06FwSbss;
4133
4134 fw.bss_addr = bce_TPAT_b06FwBssAddr;
4135 fw.bss_len = bce_TPAT_b06FwBssLen;
4136 fw.bss_index = 0;
4137 fw.bss = bce_TPAT_b06FwBss;
4138
4139 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
4140 fw.rodata_len = bce_TPAT_b06FwRodataLen;
4141 fw.rodata_index = 0;
4142 fw.rodata = bce_TPAT_b06FwRodata;
4143 }
4144
4145 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
4146 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4147 bce_start_cpu(sc, &cpu_reg);
4148
4149 DBEXIT(BCE_VERBOSE_RESET);
4150}
4151
4152
4153/****************************************************************************/
4154/* Initialize the CP CPU. */
4155/* */
4156/* Returns: */
4157/* Nothing. */
4158/****************************************************************************/
4159static void
4160bce_init_cp_cpu(struct bce_softc *sc)
4161{
4162 struct cpu_reg cpu_reg;
4163 struct fw_info fw;
4164
4165 DBENTER(BCE_VERBOSE_RESET);
4166
4167 cpu_reg.mode = BCE_CP_CPU_MODE;
4168 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
4169 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
4170 cpu_reg.state = BCE_CP_CPU_STATE;
4171 cpu_reg.state_value_clear = 0xffffff;
4172 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
4173 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
4174 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
4175 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
4176 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
4177 cpu_reg.spad_base = BCE_CP_SCRATCH;
4178 cpu_reg.mips_view_base = 0x8000000;
4179
4180 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4181 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4182 fw.ver_major = bce_CP_b09FwReleaseMajor;
4183 fw.ver_minor = bce_CP_b09FwReleaseMinor;
4184 fw.ver_fix = bce_CP_b09FwReleaseFix;
4185 fw.start_addr = bce_CP_b09FwStartAddr;
4186
4187 fw.text_addr = bce_CP_b09FwTextAddr;
4188 fw.text_len = bce_CP_b09FwTextLen;
4189 fw.text_index = 0;
4190 fw.text = bce_CP_b09FwText;
4191
4192 fw.data_addr = bce_CP_b09FwDataAddr;
4193 fw.data_len = bce_CP_b09FwDataLen;
4194 fw.data_index = 0;
4195 fw.data = bce_CP_b09FwData;
4196
4197 fw.sbss_addr = bce_CP_b09FwSbssAddr;
4198 fw.sbss_len = bce_CP_b09FwSbssLen;
4199 fw.sbss_index = 0;
4200 fw.sbss = bce_CP_b09FwSbss;
4201
4202 fw.bss_addr = bce_CP_b09FwBssAddr;
4203 fw.bss_len = bce_CP_b09FwBssLen;
4204 fw.bss_index = 0;
4205 fw.bss = bce_CP_b09FwBss;
4206
4207 fw.rodata_addr = bce_CP_b09FwRodataAddr;
4208 fw.rodata_len = bce_CP_b09FwRodataLen;
4209 fw.rodata_index = 0;
4210 fw.rodata = bce_CP_b09FwRodata;
4211 } else {
4212 fw.ver_major = bce_CP_b06FwReleaseMajor;
4213 fw.ver_minor = bce_CP_b06FwReleaseMinor;
4214 fw.ver_fix = bce_CP_b06FwReleaseFix;
4215 fw.start_addr = bce_CP_b06FwStartAddr;
4216
4217 fw.text_addr = bce_CP_b06FwTextAddr;
4218 fw.text_len = bce_CP_b06FwTextLen;
4219 fw.text_index = 0;
4220 fw.text = bce_CP_b06FwText;
4221
4222 fw.data_addr = bce_CP_b06FwDataAddr;
4223 fw.data_len = bce_CP_b06FwDataLen;
4224 fw.data_index = 0;
4225 fw.data = bce_CP_b06FwData;
4226
4227 fw.sbss_addr = bce_CP_b06FwSbssAddr;
4228 fw.sbss_len = bce_CP_b06FwSbssLen;
4229 fw.sbss_index = 0;
4230 fw.sbss = bce_CP_b06FwSbss;
4231
4232 fw.bss_addr = bce_CP_b06FwBssAddr;
4233 fw.bss_len = bce_CP_b06FwBssLen;
4234 fw.bss_index = 0;
4235 fw.bss = bce_CP_b06FwBss;
4236
4237 fw.rodata_addr = bce_CP_b06FwRodataAddr;
4238 fw.rodata_len = bce_CP_b06FwRodataLen;
4239 fw.rodata_index = 0;
4240 fw.rodata = bce_CP_b06FwRodata;
4241 }
4242
4243 DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4244 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4245 bce_start_cpu(sc, &cpu_reg);
4246
4247 DBEXIT(BCE_VERBOSE_RESET);
4248}
4249
4250
4251/****************************************************************************/
4252/* Initialize the COM CPU. */
4253/* */
4254/* Returns: */
4255/* Nothing. */
4256/****************************************************************************/
4257static void
4258bce_init_com_cpu(struct bce_softc *sc)
4259{
4260 struct cpu_reg cpu_reg;
4261 struct fw_info fw;
4262
4263 DBENTER(BCE_VERBOSE_RESET);
4264
4265 cpu_reg.mode = BCE_COM_CPU_MODE;
4266 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4267 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4268 cpu_reg.state = BCE_COM_CPU_STATE;
4269 cpu_reg.state_value_clear = 0xffffff;
4270 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4271 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4272 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4273 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4274 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4275 cpu_reg.spad_base = BCE_COM_SCRATCH;
4276 cpu_reg.mips_view_base = 0x8000000;
4277
4278 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4279 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4280 fw.ver_major = bce_COM_b09FwReleaseMajor;
4281 fw.ver_minor = bce_COM_b09FwReleaseMinor;
4282 fw.ver_fix = bce_COM_b09FwReleaseFix;
4283 fw.start_addr = bce_COM_b09FwStartAddr;
4284
4285 fw.text_addr = bce_COM_b09FwTextAddr;
4286 fw.text_len = bce_COM_b09FwTextLen;
4287 fw.text_index = 0;
4288 fw.text = bce_COM_b09FwText;
4289
4290 fw.data_addr = bce_COM_b09FwDataAddr;
4291 fw.data_len = bce_COM_b09FwDataLen;
4292 fw.data_index = 0;
4293 fw.data = bce_COM_b09FwData;
4294
4295 fw.sbss_addr = bce_COM_b09FwSbssAddr;
4296 fw.sbss_len = bce_COM_b09FwSbssLen;
4297 fw.sbss_index = 0;
4298 fw.sbss = bce_COM_b09FwSbss;
4299
4300 fw.bss_addr = bce_COM_b09FwBssAddr;
4301 fw.bss_len = bce_COM_b09FwBssLen;
4302 fw.bss_index = 0;
4303 fw.bss = bce_COM_b09FwBss;
4304
4305 fw.rodata_addr = bce_COM_b09FwRodataAddr;
4306 fw.rodata_len = bce_COM_b09FwRodataLen;
4307 fw.rodata_index = 0;
4308 fw.rodata = bce_COM_b09FwRodata;
4309 } else {
4310 fw.ver_major = bce_COM_b06FwReleaseMajor;
4311 fw.ver_minor = bce_COM_b06FwReleaseMinor;
4312 fw.ver_fix = bce_COM_b06FwReleaseFix;
4313 fw.start_addr = bce_COM_b06FwStartAddr;
4314
4315 fw.text_addr = bce_COM_b06FwTextAddr;
4316 fw.text_len = bce_COM_b06FwTextLen;
4317 fw.text_index = 0;
4318 fw.text = bce_COM_b06FwText;
4319
4320 fw.data_addr = bce_COM_b06FwDataAddr;
4321 fw.data_len = bce_COM_b06FwDataLen;
4322 fw.data_index = 0;
4323 fw.data = bce_COM_b06FwData;
4324
4325 fw.sbss_addr = bce_COM_b06FwSbssAddr;
4326 fw.sbss_len = bce_COM_b06FwSbssLen;
4327 fw.sbss_index = 0;
4328 fw.sbss = bce_COM_b06FwSbss;
4329
4330 fw.bss_addr = bce_COM_b06FwBssAddr;
4331 fw.bss_len = bce_COM_b06FwBssLen;
4332 fw.bss_index = 0;
4333 fw.bss = bce_COM_b06FwBss;
4334
4335 fw.rodata_addr = bce_COM_b06FwRodataAddr;
4336 fw.rodata_len = bce_COM_b06FwRodataLen;
4337 fw.rodata_index = 0;
4338 fw.rodata = bce_COM_b06FwRodata;
4339 }
4340
4341 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4342 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4343 bce_start_cpu(sc, &cpu_reg);
4344
4345 DBEXIT(BCE_VERBOSE_RESET);
4346}
4347
4348
4349/****************************************************************************/
4350/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
4351/* */
4352/* Loads the firmware for each CPU and starts the CPU. */
4353/* */
4354/* Returns: */
4355/* Nothing. */
4356/****************************************************************************/
4357static void
4358bce_init_cpus(struct bce_softc *sc)
4359{
4360 DBENTER(BCE_VERBOSE_RESET);
4361
4362 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4363 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4364
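		/* The 5709/5716 use dedicated RV2P firmware images; A-step */
		/* silicon requires the xi90 variants. */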
4365 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
4366 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
4367 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
4368 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
4369 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
4370 } else {
4371 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
4372 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
4373 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
4374 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
4375 }
4376
4377 } else {
4378 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
4379 sizeof(bce_rv2p_proc1), RV2P_PROC1);
4380 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
4381 sizeof(bce_rv2p_proc2), RV2P_PROC2);
4382 }
4383
4384 bce_init_rxp_cpu(sc);
4385 bce_init_txp_cpu(sc);
4386 bce_init_tpat_cpu(sc);
4387 bce_init_com_cpu(sc);
4388 bce_init_cp_cpu(sc);
4389
4390 DBEXIT(BCE_VERBOSE_RESET);
4391}
4392
4393
4394/****************************************************************************/
4395/* Initialize context memory. */
4396/* */
4397/* Clears the memory associated with each Context ID (CID). */
4398/* */
4399/* Returns: */
4400/* 0 for success, positive value for failure. */
4401/****************************************************************************/
4402static int
4403bce_init_ctx(struct bce_softc *sc)
4404{
4405 u32 offset, val, vcid_addr;
4406 int i, j, rc, retry_cnt;
4407
4408 rc = 0;
4409 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4410
4411 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4412 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4413 retry_cnt = CTX_INIT_RETRY_COUNT;
4414
4415 DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4416
4417 /*
4418 * BCM5709 context memory may be cached
4419 * in host memory so prepare the host memory
4420 * for access.
4421 */
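		/* (BCM_PAGE_BITS - 8) presumably encodes the host page size */
		/* relative to a 256 byte minimum page. */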
4422 val = BCE_CTX_COMMAND_ENABLED |
4423 BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
4424 val |= (BCM_PAGE_BITS - 8) << 16;
4425 REG_WR(sc, BCE_CTX_COMMAND, val);
4426
4427 /* Wait for mem init command to complete. */
4428 for (i = 0; i < retry_cnt; i++) {
4429 val = REG_RD(sc, BCE_CTX_COMMAND);
4430 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4431 break;
4432 DELAY(2);
4433 }
4434 if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) {
4435 BCE_PRINTF("%s(): Context memory initialization failed!\n",
4436 __FUNCTION__);
4437 rc = EBUSY;
4438 goto init_ctx_fail;
4439 }
4440
4441 for (i = 0; i < sc->ctx_pages; i++) {
4442 /* Set the physical address of the context memory. */
4443 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4444 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4445 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4446 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4447 BCE_ADDR_HI(sc->ctx_paddr[i]));
4448 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4449 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4450
4451 /* Verify the context memory write was successful. */
4452 for (j = 0; j < retry_cnt; j++) {
4453 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4454 if ((val &
4455 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4456 break;
4457 DELAY(5);
4458 }
4459 if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) {
4460 BCE_PRINTF("%s(): Failed to initialize "
4461 "context page %d!\n", __FUNCTION__, i);
4462 rc = EBUSY;
4463 goto init_ctx_fail;
4464 }
4465 }
4466 } else {
4467
4468 DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4469
4470 /*
4471 * For the 5706/5708, context memory is local to
4472 * the controller, so initialize the controller
4473 * context memory.
4474 */
4475
4476 vcid_addr = GET_CID_ADDR(96);
4477 while (vcid_addr) {
4478
4479 vcid_addr -= PHY_CTX_SIZE;
4480
4481 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4482 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4483
4484 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4485 CTX_WR(sc, 0x00, offset, 0);
4486 }
4487
4488 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4489 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4490 }
4491
4492 }
4493init_ctx_fail:
4494 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4495 return (rc);
4496}
4497
4498
4499/****************************************************************************/
4500/* Fetch the permanent MAC address of the controller. */
4501/* */
4502/* Returns: */
4503/* Nothing. */
4504/****************************************************************************/
4505static void
4506bce_get_mac_addr(struct bce_softc *sc)
4507{
4508 u32 mac_lo = 0, mac_hi = 0;
4509
4510 DBENTER(BCE_VERBOSE_RESET);
4511
4512 /*
4513 * The NetXtreme II bootcode populates various NIC
4514 * power-on and runtime configuration items in a
4515 * shared memory area. The factory configured MAC
4516 * address is available from both NVRAM and the
4517 * shared memory area so we'll read the value from
4518 * shared memory for speed.
4519 */
4520
4521 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
4522 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
4523
4524 if ((mac_lo == 0) && (mac_hi == 0)) {
4525 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4526 __FILE__, __LINE__);
4527 } else {
4528 sc->eaddr[0] = (u_char)(mac_hi >> 8);
4529 sc->eaddr[1] = (u_char)(mac_hi >> 0);
4530 sc->eaddr[2] = (u_char)(mac_lo >> 24);
4531 sc->eaddr[3] = (u_char)(mac_lo >> 16);
4532 sc->eaddr[4] = (u_char)(mac_lo >> 8);
4533 sc->eaddr[5] = (u_char)(mac_lo >> 0);
4534 }
4535
4536 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet "
4537 "address = %6D\n", sc->eaddr, ":");
4538 DBEXIT(BCE_VERBOSE_RESET);
4539}
4540
4541
4542/****************************************************************************/
4543/* Program the MAC address. */
4544/* */
4545/* Returns: */
4546/* Nothing. */
4547/****************************************************************************/
4548static void
4549bce_set_mac_addr(struct bce_softc *sc)
4550{
4551 u32 val;
4552 u8 *mac_addr = sc->eaddr;
4553
4554 /* ToDo: Add support for setting multiple MAC addresses. */
4555
4556 DBENTER(BCE_VERBOSE_RESET);
4557 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = "
4558 "%6D\n", sc->eaddr, ":");
4559
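	/* The upper two bytes of the address are written to MAC_MATCH0, */
	/* the lower four bytes to MAC_MATCH1. */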
4560 val = (mac_addr[0] << 8) | mac_addr[1];
4561
4562 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4563
4564 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4565 (mac_addr[4] << 8) | mac_addr[5];
4566
4567 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4568
4569 DBEXIT(BCE_VERBOSE_RESET);
4570}
4571
4572
4573/****************************************************************************/
4574/* Stop the controller. */
4575/* */
4576/* Returns: */
4577/* Nothing. */
4578/****************************************************************************/
4579static void
4580bce_stop(struct bce_softc *sc)
4581{
4582 struct ifnet *ifp;
4583
4584 DBENTER(BCE_VERBOSE_RESET);
4585
4586 BCE_LOCK_ASSERT(sc);
4587
4588 ifp = sc->bce_ifp;
4589
4590 callout_stop(&sc->bce_tick_callout);
4591
4592 /* Disable the transmit/receive blocks. */
4593 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4594 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4595 DELAY(20);
4596
4597 bce_disable_intr(sc);
4598
4599 /* Free RX buffers. */
4600#ifdef BCE_JUMBO_HDRSPLIT
4601 bce_free_pg_chain(sc);
4602#endif
4603 bce_free_rx_chain(sc);
4604
4605 /* Free TX buffers. */
4606 bce_free_tx_chain(sc);
4607
4608 sc->watchdog_timer = 0;
4609
4610 sc->bce_link_up = FALSE;
4611
4612 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4613
4614 DBEXIT(BCE_VERBOSE_RESET);
4615}
4616
4617
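/****************************************************************************/
/* Reset the controller. */
/* */
/* Synchronizes with the bootcode, issues the chip reset, and waits for */
/* the bootcode to complete its initialization. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/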
4618static int
4619bce_reset(struct bce_softc *sc, u32 reset_code)
4620{
4621 u32 val;
4622 int i, rc = 0;
4623
4624 DBENTER(BCE_VERBOSE_RESET);
4625
4626 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4627 __FUNCTION__, reset_code);
4628
4629 /* Wait for pending PCI transactions to complete. */
4630 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4631 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4632 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4633 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4634 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4635 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4636 DELAY(5);
4637
4638 /* Disable DMA */
4639 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4640 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4641 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4642 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4643 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4644 }
4645
4646 /* Assume bootcode is running. */
4647 sc->bce_fw_timed_out = FALSE;
4648 sc->bce_drv_cardiac_arrest = FALSE;
4649
4650 /* Give the firmware a chance to prepare for the reset. */
4651 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4652 if (rc)
4653 goto bce_reset_exit;
4654
4655 /* Set a firmware reminder that this is a soft reset. */
4656 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);
4657
4658 /* Dummy read to force the chip to complete all current transactions. */
4659 val = REG_RD(sc, BCE_MISC_ID);
4660
4661 /* Chip reset. */
4662 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4663 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4664 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4665 REG_RD(sc, BCE_MISC_COMMAND);
4666 DELAY(5);
4667
4668 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4669 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4670
4671 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4672 } else {
4673 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4674 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4675 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4676 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4677
4678 /* The core reset should complete within 30us; poll for up to 100us. */
4679 for (i = 0; i < 10; i++) {
4680 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4681 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4682 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4683 break;
4684 }
4685 DELAY(10);
4686 }
4687
4688 /* Check that reset completed successfully. */
4689 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4690 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4691 BCE_PRINTF("%s(%d): Reset failed!\n",
4692 __FILE__, __LINE__);
4693 rc = EBUSY;
4694 goto bce_reset_exit;
4695 }
4696 }
4697
4698 /* Make sure byte swapping is properly configured. */
4699 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4700 if (val != 0x01020304) {
4701 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4702 __FILE__, __LINE__);
4703 rc = ENODEV;
4704 goto bce_reset_exit;
4705 }
4706
4707 /* Just completed a reset, assume that firmware is running again. */
4708 sc->bce_fw_timed_out = FALSE;
4709 sc->bce_drv_cardiac_arrest = FALSE;
4710
4711 /* Wait for the firmware to finish its initialization. */
4712 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4713 if (rc)
4714 BCE_PRINTF("%s(%d): Firmware did not complete "
4715 "initialization!\n", __FILE__, __LINE__);
4716
4717bce_reset_exit:
4718 DBEXIT(BCE_VERBOSE_RESET);
4719 return (rc);
4720}
4721
4722
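/****************************************************************************/
/* Perform basic controller initialization after a reset. */
/* */
/* Configures DMA swapping, initializes context memory and the on-board */
/* CPUs, prepares NVRAM access, and programs the MQ, RV2P and TBDR page */
/* sizes. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/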
4723static int
4724bce_chipinit(struct bce_softc *sc)
4725{
4726 u32 val;
4727 int rc = 0;
4728
4729 DBENTER(BCE_VERBOSE_RESET);
4730
4731 bce_disable_intr(sc);
4732
4733 /*
4734 * Initialize DMA byte/word swapping, configure the number of DMA
4735 * channels and PCI clock compensation delay.
4736 */
4737 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
4738 BCE_DMA_CONFIG_DATA_WORD_SWAP |
4739#if BYTE_ORDER == BIG_ENDIAN
4740 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
4741#endif
4742 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
4743 DMA_READ_CHANS << 12 |
4744 DMA_WRITE_CHANS << 16;
4745
4746 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
4747
4748 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
4749 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
4750
4751 /*
4752 * This setting resolves a problem observed on certain Intel PCI
4753 * chipsets that cannot handle multiple outstanding DMA operations.
4754 * See errata E9_5706A1_65.
4755 */
4756 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4757 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
4758 !(sc->bce_flags & BCE_PCIX_FLAG))
4759 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
4760
4761 REG_WR(sc, BCE_DMA_CONFIG, val);
4762
4763 /* Enable the RX_V2P and Context state machines before access. */
4764 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4765 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4766 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4767 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4768
4769 /* Initialize context mapping and zero out the quick contexts. */
4770 if ((rc = bce_init_ctx(sc)) != 0)
4771 goto bce_chipinit_exit;
4772
4773 /* Initialize the on-board CPUs. */
4774 bce_init_cpus(sc);
4775
4776 /* Enable management frames (NC-SI) to flow to the MCP. */
4777 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4778 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
4779 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
4780 }
4781
4782 /* Prepare NVRAM for access. */
4783 if ((rc = bce_init_nvram(sc)) != 0)
4784 goto bce_chipinit_exit;
4785
4786 /* Set the kernel bypass block size */
4787 val = REG_RD(sc, BCE_MQ_CONFIG);
4788 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4789 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4790
4791 /* Enable bins used on the 5709. */
4792 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4793 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4794 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
4795 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
4796 val |= BCE_MQ_CONFIG_HALT_DIS;
4797 }
4798
4799 REG_WR(sc, BCE_MQ_CONFIG, val);
4800
4801 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4802 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
4803 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
4804
4805 /* Set the page size and clear the RV2P processor stall bits. */
4806 val = (BCM_PAGE_BITS - 8) << 24;
4807 REG_WR(sc, BCE_RV2P_CONFIG, val);
4808
4809 /* Configure page size. */
4810 val = REG_RD(sc, BCE_TBDR_CONFIG);
4811 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
4812 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4813 REG_WR(sc, BCE_TBDR_CONFIG, val);
4814
4815 /* Set the perfect match control register to default. */
4816 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
4817
4818bce_chipinit_exit:
4819 DBEXIT(BCE_VERBOSE_RESET);
4820
4821 return(rc);
4822}
4823
4824
4825/****************************************************************************/
4826/* Initialize the controller in preparation to send/receive traffic. */
4827/* */
4828/* Returns: */
4829/* 0 for success, positive value for failure. */
4830/****************************************************************************/
4831static int
4832bce_blockinit(struct bce_softc *sc)
4833{
4834 u32 reg, val;
4835 int rc = 0;
4836
4837 DBENTER(BCE_VERBOSE_RESET);
4838
4839 /* Load the hardware default MAC address. */
4840 bce_set_mac_addr(sc);
4841
4842 /* Set the Ethernet backoff seed value */
4843 val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
4844 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) +
4845 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
4846 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
4847
4848 sc->last_status_idx = 0;
4849 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
4850
4851 /* Set up link change interrupt generation. */
4852 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
4853
4854 /* Program the physical address of the status block. */
4855 REG_WR(sc, BCE_HC_STATUS_ADDR_L,
4856 BCE_ADDR_LO(sc->status_block_paddr));
4857 REG_WR(sc, BCE_HC_STATUS_ADDR_H,
4858 BCE_ADDR_HI(sc->status_block_paddr));
4859
4860 /* Program the physical address of the statistics block. */
4861 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
4862 BCE_ADDR_LO(sc->stats_block_paddr));
4863 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
4864 BCE_ADDR_HI(sc->stats_block_paddr));
4865
4866 /* Program various host coalescing parameters. */
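	/* Each register packs the corresponding *_int value into its upper */
	/* 16 bits and the base value into its lower 16 bits. */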
4867 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4868 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
4869 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4870 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
4871 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
4872 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
4873 REG_WR(sc, BCE_HC_TX_TICKS,
4874 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
4875 REG_WR(sc, BCE_HC_RX_TICKS,
4876 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
4877 REG_WR(sc, BCE_HC_COM_TICKS,
4878 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
4879 REG_WR(sc, BCE_HC_CMD_TICKS,
4880 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
4881 REG_WR(sc, BCE_HC_STATS_TICKS,
4882 (sc->bce_stats_ticks & 0xffff00));
4883 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4884
4885 /* Configure the Host Coalescing block. */
4886 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
4887 BCE_HC_CONFIG_COLLECT_STATS;
4888
4889#if 0
4890 /* ToDo: Add MSI-X support. */
4891 if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
4892 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
4893 BCE_HC_SB_CONFIG_1;
4894
4895 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
4896
4897 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
4898 BCE_HC_SB_CONFIG_1_ONE_SHOT);
4899
4900 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
4901 (sc->tx_quick_cons_trip_int << 16) |
4902 sc->tx_quick_cons_trip);
4903
4904 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
4905 (sc->tx_ticks_int << 16) | sc->tx_ticks);
4906
4907 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4908 }
4909
4910 /*
4911 * Tell the HC block to automatically set the
4912 * INT_MASK bit after an MSI/MSI-X interrupt
4913 * is generated so the driver doesn't have to.
4914 */
4915 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
4916 val |= BCE_HC_CONFIG_ONE_SHOT;
4917
4918 /* Set the MSI-X status blocks to 128 byte boundaries. */
4919 if (sc->bce_flags & BCE_USING_MSIX_FLAG)
4920 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4921#endif
4922
4923 REG_WR(sc, BCE_HC_CONFIG, val);
4924
4925 /* Clear the internal statistics counters. */
4926 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
4927
4928 /* Verify that bootcode is running. */
4929 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
4930
4931 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control),
4932 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
4933 __FILE__, __LINE__);
4934 reg = 0);
4935
4936 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
4937 BCE_DEV_INFO_SIGNATURE_MAGIC) {
4938 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
4939 "Expected: 08%08X\n", __FILE__, __LINE__,
4940 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
4941 BCE_DEV_INFO_SIGNATURE_MAGIC);
4942 rc = ENODEV;
4943 goto bce_blockinit_exit;
4944 }
4945
4946 /* Enable DMA */
4947 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4948 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4949 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4950 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4951 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4952 }
4953
4954 /* Allow bootcode to apply additional fixes before enabling MAC. */
4955 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 |
4956 BCE_DRV_MSG_CODE_RESET);
4957
4958 /* Enable link state change interrupt generation. */
4959 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
4960
4961 /* Enable the RXP. */
4962 bce_start_rxp_cpu(sc);
4963
4964 /* Disable management frames (NC-SI) from flowing to the MCP. */
4965 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4966 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
4967 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
4968 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
4969 }
4970
4971 /* Enable all remaining blocks in the MAC. */
4972 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4973 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
4974 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4975 BCE_MISC_ENABLE_DEFAULT_XI);
4976 else
4977 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4978 BCE_MISC_ENABLE_DEFAULT);
4979
4980 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4981 DELAY(20);
4982
4983 /* Save the current host coalescing block settings. */
4984 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
4985
4986bce_blockinit_exit:
4987 DBEXIT(BCE_VERBOSE_RESET);
4988
4989 return (rc);
4990}
4991
4992
4993/****************************************************************************/
4994/* Encapsulate an mbuf into the rx_bd chain. */
4995/* */
4996/* Returns: */
4997/* 0 for success, positive value for failure. */
4998/****************************************************************************/
4999static int
5000bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
5001 u16 *chain_prod, u32 *prod_bseq)
5002{
5003 bus_dmamap_t map;
5004 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
5005 struct mbuf *m_new = NULL;
5006 struct rx_bd *rxbd;
5007 int nsegs, error, rc = 0;
5008#ifdef BCE_DEBUG
5009 u16 debug_chain_prod = *chain_prod;
5010#endif
5011
5012 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5013
5014 /* Make sure the inputs are valid. */
5015 DBRUNIF((*chain_prod > MAX_RX_BD),
5016 BCE_PRINTF("%s(%d): RX producer out of range: "
5017 "0x%04X > 0x%04X\n", __FILE__, __LINE__,
5018 *chain_prod, (u16) MAX_RX_BD));
5019
5020 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5021 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
5022 *prod, *chain_prod, *prod_bseq);
5023
5024 /* Update some debug statistics counters. */
5025 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5026 sc->rx_low_watermark = sc->free_rx_bd);
5027 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
5028 sc->rx_empty_count++);
5029
5030 /* Check whether this is a new mbuf allocation. */
5031 if (m == NULL) {
5032
5033 /* Simulate an mbuf allocation failure. */
5034 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5035 sc->mbuf_alloc_failed_count++;
5036 sc->mbuf_alloc_failed_sim_count++;
5037 rc = ENOBUFS;
5038 goto bce_get_rx_buf_exit);
5039
5040 /* This is a new mbuf allocation. */
5041#ifdef BCE_JUMBO_HDRSPLIT
5042 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
5043#else
5044 m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
5045 sc->rx_bd_mbuf_alloc_size);
5046#endif
5047
5048 if (m_new == NULL) {
5049 sc->mbuf_alloc_failed_count++;
5050 rc = ENOBUFS;
5051 goto bce_get_rx_buf_exit;
5052 }
5053
5054 DBRUN(sc->debug_rx_mbuf_alloc++);
5055 } else {
5056 /* Reuse an existing mbuf. */
5057 m_new = m;
5058 }
5059
5060 /* Make sure we have a valid packet header. */
5061 M_ASSERTPKTHDR(m_new);
5062
5063 /* Initialize the mbuf size and pad if necessary for alignment. */
5064 m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
5065 m_adj(m_new, sc->rx_bd_mbuf_align_pad);
5066
5067 /* ToDo: Consider calling m_fragment() to test error handling. */
5068
5069 /* Map the mbuf cluster into device memory. */
5070 map = sc->rx_mbuf_map[*chain_prod];
5071 error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
5072 segs, &nsegs, BUS_DMA_NOWAIT);
5073
5074 /* Handle any mapping errors. */
5075 if (error) {
5076 BCE_PRINTF("%s(%d): Error mapping mbuf into RX "
5077 "chain (%d)!\n", __FILE__, __LINE__, error);
5078
5079 sc->dma_map_addr_rx_failed_count++;
5080 m_freem(m_new);
5081
5082 DBRUN(sc->debug_rx_mbuf_alloc--);
5083
5084 rc = ENOBUFS;
5085 goto bce_get_rx_buf_exit;
5086 }
5087
5088 /* All mbufs must map to a single segment. */
5089 KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
5090 __FUNCTION__, nsegs));
5091
5092 /* Setup the rx_bd for the segment. */
5093 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
5094
5095 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
5096 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
5097 rxbd->rx_bd_len = htole32(segs[0].ds_len);
5098 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5099 *prod_bseq += segs[0].ds_len;
5100
5101 /* Save the mbuf and update our counter. */
5102 sc->rx_mbuf_ptr[*chain_prod] = m_new;
5103 sc->free_rx_bd -= nsegs;
5104
5105 DBRUNMSG(BCE_INSANE_RECV,
5106 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));
5107
5108 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5109 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n",
5110 __FUNCTION__, *prod, *chain_prod, *prod_bseq);
5111
5112bce_get_rx_buf_exit:
5113 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5114
5115 return(rc);
5116}
5117
5118
5119#ifdef BCE_JUMBO_HDRSPLIT
5120/****************************************************************************/
5121/* Encapsulate an mbuf cluster into the page chain. */
5122/* */
5123/* Returns: */
5124/* 0 for success, positive value for failure. */
5125/****************************************************************************/
5126static int
5127bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
5128 u16 *prod_idx)
5129{
5130 bus_dmamap_t map;
5131 bus_addr_t busaddr;
5132 struct mbuf *m_new = NULL;
5133 struct rx_bd *pgbd;
5134 int error, rc = 0;
5135#ifdef BCE_DEBUG
5136 u16 debug_prod_idx = *prod_idx;
5137#endif
5138
5139 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5140
5141 /* Make sure the inputs are valid. */
5142 DBRUNIF((*prod_idx > MAX_PG_BD),
5143 BCE_PRINTF("%s(%d): page producer out of range: "
5144 "0x%04X > 0x%04X\n", __FILE__, __LINE__,
5145 *prod_idx, (u16) MAX_PG_BD));
5146
5147 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5148 "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5149
5150 /* Update counters if we've hit a new low or run out of pages. */
5151 DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
5152 sc->pg_low_watermark = sc->free_pg_bd);
5153 DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
5154
5155 /* Check whether this is a new mbuf allocation. */
5156 if (m == NULL) {
5157
5158 /* Simulate an mbuf allocation failure. */
5159 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5160 sc->mbuf_alloc_failed_count++;
5161 sc->mbuf_alloc_failed_sim_count++;
5162 rc = ENOBUFS;
5163 goto bce_get_pg_buf_exit);
5164
5165 /* This is a new mbuf allocation. */
5166 m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
5167 if (m_new == NULL) {
5168 sc->mbuf_alloc_failed_count++;
5169 rc = ENOBUFS;
5170 goto bce_get_pg_buf_exit;
5171 }
5172
5173 DBRUN(sc->debug_pg_mbuf_alloc++);
5174 } else {
5175 /* Reuse an existing mbuf. */
5176 m_new = m;
5177 m_new->m_data = m_new->m_ext.ext_buf;
5178 }
5179
5180 m_new->m_len = sc->pg_bd_mbuf_alloc_size;
5181
5182 /* ToDo: Consider calling m_fragment() to test error handling. */
5183
5184 /* Map the mbuf cluster into device memory. */
5185 map = sc->pg_mbuf_map[*prod_idx];
5186 error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
5187 sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr,
5188 &busaddr, BUS_DMA_NOWAIT);
5189
5190 /* Handle any mapping errors. */
5191 if (error) {
5192 BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
5193 __FILE__, __LINE__);
5194
5195 m_freem(m_new);
5196 DBRUN(sc->debug_pg_mbuf_alloc--);
5197
5198 rc = ENOBUFS;
5199 goto bce_get_pg_buf_exit;
5200 }
5201
5202 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
5203
5204 /*
5205 * The page chain uses the same rx_bd data structure
5206 * as the receive chain but doesn't require a byte sequence (bseq).
5207 */
5208 pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
5209
5210 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
5211 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
5212 pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
5213 pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5214
5215 /* Save the mbuf and update our counter. */
5216 sc->pg_mbuf_ptr[*prod_idx] = m_new;
5217 sc->free_pg_bd--;
5218
5219 DBRUNMSG(BCE_INSANE_RECV,
5220 bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1));
5221
5222 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5223 "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5224
5225bce_get_pg_buf_exit:
5226 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5227
5228 return(rc);
5229}
5230#endif /* BCE_JUMBO_HDRSPLIT */
5231
5232
5233/****************************************************************************/
5234/* Initialize the TX context memory. */
5235/* */
5236/* Returns: */
5237/* Nothing */
5238/****************************************************************************/
5239static void
5240bce_init_tx_context(struct bce_softc *sc)
5241{
5242 u32 val;
5243
5244 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5245
5246 /* Initialize the context ID for an L2 TX chain. */
5247 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5248 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5249 /* Set the CID type to support an L2 connection. */
5250 val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI |
5251 BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5252 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5253 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5254 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5255 BCE_L2CTX_TX_CMD_TYPE_XI, val);
5256
5257 /* Point the hardware to the first page in the chain. */
5258 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5259 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5260 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5261 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5262 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5263 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5264 } else {
5265 /* Set the CID type to support an L2 connection. */
5266 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5267 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5268 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5269 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5270
5271 /* Point the hardware to the first page in the chain. */
5272 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5273 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5274 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5275 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5276 CTX_WR(sc, GET_CID_ADDR(TX_CID),
5277 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5278 }
5279
5280 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5281}
5282
5283
5284/****************************************************************************/
5285/* Allocate memory and initialize the TX data structures. */
5286/* */
5287/* Returns: */
5288/* 0 for success, positive value for failure. */
5289/****************************************************************************/
5290static int
5291bce_init_tx_chain(struct bce_softc *sc)
5292{
5293 struct tx_bd *txbd;
5294 int i, rc = 0;
5295
5296 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5297
5298 /* Set the initial TX producer/consumer indices. */
5299 sc->tx_prod = 0;
5300 sc->tx_cons = 0;
5301 sc->tx_prod_bseq = 0;
5302 sc->used_tx_bd = 0;
5303 sc->max_tx_bd = USABLE_TX_BD;
5304 DBRUN(sc->tx_hi_watermark = 0);
5305 DBRUN(sc->tx_full_count = 0);
5306
5307	/*
5308	 * The NetXtreme II supports a linked-list structure called a
5309	 * Buffer Descriptor Chain (or BD chain).  A BD chain consists
5310	 * of one or more chain pages, each holding a fixed number of
5311	 * BD entries.  The last BD entry on each page is a pointer to
5312	 * the next page in the chain, and the last pointer in the BD
5313	 * chain points back to the beginning of the chain, so the
5314	 * pages form a ring that the hardware can follow.
5315	 */
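	/*
	 * Illustrative example (the exact figures depend on the configured
	 * chain page size and are not taken from this file): with a 4 KB
	 * chain page and 16-byte tx_bd entries, each page holds 256
	 * descriptors, 255 of which are usable while the last one is the
	 * next-page pointer.  With two pages the loop below would wire:
	 *
	 *   page 0, entry 255 --> physical address of page 1
	 *   page 1, entry 255 --> physical address of page 0
	 */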
5316
5317 /* Set the TX next pointer chain entries. */
5318 for (i = 0; i < TX_PAGES; i++) {
5319 int j;
5320
5321 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5322
5323 /* Check if we've reached the last page. */
5324 if (i == (TX_PAGES - 1))
5325 j = 0;
5326 else
5327 j = i + 1;
5328
5329 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5330 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5331 }
5332
5333 bce_init_tx_context(sc);
5334
5335 DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
5336 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5337
5338 return(rc);
5339}
5340
5341
5342/****************************************************************************/
5343/* Free memory and clear the TX data structures. */
5344/* */
5345/* Returns: */
5346/* Nothing. */
5347/****************************************************************************/
5348static void
5349bce_free_tx_chain(struct bce_softc *sc)
5350{
5351 int i;
5352
5353 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5354
5355 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5356 for (i = 0; i < TOTAL_TX_BD; i++) {
5357 if (sc->tx_mbuf_ptr[i] != NULL) {
5358 if (sc->tx_mbuf_map[i] != NULL)
5359 bus_dmamap_sync(sc->tx_mbuf_tag,
5360 sc->tx_mbuf_map[i],
5361 BUS_DMASYNC_POSTWRITE);
5362 m_freem(sc->tx_mbuf_ptr[i]);
5363 sc->tx_mbuf_ptr[i] = NULL;
5364 DBRUN(sc->debug_tx_mbuf_alloc--);
5365 }
5366 }
5367
5368 /* Clear each TX chain page. */
5369 for (i = 0; i < TX_PAGES; i++)
5370 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5371
5372 sc->used_tx_bd = 0;
5373
5374 /* Check if we lost any mbufs in the process. */
5375 DBRUNIF((sc->debug_tx_mbuf_alloc),
5376 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5377 "from tx chain!\n", __FILE__, __LINE__,
5378 sc->debug_tx_mbuf_alloc));
5379
5380 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5381}
5382
5383
5384/****************************************************************************/
5385/* Initialize the RX context memory. */
5386/* */
5387/* Returns: */
5388/* Nothing */
5389/****************************************************************************/
5390static void
5391bce_init_rx_context(struct bce_softc *sc)
5392{
5393 u32 val;
5394
5395 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5396
5397 /* Init the type, size, and BD cache levels for the RX context. */
5398 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5399 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5400 (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5401
5402 /*
5403 * Set the level for generating pause frames
5404 * when the number of available rx_bd's gets
5405 * too low (the low watermark) and the level
5406 * when pause frames can be stopped (the high
5407 * watermark).
5408 */
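	/*
	 * Rough sketch of the calculation below (illustrative only):
	 * lo_water starts at the default low watermark when TX flow
	 * control is enabled (0 otherwise) and hi_water at
	 * USABLE_RX_BD / 4.  Both are then divided by their hardware
	 * scale factors, and hi_water is clamped to 0xf, presumably
	 * because it must fit in a 4-bit context field.  If the values
	 * degenerate (hi_water <= lo_water, or hi_water scales to 0),
	 * pause frame generation is effectively disabled by forcing
	 * lo_water to 0.
	 */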
5409 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5410 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5411 u32 lo_water, hi_water;
5412
5413 if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) {
5414 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5415 } else {
5416 lo_water = 0;
5417 }
5418
5419 if (lo_water >= USABLE_RX_BD) {
5420 lo_water = 0;
5421 }
5422
5423 hi_water = USABLE_RX_BD / 4;
5424
5425 if (hi_water <= lo_water) {
5426 lo_water = 0;
5427 }
5428
5429 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5430 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5431
5432 if (hi_water > 0xf)
5433 hi_water = 0xf;
5434 else if (hi_water == 0)
5435 lo_water = 0;
5436
5437 val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5438 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5439 }
5440
5441 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5442
5443 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5444 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5445 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5446 val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5447 REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5448 }
5449
5450 /* Point the hardware to the first page in the chain. */
5451 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5452 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5453 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5454 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5455
5456 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5457}
5458
5459
5460/****************************************************************************/
5461/* Allocate memory and initialize the RX data structures. */
5462/* */
5463/* Returns: */
5464/* 0 for success, positive value for failure. */
5465/****************************************************************************/
5466static int
5467bce_init_rx_chain(struct bce_softc *sc)
5468{
5469 struct rx_bd *rxbd;
5470 int i, rc = 0;
5471
5472 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5473 BCE_VERBOSE_CTX);
5474
5475 /* Initialize the RX producer and consumer indices. */
5476 sc->rx_prod = 0;
5477 sc->rx_cons = 0;
5478 sc->rx_prod_bseq = 0;
5479 sc->free_rx_bd = USABLE_RX_BD;
5480 sc->max_rx_bd = USABLE_RX_BD;
5481
5482 /* Initialize the RX next pointer chain entries. */
5483 for (i = 0; i < RX_PAGES; i++) {
5484 int j;
5485
5486 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5487
5488 /* Check if we've reached the last page. */
5489 if (i == (RX_PAGES - 1))
5490 j = 0;
5491 else
5492 j = i + 1;
5493
5494 /* Setup the chain page pointers. */
5495 rxbd->rx_bd_haddr_hi =
5496 htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5497 rxbd->rx_bd_haddr_lo =
5498 htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5499 }
5500
5501 /* Fill up the RX chain. */
5502 bce_fill_rx_chain(sc);
5503
5504 DBRUN(sc->rx_low_watermark = USABLE_RX_BD);
5505 DBRUN(sc->rx_empty_count = 0);
5506 for (i = 0; i < RX_PAGES; i++) {
5507 bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
5508 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5509 }
5510
5511 bce_init_rx_context(sc);
5512
5513 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD));
5514 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5515 BCE_VERBOSE_CTX);
5516
5517 /* ToDo: Are there possible failure modes here? */
5518
5519 return(rc);
5520}
5521
5522
5523/****************************************************************************/
5524/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
5525/* occurs. */
5526/* */
5527/* Returns: */
5528/* Nothing */
5529/****************************************************************************/
5530static void
5531bce_fill_rx_chain(struct bce_softc *sc)
5532{
5533 u16 prod, prod_idx;
5534 u32 prod_bseq;
5535
5536 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5537 BCE_VERBOSE_CTX);
5538
5539 /* Get the RX chain producer indices. */
5540 prod = sc->rx_prod;
5541 prod_bseq = sc->rx_prod_bseq;
5542
5543 /* Keep filling the RX chain until it's full. */
5544 while (sc->free_rx_bd > 0) {
5545 prod_idx = RX_CHAIN_IDX(prod);
5546 if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
5547 /* Bail out if we can't add an mbuf to the chain. */
5548 break;
5549 }
5550 prod = NEXT_RX_BD(prod);
5551 }
5552
5553 /* Save the RX chain producer indices. */
5554 sc->rx_prod = prod;
5555 sc->rx_prod_bseq = prod_bseq;
5556
5557 /* We should never end up pointing to a next page pointer. */
5558 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5559 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5560 __FUNCTION__, sc->rx_prod));
5561
5562 /* Write the mailbox and tell the chip about the waiting rx_bd's. */
5563 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
5564 BCE_L2MQ_RX_HOST_BDIDX, sc->rx_prod);
5565 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) +
5566 BCE_L2MQ_RX_HOST_BSEQ, sc->rx_prod_bseq);
5567
5568 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5569 BCE_VERBOSE_CTX);
5570}
5571
5572
5573/****************************************************************************/
5574/* Free memory and clear the RX data structures. */
5575/* */
5576/* Returns: */
5577/* Nothing. */
5578/****************************************************************************/
5579static void
5580bce_free_rx_chain(struct bce_softc *sc)
5581{
5582 int i;
5583
5584 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5585
5586 /* Free any mbufs still in the RX mbuf chain. */
5587 for (i = 0; i < TOTAL_RX_BD; i++) {
5588 if (sc->rx_mbuf_ptr[i] != NULL) {
5589 if (sc->rx_mbuf_map[i] != NULL)
5590 bus_dmamap_sync(sc->rx_mbuf_tag,
5591 sc->rx_mbuf_map[i],
5592 BUS_DMASYNC_POSTREAD);
5593 m_freem(sc->rx_mbuf_ptr[i]);
5594 sc->rx_mbuf_ptr[i] = NULL;
5595 DBRUN(sc->debug_rx_mbuf_alloc--);
5596 }
5597 }
5598
5599 /* Clear each RX chain page. */
5600 for (i = 0; i < RX_PAGES; i++)
5601 if (sc->rx_bd_chain[i] != NULL) {
5602 bzero((char *)sc->rx_bd_chain[i],
5603 BCE_RX_CHAIN_PAGE_SZ);
5604 }
5605
5606 sc->free_rx_bd = sc->max_rx_bd;
5607
5608 /* Check if we lost any mbufs in the process. */
5609 DBRUNIF((sc->debug_rx_mbuf_alloc),
5610 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5611 __FUNCTION__, sc->debug_rx_mbuf_alloc));
5612
5613 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5614}
5615
5616
5617#ifdef BCE_JUMBO_HDRSPLIT
5618/****************************************************************************/
5619/* Allocate memory and initialize the page data structures. */
5620/* Assumes that bce_init_rx_chain() has not already been called. */
5621/* */
5622/* Returns: */
5623/* 0 for success, positive value for failure. */
5624/****************************************************************************/
5625static int
5626bce_init_pg_chain(struct bce_softc *sc)
5627{
5628 struct rx_bd *pgbd;
5629 int i, rc = 0;
5630 u32 val;
5631
5632 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5633 BCE_VERBOSE_CTX);
5634
5635 /* Initialize the page producer and consumer indices. */
5636 sc->pg_prod = 0;
5637 sc->pg_cons = 0;
5638 sc->free_pg_bd = USABLE_PG_BD;
5639 sc->max_pg_bd = USABLE_PG_BD;
5640 DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5641 DBRUN(sc->pg_empty_count = 0);
5642
5643 /* Initialize the page next pointer chain entries. */
5644 for (i = 0; i < PG_PAGES; i++) {
5645 int j;
5646
5647 pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5648
5649 /* Check if we've reached the last page. */
5650 if (i == (PG_PAGES - 1))
5651 j = 0;
5652 else
5653 j = i + 1;
5654
5655 /* Setup the chain page pointers. */
5656 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5657 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5658 }
5659
5660 /* Setup the MQ BIN mapping for host_pg_bidx. */
5661 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5662 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
5663 REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5664
5665 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5666
5667 /* Configure the rx_bd and page chain mbuf cluster size. */
5668 val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
5669 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
5670
5671 /* Configure the context reserved for jumbo support. */
5672 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5673 BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5674
5675 /* Point the hardware to the first page in the page chain. */
5676 val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5677 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5678 val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5679 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5680
5681 /* Fill up the page chain. */
5682 bce_fill_pg_chain(sc);
5683
5684 for (i = 0; i < PG_PAGES; i++) {
5685 bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
5686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5687 }
5688
5689 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
5690 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5691 BCE_VERBOSE_CTX);
5692 return(rc);
5693}
5694
5695
5696/****************************************************************************/
5697/* Add mbufs to the page chain until it's full or an mbuf allocation error  */
5698/* occurs. */
5699/* */
5700/* Returns: */
5701/* Nothing */
5702/****************************************************************************/
5703static void
5704bce_fill_pg_chain(struct bce_softc *sc)
5705{
5706 u16 prod, prod_idx;
5707
5708 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5709 BCE_VERBOSE_CTX);
5710
5711	/* Get the page chain producer index. */
5712 prod = sc->pg_prod;
5713
5714 /* Keep filling the page chain until it's full. */
5715 while (sc->free_pg_bd > 0) {
5716 prod_idx = PG_CHAIN_IDX(prod);
5717 if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
5718 /* Bail out if we can't add an mbuf to the chain. */
5719 break;
5720 }
5721 prod = NEXT_PG_BD(prod);
5722 }
5723
5724 /* Save the page chain producer index. */
5725 sc->pg_prod = prod;
5726
5727 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5728 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5729 __FUNCTION__, sc->pg_prod));
5730
5731 /*
5732 * Write the mailbox and tell the chip about
5733 * the new rx_bd's in the page chain.
5734 */
5735 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
5736 BCE_L2MQ_RX_HOST_PG_BDIDX, sc->pg_prod);
5737
5738 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5739 BCE_VERBOSE_CTX);
5740}
5741
5742
5743/****************************************************************************/
5744/* Free memory and clear the page chain data structures.                    */
5745/* */
5746/* Returns: */
5747/* Nothing. */
5748/****************************************************************************/
5749static void
5750bce_free_pg_chain(struct bce_softc *sc)
5751{
5752 int i;
5753
5754 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5755
5756 /* Free any mbufs still in the mbuf page chain. */
5757 for (i = 0; i < TOTAL_PG_BD; i++) {
5758 if (sc->pg_mbuf_ptr[i] != NULL) {
5759 if (sc->pg_mbuf_map[i] != NULL)
5760 bus_dmamap_sync(sc->pg_mbuf_tag,
5761 sc->pg_mbuf_map[i],
5762 BUS_DMASYNC_POSTREAD);
5763 m_freem(sc->pg_mbuf_ptr[i]);
5764 sc->pg_mbuf_ptr[i] = NULL;
5765 DBRUN(sc->debug_pg_mbuf_alloc--);
5766 }
5767 }
5768
5769	/* Clear each page chain page. */
5770 for (i = 0; i < PG_PAGES; i++)
5771 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
5772
5773 sc->free_pg_bd = sc->max_pg_bd;
5774
5775 /* Check if we lost any mbufs in the process. */
5776 DBRUNIF((sc->debug_pg_mbuf_alloc),
5777 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
5778 __FUNCTION__, sc->debug_pg_mbuf_alloc));
5779
5780 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5781}
5782#endif /* BCE_JUMBO_HDRSPLIT */
5783
5784
5785/****************************************************************************/
5786/* Set media options. */
5787/* */
5788/* Returns: */
5789/* 0 for success, positive value for failure. */
5790/****************************************************************************/
5791static int
5792bce_ifmedia_upd(struct ifnet *ifp)
5793{
5794 struct bce_softc *sc = ifp->if_softc;
5795 int error;
5796
5797 DBENTER(BCE_VERBOSE);
5798
5799 BCE_LOCK(sc);
5800 error = bce_ifmedia_upd_locked(ifp);
5801 BCE_UNLOCK(sc);
5802
5803 DBEXIT(BCE_VERBOSE);
5804 return (error);
5805}
5806
5807
5808/****************************************************************************/
5809/* Set media options. */
5810/* */
5811/* Returns: */
5812/*   0 for success, positive value for failure.                             */
5813/****************************************************************************/
5814static int
5815bce_ifmedia_upd_locked(struct ifnet *ifp)
5816{
5817 struct bce_softc *sc = ifp->if_softc;
5818 struct mii_data *mii;
5819 int error;
5820
5821 DBENTER(BCE_VERBOSE_PHY);
5822
5823 error = 0;
5824 BCE_LOCK_ASSERT(sc);
5825
5826 mii = device_get_softc(sc->bce_miibus);
5827
5828 /* Make sure the MII bus has been enumerated. */
5829 if (mii) {
5830 sc->bce_link_up = FALSE;
5831 if (mii->mii_instance) {
5832 struct mii_softc *miisc;
5833
5834 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5835 mii_phy_reset(miisc);
5836 }
5837 error = mii_mediachg(mii);
5838 }
5839
5840 DBEXIT(BCE_VERBOSE_PHY);
5841 return (error);
5842}
5843
5844
5845/****************************************************************************/
5846/* Reports current media status. */
5847/* */
5848/* Returns: */
5849/* Nothing. */
5850/****************************************************************************/
5851static void
5852bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5853{
5854 struct bce_softc *sc = ifp->if_softc;
5855 struct mii_data *mii;
5856
5857 DBENTER(BCE_VERBOSE_PHY);
5858
5859 BCE_LOCK(sc);
5860
5861 if ((ifp->if_flags & IFF_UP) == 0) {
5862 BCE_UNLOCK(sc);
5863 return;
5864 }
5865 mii = device_get_softc(sc->bce_miibus);
5866
5867 mii_pollstat(mii);
5868 ifmr->ifm_active = mii->mii_media_active;
5869 ifmr->ifm_status = mii->mii_media_status;
5870
5871 BCE_UNLOCK(sc);
5872
5873 DBEXIT(BCE_VERBOSE_PHY);
5874}
5875
5876
5877/****************************************************************************/
5878/* Handles PHY generated interrupt events. */
5879/* */
5880/* Returns: */
5881/* Nothing. */
5882/****************************************************************************/
5883static void
5884bce_phy_intr(struct bce_softc *sc)
5885{
5886 u32 new_link_state, old_link_state;
5887
5888 DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5889
5890 DBRUN(sc->phy_interrupts++);
5891
5892 new_link_state = sc->status_block->status_attn_bits &
5893 STATUS_ATTN_BITS_LINK_STATE;
5894 old_link_state = sc->status_block->status_attn_bits_ack &
5895 STATUS_ATTN_BITS_LINK_STATE;
5896
5897 /* Handle any changes if the link state has changed. */
5898 if (new_link_state != old_link_state) {
5899
5900 /* Update the status_attn_bits_ack field. */
5901 if (new_link_state) {
5902 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
5903 STATUS_ATTN_BITS_LINK_STATE);
5904 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
5905 __FUNCTION__);
5906 }
5907 else {
5908 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
5909 STATUS_ATTN_BITS_LINK_STATE);
5910 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
5911 __FUNCTION__);
5912 }
5913
5914 /*
5915 * Assume link is down and allow
5916 * tick routine to update the state
5917 * based on the actual media state.
5918 */
5919 sc->bce_link_up = FALSE;
5920 callout_stop(&sc->bce_tick_callout);
5921 bce_tick(sc);
5922 }
5923
5924 /* Acknowledge the link change interrupt. */
5925 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
5926
5927 DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5928}
5929
5930
5931/****************************************************************************/
5932/* Reads the receive consumer value from the status block (skipping over */
5933/* chain page pointer if necessary). */
5934/* */
5935/* Returns: */
5936/* hw_cons */
5937/****************************************************************************/
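/*
 * Example (assuming the default 4 KB chain page with 16-byte rx_bd
 * entries, i.e. 255 usable descriptors per page): a raw consumer index
 * of 0x00FF would land on the next-page pointer entry, so it is bumped
 * to 0x0100 before being compared against the software consumer index.
 */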
5938static inline u16
5939bce_get_hw_rx_cons(struct bce_softc *sc)
5940{
5941 u16 hw_cons;
5942
5943 rmb();
5944 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
5945 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5946 hw_cons++;
5947
5948 return hw_cons;
5949}
5950
5951/****************************************************************************/
5952/* Handles received frame interrupt events. */
5953/* */
5954/* Returns: */
5955/* Nothing. */
5956/****************************************************************************/
5957static void
5958bce_rx_intr(struct bce_softc *sc)
5959{
5960 struct ifnet *ifp = sc->bce_ifp;
5961 struct l2_fhdr *l2fhdr;
5962 struct ether_vlan_header *vh;
5963 unsigned int pkt_len;
5964 u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
5965 u32 status;
5966#ifdef BCE_JUMBO_HDRSPLIT
5967 unsigned int rem_len;
5968 u16 sw_pg_cons, sw_pg_cons_idx;
5969#endif
5970
5971 DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5972 DBRUN(sc->interrupts_rx++);
5973 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
5974 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5975 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5976
5977 /* Prepare the RX chain pages to be accessed by the host CPU. */
5978 for (int i = 0; i < RX_PAGES; i++)
5979 bus_dmamap_sync(sc->rx_bd_chain_tag,
5980 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5981
5982#ifdef BCE_JUMBO_HDRSPLIT
5983 /* Prepare the page chain pages to be accessed by the host CPU. */
5984 for (int i = 0; i < PG_PAGES; i++)
5985 bus_dmamap_sync(sc->pg_bd_chain_tag,
5986 sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5987#endif
5988
5989 /* Get the hardware's view of the RX consumer index. */
5990 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5991
5992 /* Get working copies of the driver's view of the consumer indices. */
5993 sw_rx_cons = sc->rx_cons;
5994
5995#ifdef BCE_JUMBO_HDRSPLIT
5996 sw_pg_cons = sc->pg_cons;
5997#endif
5998
5999 /* Update some debug statistics counters */
6000 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
6001 sc->rx_low_watermark = sc->free_rx_bd);
6002 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
6003 sc->rx_empty_count++);
6004
6005 /* Scan through the receive chain as long as there is work to do */
6006 /* ToDo: Consider setting a limit on the number of packets processed. */
6007 rmb();
6008 while (sw_rx_cons != hw_rx_cons) {
6009 struct mbuf *m0;
6010
6011 /* Convert the producer/consumer indices to an actual rx_bd index. */
6012 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
6013
6014 /* Unmap the mbuf from DMA space. */
6015 bus_dmamap_sync(sc->rx_mbuf_tag,
6016 sc->rx_mbuf_map[sw_rx_cons_idx],
6017 BUS_DMASYNC_POSTREAD);
6018 bus_dmamap_unload(sc->rx_mbuf_tag,
6019 sc->rx_mbuf_map[sw_rx_cons_idx]);
6020
6021 /* Remove the mbuf from the RX chain. */
6022 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
6023 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
6024 DBRUN(sc->debug_rx_mbuf_alloc--);
6025 sc->free_rx_bd++;
6026
6027 if(m0 == NULL) {
6028 DBPRINT(sc, BCE_EXTREME_RECV,
6029 "%s(): Oops! Empty mbuf pointer "
6030 "found in sc->rx_mbuf_ptr[0x%04X]!\n",
6031 __FUNCTION__, sw_rx_cons_idx);
6032 goto bce_rx_int_next_rx;
6033 }
6034
6035 /*
6036		 * Frames received on the NetXtreme II are prepended
6037 * with an l2_fhdr structure which provides status
6038 * information about the received frame (including
6039 * VLAN tags and checksum info). The frames are
6040 * also automatically adjusted to align the IP
6041 * header (i.e. two null bytes are inserted before
6042 * the Ethernet header). As a result the data
6043 * DMA'd by the controller into the mbuf looks
6044 * like this:
6045 *
6046 * +---------+-----+---------------------+-----+
6047 * | l2_fhdr | pad | packet data | FCS |
6048 * +---------+-----+---------------------+-----+
6049 *
6050 * The l2_fhdr needs to be checked and skipped and
6051 * the FCS needs to be stripped before sending the
6052 * packet up the stack.
6053 */
6054 l2fhdr = mtod(m0, struct l2_fhdr *);
6055
6056 /* Get the packet data + FCS length and the status. */
6057 pkt_len = l2fhdr->l2_fhdr_pkt_len;
6058 status = l2fhdr->l2_fhdr_status;
6059
6060 /*
6061 * Skip over the l2_fhdr and pad, resulting in the
6062 * following data in the mbuf:
6063 * +---------------------+-----+
6064 * | packet data | FCS |
6065 * +---------------------+-----+
6066 */
6067 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
6068
6069#ifdef BCE_JUMBO_HDRSPLIT
6070 /*
6071 * Check whether the received frame fits in a single
6072 * mbuf or not (i.e. packet data + FCS <=
6073 * sc->rx_bd_mbuf_data_len bytes).
6074 */
6075 if (pkt_len > m0->m_len) {
6076 /*
6077 * The received frame is larger than a single mbuf.
6078 * If the frame was a TCP frame then only the TCP
6079 * header is placed in the mbuf, the remaining
6080 * payload (including FCS) is placed in the page
6081 * chain, the SPLIT flag is set, and the header
6082 * length is placed in the IP checksum field.
6083 * If the frame is not a TCP frame then the mbuf
6084 * is filled and the remaining bytes are placed
6085 * in the page chain.
6086 */
6087
6088 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large "
6089 "packet.\n", __FUNCTION__);
6090
6091 /*
6092 * When the page chain is enabled and the TCP
6093 * header has been split from the TCP payload,
6094 * the ip_xsum structure will reflect the length
6095 * of the TCP header, not the IP checksum. Set
6096 * the packet length of the mbuf accordingly.
6097 */
6098 if (status & L2_FHDR_STATUS_SPLIT)
6099 m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
6100
6101 rem_len = pkt_len - m0->m_len;
6102
6103 /* Pull mbufs off the page chain for the remaining data. */
6104 while (rem_len > 0) {
6105 struct mbuf *m_pg;
6106
6107 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
6108
6109 /* Remove the mbuf from the page chain. */
6110 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
6111 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
6112 DBRUN(sc->debug_pg_mbuf_alloc--);
6113 sc->free_pg_bd++;
6114
6115 /* Unmap the page chain mbuf from DMA space. */
6116 bus_dmamap_sync(sc->pg_mbuf_tag,
6117 sc->pg_mbuf_map[sw_pg_cons_idx],
6118 BUS_DMASYNC_POSTREAD);
6119 bus_dmamap_unload(sc->pg_mbuf_tag,
6120 sc->pg_mbuf_map[sw_pg_cons_idx]);
6121
6122 /* Adjust the mbuf length. */
6123 if (rem_len < m_pg->m_len) {
6124 /* The mbuf chain is complete. */
6125 m_pg->m_len = rem_len;
6126 rem_len = 0;
6127 } else {
6128 /* More packet data is waiting. */
6129 rem_len -= m_pg->m_len;
6130 }
6131
6132 /* Concatenate the mbuf cluster to the mbuf. */
6133 m_cat(m0, m_pg);
6134
6135 sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
6136 }
6137
6138 /* Set the total packet length. */
6139 m0->m_pkthdr.len = pkt_len;
6140
6141 } else {
6142 /*
6143 * The received packet is small and fits in a
6144 * single mbuf (i.e. the l2_fhdr + pad + packet +
6145 * FCS <= MHLEN). In other words, the packet is
6146 * 154 bytes or less in size.
6147 */
6148
6149 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small "
6150 "packet.\n", __FUNCTION__);
6151
6152 /* Set the total packet length. */
6153 m0->m_pkthdr.len = m0->m_len = pkt_len;
6154 }
6155#else
6156 /* Set the total packet length. */
6157 m0->m_pkthdr.len = m0->m_len = pkt_len;
6158#endif
6159
6160 /* Remove the trailing Ethernet FCS. */
6161 m_adj(m0, -ETHER_CRC_LEN);
6162
6163 /* Check that the resulting mbuf chain is valid. */
6164 DBRUN(m_sanity(m0, FALSE));
6165 DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
6166 (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
6167 BCE_PRINTF("Invalid Ethernet frame size!\n");
6168 m_print(m0, 128));
6169
6170 DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
6171 sc->l2fhdr_error_sim_count++;
6172 status = status | L2_FHDR_ERRORS_PHY_DECODE);
6173
6174 /* Check the received frame for errors. */
6175 if (status & (L2_FHDR_ERRORS_BAD_CRC |
6176 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
6177 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
6178
6179 /* Log the error and release the mbuf. */
6180 ifp->if_ierrors++;
6181 sc->l2fhdr_error_count++;
6182
6183 m_freem(m0);
6184 m0 = NULL;
6185 goto bce_rx_int_next_rx;
6186 }
6187
6188 /* Send the packet to the appropriate interface. */
6189 m0->m_pkthdr.rcvif = ifp;
6190
6191 /* Assume no hardware checksum. */
6192 m0->m_pkthdr.csum_flags = 0;
6193
6194 /* Validate the checksum if offload enabled. */
6195 if (ifp->if_capenable & IFCAP_RXCSUM) {
6196
6197 /* Check for an IP datagram. */
6198 if (!(status & L2_FHDR_STATUS_SPLIT) &&
6199 (status & L2_FHDR_STATUS_IP_DATAGRAM)) {
6200 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
6201 DBRUN(sc->csum_offload_ip++);
6202 /* Check if the IP checksum is valid. */
6203 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
6204 m0->m_pkthdr.csum_flags |=
6205 CSUM_IP_VALID;
6206 }
6207
6208 /* Check for a valid TCP/UDP frame. */
6209 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
6210 L2_FHDR_STATUS_UDP_DATAGRAM)) {
6211
6212 /* Check for a good TCP/UDP checksum. */
6213 if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
6214 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
6215 DBRUN(sc->csum_offload_tcp_udp++);
6216 m0->m_pkthdr.csum_data =
6217 l2fhdr->l2_fhdr_tcp_udp_xsum;
6218 m0->m_pkthdr.csum_flags |=
6219 (CSUM_DATA_VALID
6220 | CSUM_PSEUDO_HDR);
6221 }
6222 }
6223 }
6224
6225 /* Attach the VLAN tag. */
6226 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
6227 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
6228#if __FreeBSD_version < 700000
6229 VLAN_INPUT_TAG(ifp, m0,
6230 l2fhdr->l2_fhdr_vlan_tag, continue);
6231#else
6232 m0->m_pkthdr.ether_vtag =
6233 l2fhdr->l2_fhdr_vlan_tag;
6234 m0->m_flags |= M_VLANTAG;
6235#endif
6236 } else {
6237 /*
6238 * bce(4) controllers can't disable VLAN
6239 * tag stripping if management firmware
6240			 * (ASF/IPMI/UMP) is running, so the tag is
6241			 * always stripped by the hardware.  When the
6242			 * user has disabled VLAN tag stripping, the
6243			 * driver manually reconstructs the VLAN frame
6244			 * by re-inserting the stripped tag here.
6245 *
6246 * TODO: LLC SNAP handling.
6247 */
6248 bcopy(mtod(m0, uint8_t *),
6249 mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN,
6250 ETHER_ADDR_LEN * 2);
6251 m0->m_data -= ETHER_VLAN_ENCAP_LEN;
6252 vh = mtod(m0, struct ether_vlan_header *);
6253 vh->evl_encap_proto = htons(ETHERTYPE_VLAN);
6254 vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag);
6255 m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN;
6256 m0->m_len += ETHER_VLAN_ENCAP_LEN;
6257 }
6258 }
6259
6260 /* Increment received packet statistics. */
6261 ifp->if_ipackets++;
6262
6263bce_rx_int_next_rx:
6264 sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
6265
6266 /* If we have a packet, pass it up the stack */
6267 if (m0) {
6268 /* Make sure we don't lose our place when we release the lock. */
6269 sc->rx_cons = sw_rx_cons;
6270#ifdef BCE_JUMBO_HDRSPLIT
6271 sc->pg_cons = sw_pg_cons;
6272#endif
6273
6274 BCE_UNLOCK(sc);
6275 (*ifp->if_input)(ifp, m0);
6276 BCE_LOCK(sc);
6277
6278 /* Recover our place. */
6279 sw_rx_cons = sc->rx_cons;
6280#ifdef BCE_JUMBO_HDRSPLIT
6281 sw_pg_cons = sc->pg_cons;
6282#endif
6283 }
6284
6285 /* Refresh hw_cons to see if there's new work */
6286 if (sw_rx_cons == hw_rx_cons)
6287 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
6288 }
6289
6290#ifdef BCE_JUMBO_HDRSPLIT
6291 /* No new packets. Refill the page chain. */
6292 sc->pg_cons = sw_pg_cons;
6293 bce_fill_pg_chain(sc);
6294#endif
6295
6296 /* No new packets. Refill the RX chain. */
6297 sc->rx_cons = sw_rx_cons;
6298 bce_fill_rx_chain(sc);
6299
6300	/* Prepare the RX chain pages to be accessed by the NIC. */
6301 for (int i = 0; i < RX_PAGES; i++)
6302 bus_dmamap_sync(sc->rx_bd_chain_tag,
6303 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6304
6305#ifdef BCE_JUMBO_HDRSPLIT
6306 for (int i = 0; i < PG_PAGES; i++)
6307 bus_dmamap_sync(sc->pg_bd_chain_tag,
6308 sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6309#endif
6310
6311 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
6312 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
6313 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
6314 DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
6315}
6316
6317
6318/****************************************************************************/
6319/* Reads the transmit consumer value from the status block (skipping over */
6320/* chain page pointer if necessary). */
6321/* */
6322/* Returns: */
6323/* hw_cons */
6324/****************************************************************************/
6325static inline u16
6326bce_get_hw_tx_cons(struct bce_softc *sc)
6327{
6328 u16 hw_cons;
6329
6330 mb();
6331 hw_cons = sc->status_block->status_tx_quick_consumer_index0;
6332 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6333 hw_cons++;
6334
6335 return hw_cons;
6336}
6337
6338
6339/****************************************************************************/
6340/* Handles transmit completion interrupt events. */
6341/* */
6342/* Returns: */
6343/* Nothing. */
6344/****************************************************************************/
6345static void
6346bce_tx_intr(struct bce_softc *sc)
6347{
6348 struct ifnet *ifp = sc->bce_ifp;
6349 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
6350
6351 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6352 DBRUN(sc->interrupts_tx++);
6353 DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
6354 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6355 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6356
6357 BCE_LOCK_ASSERT(sc);
6358
6359 /* Get the hardware's view of the TX consumer index. */
6360 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6361 sw_tx_cons = sc->tx_cons;
6362
6363 /* Prevent speculative reads of the status block. */
6364 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6365 BUS_SPACE_BARRIER_READ);
6366
6367 /* Cycle through any completed TX chain page entries. */
6368 while (sw_tx_cons != hw_tx_cons) {
6369#ifdef BCE_DEBUG
6370 struct tx_bd *txbd = NULL;
6371#endif
6372 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
6373
6374 DBPRINT(sc, BCE_INFO_SEND,
6375 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
6376 "sw_tx_chain_cons = 0x%04X\n",
6377 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
6378
6379 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
6380 BCE_PRINTF("%s(%d): TX chain consumer out of range! "
6381 " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
6382 (int) MAX_TX_BD);
6383 bce_breakpoint(sc));
6384
6385 DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
6386 [TX_IDX(sw_tx_chain_cons)]);
6387
6388 DBRUNIF((txbd == NULL),
6389 BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
6390 __FILE__, __LINE__, sw_tx_chain_cons);
6391 bce_breakpoint(sc));
6392
6393 DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
6394 bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
6395
6396 /*
6397 * Free the associated mbuf. Remember
6398 * that only the last tx_bd of a packet
6399 * has an mbuf pointer and DMA map.
6400 */
6401 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
6402
6403 /* Validate that this is the last tx_bd. */
6404 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
6405 BCE_PRINTF("%s(%d): tx_bd END flag not set but "
6406			    "txmbuf != NULL!\n", __FILE__, __LINE__);
6407 bce_breakpoint(sc));
6408
6409 DBRUNMSG(BCE_INFO_SEND,
6410 BCE_PRINTF("%s(): Unloading map/freeing mbuf "
6411 "from tx_bd[0x%04X]\n", __FUNCTION__,
6412 sw_tx_chain_cons));
6413
6414 /* Unmap the mbuf. */
6415 bus_dmamap_unload(sc->tx_mbuf_tag,
6416 sc->tx_mbuf_map[sw_tx_chain_cons]);
6417
6418 /* Free the mbuf. */
6419 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
6420 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
6421 DBRUN(sc->debug_tx_mbuf_alloc--);
6422
6423 ifp->if_opackets++;
6424 }
6425
6426 sc->used_tx_bd--;
6427 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
6428
6429 /* Refresh hw_cons to see if there's new work. */
6430 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6431
6432 /* Prevent speculative reads of the status block. */
6433 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6434 BUS_SPACE_BARRIER_READ);
6435 }
6436
6437 /* Clear the TX timeout timer. */
6438 sc->watchdog_timer = 0;
6439
6440 /* Clear the tx hardware queue full flag. */
6441 if (sc->used_tx_bd < sc->max_tx_bd) {
6442 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
6443 DBPRINT(sc, BCE_INFO_SEND,
6444 "%s(): Open TX chain! %d/%d (used/total)\n",
6445 __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
6446 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6447 }
6448
6449 sc->tx_cons = sw_tx_cons;
6450
6451 DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
6452 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6453 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6454 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6455}
6456
6457
6458/****************************************************************************/
6459/* Disables interrupt generation. */
6460/* */
6461/* Returns: */
6462/* Nothing. */
6463/****************************************************************************/
6464static void
6465bce_disable_intr(struct bce_softc *sc)
6466{
6467 DBENTER(BCE_VERBOSE_INTR);
6468
6469 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
6470 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
6471
6472 DBEXIT(BCE_VERBOSE_INTR);
6473}
6474
6475
6476/****************************************************************************/
6477/* Enables interrupt generation. */
6478/* */
6479/* Returns: */
6480/* Nothing. */
6481/****************************************************************************/
6482static void
6483bce_enable_intr(struct bce_softc *sc, int coal_now)
6484{
6485 DBENTER(BCE_VERBOSE_INTR);
6486
6487 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6488 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
6489 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
6490
6491 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6492 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
6493
6494 /* Force an immediate interrupt (whether there is new data or not). */
6495 if (coal_now)
6496 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
6497
6498 DBEXIT(BCE_VERBOSE_INTR);
6499}
6500
6501
6502/****************************************************************************/
6503/* Handles controller initialization. */
6504/* */
6505/* Returns: */
6506/* Nothing. */
6507/****************************************************************************/
6508static void
6509bce_init_locked(struct bce_softc *sc)
6510{
6511 struct ifnet *ifp;
6512 u32 ether_mtu = 0;
6513
6514 DBENTER(BCE_VERBOSE_RESET);
6515
6516 BCE_LOCK_ASSERT(sc);
6517
6518 ifp = sc->bce_ifp;
6519
6520 /* Check if the driver is still running and bail out if it is. */
6521 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6522 goto bce_init_locked_exit;
6523
6524 bce_stop(sc);
6525
6526 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
6527 BCE_PRINTF("%s(%d): Controller reset failed!\n",
6528 __FILE__, __LINE__);
6529 goto bce_init_locked_exit;
6530 }
6531
6532 if (bce_chipinit(sc)) {
6533 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
6534 __FILE__, __LINE__);
6535 goto bce_init_locked_exit;
6536 }
6537
6538 if (bce_blockinit(sc)) {
6539 BCE_PRINTF("%s(%d): Block initialization failed!\n",
6540 __FILE__, __LINE__);
6541 goto bce_init_locked_exit;
6542 }
6543
6544 /* Load our MAC address. */
6545 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
6546 bce_set_mac_addr(sc);
6547
6548 /*
6549 * Calculate and program the hardware Ethernet MTU
6550 * size. Be generous on the receive if we have room.
6551 */
6552#ifdef BCE_JUMBO_HDRSPLIT
6553 if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
6554 sc->pg_bd_mbuf_alloc_size))
6555 ether_mtu = sc->rx_bd_mbuf_data_len +
6556 sc->pg_bd_mbuf_alloc_size;
6557#else
6558 if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
6559 ether_mtu = sc->rx_bd_mbuf_data_len;
6560#endif
6561 else
6562 ether_mtu = ifp->if_mtu;
6563
6564 ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
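	/*
	 * Worked example (standard frame sizes, for illustration only):
	 * with the default 1500 byte MTU this works out to
	 * 1500 + 14 + 4 + 4 = 1522 bytes, which is not greater than
	 * ETHER_MAX_LEN plus a VLAN tag, so the jumbo enable bit is
	 * left clear by the check below.
	 */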
6565
6566 DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n",
6567 __FUNCTION__, ether_mtu);
6568
6569 /* Program the mtu, enabling jumbo frame support if necessary. */
6570 if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
6571 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
6572 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
6573 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
6574 else
6575 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
6576
6577 DBPRINT(sc, BCE_INFO_LOAD,
6578	    "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
6579 "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
6580 sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
6581 sc->rx_bd_mbuf_align_pad);
6582
6583 /* Program appropriate promiscuous/multicast filtering. */
6584 bce_set_rx_mode(sc);
6585
6586#ifdef BCE_JUMBO_HDRSPLIT
6587 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
6588 __FUNCTION__, sc->pg_bd_mbuf_alloc_size);
6589
6590 /* Init page buffer descriptor chain. */
6591 bce_init_pg_chain(sc);
6592#endif
6593
6594 /* Init RX buffer descriptor chain. */
6595 bce_init_rx_chain(sc);
6596
6597 /* Init TX buffer descriptor chain. */
6598 bce_init_tx_chain(sc);
6599
6600 /* Enable host interrupts. */
6601 bce_enable_intr(sc, 1);
6602
6603 bce_ifmedia_upd_locked(ifp);
6604
6605 /* Let the OS know the driver is up and running. */
6606 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6607 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6608
6609 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6610
6611bce_init_locked_exit:
6612 DBEXIT(BCE_VERBOSE_RESET);
6613}
6614
6615
6616/****************************************************************************/
6617/* Initialize the controller just enough so that any management firmware */
6618/* running on the device will continue to operate correctly. */
6619/* */
6620/* Returns: */
6621/* Nothing. */
6622/****************************************************************************/
6623static void
6624bce_mgmt_init_locked(struct bce_softc *sc)
6625{
6626 struct ifnet *ifp;
6627
6628 DBENTER(BCE_VERBOSE_RESET);
6629
6630 BCE_LOCK_ASSERT(sc);
6631
6632 /* Bail out if management firmware is not running. */
6633 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
6634 DBPRINT(sc, BCE_VERBOSE_SPECIAL,
6635 "No management firmware running...\n");
6636 goto bce_mgmt_init_locked_exit;
6637 }
6638
6639 ifp = sc->bce_ifp;
6640
6641 /* Enable all critical blocks in the MAC. */
6642 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
6643 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
6644 DELAY(20);
6645
6646 bce_ifmedia_upd_locked(ifp);
6647
6648bce_mgmt_init_locked_exit:
6649 DBEXIT(BCE_VERBOSE_RESET);
6650}
6651
6652
6653/****************************************************************************/
6654/* Handles controller initialization when called from an unlocked routine. */
6655/* */
6656/* Returns: */
6657/* Nothing. */
6658/****************************************************************************/
6659static void
6660bce_init(void *xsc)
6661{
6662 struct bce_softc *sc = xsc;
6663
6664 DBENTER(BCE_VERBOSE_RESET);
6665
6666 BCE_LOCK(sc);
6667 bce_init_locked(sc);
6668 BCE_UNLOCK(sc);
6669
6670 DBEXIT(BCE_VERBOSE_RESET);
6671}
6672
6673
6674/****************************************************************************/
6675/* Modifies an mbuf for TSO on the hardware. */
6676/* */
6677/* Returns: */
6678/* Pointer to a modified mbuf. */
6679/****************************************************************************/
6680static struct mbuf *
6681bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
6682{
6683 struct mbuf *m;
6684 struct ether_header *eh;
6685 struct ip *ip;
6686 struct tcphdr *th;
6687 u16 etype;
6688 int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
6689
6690 DBRUN(sc->tso_frames_requested++);
6691
6692 /* Controller may modify mbuf chains. */
6693 if (M_WRITABLE(*m_head) == 0) {
6694 m = m_dup(*m_head, M_DONTWAIT);
6695 m_freem(*m_head);
6696 if (m == NULL) {
6697 sc->mbuf_alloc_failed_count++;
6698 *m_head = NULL;
6699 return (NULL);
6700 }
6701 *m_head = m;
6702 }
6703
6704 /*
6705 * For TSO the controller needs two pieces of info,
6706 * the MSS and the IP+TCP options length.
6707 */
6708 m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip));
6709 if (m == NULL) {
6710 *m_head = NULL;
6711 return (NULL);
6712 }
6713 eh = mtod(m, struct ether_header *);
6714 etype = ntohs(eh->ether_type);
6715
6716 /* Check for supported TSO Ethernet types (only IPv4 for now) */
6717 switch (etype) {
6718 case ETHERTYPE_IP:
6719 ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
6720 /* TSO only supported for TCP protocol. */
6721 if (ip->ip_p != IPPROTO_TCP) {
6722			BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n",
6723 __FILE__, __LINE__);
6724 m_freem(*m_head);
6725 *m_head = NULL;
6726 return (NULL);
6727 }
6728
6729 /* Get IP header length in bytes (min 20) */
6730 ip_hlen = ip->ip_hl << 2;
6731 m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen +
6732 sizeof(struct tcphdr));
6733 if (m == NULL) {
6734 *m_head = NULL;
6735 return (NULL);
6736 }
6737
6738 /* Get the TCP header length in bytes (min 20) */
6739 ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
6740 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
6741 tcp_hlen = (th->th_off << 2);
6742
6743 /* Make sure all IP/TCP options live in the same buffer. */
6744 m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen +
6745 tcp_hlen);
6746 if (m == NULL) {
6747 *m_head = NULL;
6748 return (NULL);
6749 }
6750
6751 /* IP header length and checksum will be calc'd by hardware */
6752 ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
6753 ip_len = ip->ip_len;
6754 ip->ip_len = 0;
6755 ip->ip_sum = 0;
6756 break;
6757 case ETHERTYPE_IPV6:
6758		BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n",
6759 __FILE__, __LINE__);
6760 m_freem(*m_head);
6761 *m_head = NULL;
6762 return (NULL);
6763 /* NOT REACHED */
6764 default:
6765		BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n",
6766 __FILE__, __LINE__);
6767 m_freem(*m_head);
6768 *m_head = NULL;
6769 return (NULL);
6770 }
6771
6772 hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen;
6773
6774 DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, "
6775 "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
6776 __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen,
6777 tcp_hlen, ip_len);
6778
6779 /* Set the LSO flag in the TX BD */
6780 *flags |= TX_BD_FLAGS_SW_LSO;
6781
6782 /* Set the length of IP + TCP options (in 32 bit words) */
6783 *flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) -
6784 sizeof(struct tcphdr)) >> 2) << 8);
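	/*
	 * Worked example (illustrative): a 24 byte IP header and a
	 * 32 byte TCP header carry 4 + 12 = 16 option bytes, i.e. 4
	 * 32-bit words, which the line above encodes as (4 << 8).
	 */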
6785
6786 DBRUN(sc->tso_frames_completed++);
6787 return (*m_head);
6788}
6789
6790
6791/****************************************************************************/
6792/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the*/
6793/* memory visible to the controller. */
6794/* */
6795/* Returns: */
6796/* 0 for success, positive value for failure. */
6797/* Modified: */
6798/* m_head: May be set to NULL if MBUF is excessively fragmented. */
6799/****************************************************************************/
6800static int
6801bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
6802{
6803 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
6804 bus_dmamap_t map;
6805 struct tx_bd *txbd = NULL;
6806 struct mbuf *m0;
6807 u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0;
6808 u32 prod_bseq;
6809
6810#ifdef BCE_DEBUG
6811 u16 debug_prod;
6812#endif
6813
6814 int i, error, nsegs, rc = 0;
6815
6816 DBENTER(BCE_VERBOSE_SEND);
6817
6818 /* Make sure we have room in the TX chain. */
6819 if (sc->used_tx_bd >= sc->max_tx_bd)
6820 goto bce_tx_encap_exit;
6821
6822 /* Transfer any checksum offload flags to the bd. */
6823 m0 = *m_head;
6824 if (m0->m_pkthdr.csum_flags) {
6825 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
6826 m0 = bce_tso_setup(sc, m_head, &flags);
6827 if (m0 == NULL) {
6828 DBRUN(sc->tso_frames_failed++);
6829 goto bce_tx_encap_exit;
6830 }
6831 mss = htole16(m0->m_pkthdr.tso_segsz);
6832 } else {
6833 if (m0->m_pkthdr.csum_flags & CSUM_IP)
6834 flags |= TX_BD_FLAGS_IP_CKSUM;
6835 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
6836 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6837 }
6838 }
6839
6840 /* Transfer any VLAN tags to the bd. */
6841 if (m0->m_flags & M_VLANTAG) {
6842 flags |= TX_BD_FLAGS_VLAN_TAG;
6843 vlan_tag = m0->m_pkthdr.ether_vtag;
6844 }
6845
6846 /* Map the mbuf into DMAable memory. */
6847 prod = sc->tx_prod;
6848 chain_prod = TX_CHAIN_IDX(prod);
6849 map = sc->tx_mbuf_map[chain_prod];
6850
6851 /* Map the mbuf into our DMA address space. */
6852 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6853 segs, &nsegs, BUS_DMA_NOWAIT);
6854
6855 /* Check if the DMA mapping was successful */
6856 if (error == EFBIG) {
6857 sc->mbuf_frag_count++;
6858
6859 /* Try to defrag the mbuf. */
6860 m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS);
6861 if (m0 == NULL) {
6862 /* Defrag was unsuccessful */
6863 m_freem(*m_head);
6864 *m_head = NULL;
6865 sc->mbuf_alloc_failed_count++;
6866 rc = ENOBUFS;
6867 goto bce_tx_encap_exit;
6868 }
6869
6870 /* Defrag was successful, try mapping again */
6871 *m_head = m0;
6872 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag,
6873 map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
6874
6875 /* Still getting an error after a defrag. */
6876 if (error == ENOMEM) {
6877 /* Insufficient DMA buffers available. */
6878 sc->dma_map_addr_tx_failed_count++;
6879 rc = error;
6880 goto bce_tx_encap_exit;
6881 } else if (error != 0) {
6882 /* Release it and return an error. */
6883 BCE_PRINTF("%s(%d): Unknown error mapping mbuf into "
6884 "TX chain!\n", __FILE__, __LINE__);
6885 m_freem(m0);
6886 *m_head = NULL;
6887 sc->dma_map_addr_tx_failed_count++;
6888 rc = ENOBUFS;
6889 goto bce_tx_encap_exit;
6890 }
6891 } else if (error == ENOMEM) {
6892 /* Insufficient DMA buffers available. */
6893 sc->dma_map_addr_tx_failed_count++;
6894 rc = error;
6895 goto bce_tx_encap_exit;
6896 } else if (error != 0) {
6897 m_freem(m0);
6898 *m_head = NULL;
6899 sc->dma_map_addr_tx_failed_count++;
6900 rc = error;
6901 goto bce_tx_encap_exit;
6902 }
6903
6904 /* Make sure there's room in the chain */
6905 if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
6906 bus_dmamap_unload(sc->tx_mbuf_tag, map);
6907 rc = ENOBUFS;
6908 goto bce_tx_encap_exit;
6909 }
6910
6911 /* prod points to an empty tx_bd at this point. */
6912 prod_bseq = sc->tx_prod_bseq;
6913
6914#ifdef BCE_DEBUG
6915 debug_prod = chain_prod;
6916#endif
6917
6918 DBPRINT(sc, BCE_INFO_SEND,
6919 "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
6920 "prod_bseq = 0x%08X\n",
6921 __FUNCTION__, prod, chain_prod, prod_bseq);
6922
6923 /*
6924 * Cycle through each mbuf segment that makes up
6925 * the outgoing frame, gathering the mapping info
6926 * for that segment and creating a tx_bd for
6927 * the mbuf.
6928 */
6929 for (i = 0; i < nsegs ; i++) {
6930
6931 chain_prod = TX_CHAIN_IDX(prod);
6932 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)]
6933 [TX_IDX(chain_prod)];
6934
6935 txbd->tx_bd_haddr_lo =
6936 htole32(BCE_ADDR_LO(segs[i].ds_addr));
6937 txbd->tx_bd_haddr_hi =
6938 htole32(BCE_ADDR_HI(segs[i].ds_addr));
6939 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
6940 htole16(segs[i].ds_len);
6941 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
6942 txbd->tx_bd_flags = htole16(flags);
6943 prod_bseq += segs[i].ds_len;
6944 if (i == 0)
6945 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
6946 prod = NEXT_TX_BD(prod);
6947 }
6948
6949 /* Set the END flag on the last TX buffer descriptor. */
6950 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
6951
6952 DBRUNMSG(BCE_EXTREME_SEND,
6953 bce_dump_tx_chain(sc, debug_prod, nsegs));
6954
6955 /*
6956 * Ensure that the mbuf pointer for this transmission
6957 * is placed at the array index of the last
6958 * descriptor in this chain. This is done
6959 * because a single map is used for all
6960 * segments of the mbuf and we don't want to
6961 * unload the map before all of the segments
6962 * have been freed.
6963 */
6964 sc->tx_mbuf_ptr[chain_prod] = m0;
6965 sc->used_tx_bd += nsegs;
6966
6967 /* Update some debug statistic counters */
6968 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
6969 sc->tx_hi_watermark = sc->used_tx_bd);
6970 DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
6971 DBRUNIF(sc->debug_tx_mbuf_alloc++);
6972
6973 DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
6974
6975 /* prod points to the next free tx_bd at this point. */
6976 sc->tx_prod = prod;
6977 sc->tx_prod_bseq = prod_bseq;
6978
6979 /* Tell the chip about the waiting TX frames. */
6980 REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
6981 BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6982 REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
6983 BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6984
6985bce_tx_encap_exit:
6986 DBEXIT(BCE_VERBOSE_SEND);
6987 return(rc);
6988}
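
/*
 * Illustrative sketch (not part of the driver): why bce_tx_encap() above
 * records the mbuf pointer at the index of the *last* descriptor of a
 * frame.  A single DMA map covers every segment, so it may only be
 * unloaded once the final descriptor has been reclaimed.  The ring layout
 * and EX_RING_SIZE below are hypothetical; the driver uses its own
 * TX_CHAIN_IDX()/NEXT_TX_BD() macros and paged descriptor arrays.
 */
#define EX_RING_SIZE	256			/* hypothetical ring size */

struct ex_tx_ring {
	struct mbuf	*mbuf_ptr[EX_RING_SIZE];
	u16		prod;			/* free-running producer index */
};

static void
ex_ring_post_frame(struct ex_tx_ring *r, struct mbuf *m, int nsegs)
{
	u16 idx = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		idx = r->prod % EX_RING_SIZE;
		/* ... fill the descriptor at idx for segment i ... */
		r->prod++;
	}

	/* Record the mbuf at the slot of the last descriptor used. */
	r->mbuf_ptr[idx] = m;
}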
6989
6990
6991/****************************************************************************/
6992/* Main transmit routine when called from another routine with a lock. */
6993/* */
6994/* Returns: */
6995/* Nothing. */
6996/****************************************************************************/
6997static void
6998bce_start_locked(struct ifnet *ifp)
6999{
7000 struct bce_softc *sc = ifp->if_softc;
7001 struct mbuf *m_head = NULL;
7002 int count = 0;
7003 u16 tx_prod, tx_chain_prod;
7004
7005 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
7006
7007 BCE_LOCK_ASSERT(sc);
7008
7009 /* prod points to the next free tx_bd. */
7010 tx_prod = sc->tx_prod;
7011 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
7012
7013 DBPRINT(sc, BCE_INFO_SEND,
7014 "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
7015 "tx_prod_bseq = 0x%08X\n",
7016 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
7017
7018 /* If there's no link or the transmit queue is empty then just exit. */
7019 if (sc->bce_link_up == FALSE) {
7020 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
7021 __FUNCTION__);
7022 goto bce_start_locked_exit;
7023 }
7024
7025 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7026 DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
7027 __FUNCTION__);
7028 goto bce_start_locked_exit;
7029 }
7030
7031 /*
7032 * Keep adding entries while there is space in the ring.
7033 */
7034 while (sc->used_tx_bd < sc->max_tx_bd) {
7035
7036 /* Check for any frames to send. */
7037 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
7038
7039 /* Stop when the transmit queue is empty. */
7040 if (m_head == NULL)
7041 break;
7042
7043 /*
7044 * Pack the data into the transmit ring. If we
7045 * don't have room, place the mbuf back at the
7046 * head of the queue and set the OACTIVE flag
7047 * to wait for the NIC to drain the chain.
7048 */
7049 if (bce_tx_encap(sc, &m_head)) {
7050 if (m_head != NULL)
7051 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
7052 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
7053 DBPRINT(sc, BCE_INFO_SEND,
7054 "TX chain is closed for business! Total "
7055 "tx_bd used = %d\n", sc->used_tx_bd);
7056 break;
7057 }
7058
7059 count++;
7060
7061 /* Send a copy of the frame to any BPF listeners. */
7062 ETHER_BPF_MTAP(ifp, m_head);
7063 }
7064
7065 /* Exit if no packets were dequeued. */
7066 if (count == 0) {
7067 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were "
7068 "dequeued\n", __FUNCTION__);
7069 goto bce_start_locked_exit;
7070 }
7071
7072 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into "
7073 "send queue.\n", __FUNCTION__, count);
7074
7075 /* Set the tx timeout. */
7076 sc->watchdog_timer = BCE_TX_TIMEOUT;
7077
7078 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
7079 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));
7080
7081bce_start_locked_exit:
7082 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
7083 return;
7084}
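
/*
 * Illustrative sketch (not part of the driver): the dequeue/encapsulate
 * pattern used by bce_start_locked() above.  When encapsulation fails
 * (e.g. the chain is full) the mbuf is pushed back onto the head of the
 * send queue and IFF_DRV_OACTIVE is set so the stack stops handing the
 * driver frames until the hardware drains.  The ex_encap callback is
 * hypothetical.
 */
static void
ex_start_locked(struct ifnet *ifp, int (*ex_encap)(struct mbuf **))
{
	struct mbuf *m_head;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ex_encap(&m_head) != 0) {
			/* Requeue the frame unless encap already freed it. */
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Let any BPF listeners see the queued frame. */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}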
7085
7086
7087/****************************************************************************/
7088/* Main transmit routine when called from another routine without a lock. */
7089/* */
7090/* Returns: */
7091/* Nothing. */
7092/****************************************************************************/
7093static void
7094bce_start(struct ifnet *ifp)
7095{
7096 struct bce_softc *sc = ifp->if_softc;
7097
7098 DBENTER(BCE_VERBOSE_SEND);
7099
7100 BCE_LOCK(sc);
7101 bce_start_locked(ifp);
7102 BCE_UNLOCK(sc);
7103
7104 DBEXIT(BCE_VERBOSE_SEND);
7105}
7106
7107
7108/****************************************************************************/
7109/* Handles any IOCTL calls from the operating system. */
7110/* */
7111/* Returns: */
7112/* 0 for success, positive value for failure. */
7113/****************************************************************************/
7114static int
7115bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
7116{
7117 struct bce_softc *sc = ifp->if_softc;
7118 struct ifreq *ifr = (struct ifreq *) data;
7119 struct mii_data *mii;
7120 int mask, error = 0, reinit;
7121
7122 DBENTER(BCE_VERBOSE_MISC);
7123
7124 switch(command) {
7125
7126 /* Set the interface MTU. */
7127 case SIOCSIFMTU:
7128 /* Check that the MTU setting is supported. */
7129 if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
7130 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
7131 error = EINVAL;
7132 break;
7133 }
7134
7135 DBPRINT(sc, BCE_INFO_MISC,
7136 "SIOCSIFMTU: Changing MTU from %d to %d\n",
7137 (int) ifp->if_mtu, (int) ifr->ifr_mtu);
7138
7139 BCE_LOCK(sc);
7140 ifp->if_mtu = ifr->ifr_mtu;
7141 reinit = 0;
7142 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7143 /*
7144 * Because the MTU affects the RX buffer
7145 * allocation size, stop the controller if
7146 * it is already running.
7147 */
7148 bce_stop(sc);
7149 reinit = 1;
7150 }
7151#ifdef BCE_JUMBO_HDRSPLIT
7152 /* No buffer allocation size changes are necessary. */
7153#else
7154 /* Recalculate our buffer allocation sizes. */
7155 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
7156 ETHER_CRC_LEN) > MCLBYTES) {
7157 sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
7158 sc->rx_bd_mbuf_align_pad =
7159 roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
7160 sc->rx_bd_mbuf_data_len =
7161 sc->rx_bd_mbuf_alloc_size -
7162 sc->rx_bd_mbuf_align_pad;
7163 } else {
7164 sc->rx_bd_mbuf_alloc_size = MCLBYTES;
7165 sc->rx_bd_mbuf_align_pad =
7166 roundup2(MCLBYTES, 16) - MCLBYTES;
7167 sc->rx_bd_mbuf_data_len =
7168 sc->rx_bd_mbuf_alloc_size -
7169 sc->rx_bd_mbuf_align_pad;
7170 }
7171#endif
7172
7173 if (reinit != 0)
7174 bce_init_locked(sc);
7175 BCE_UNLOCK(sc);
7176 break;
7177
7178 /* Set interface flags. */
7179 case SIOCSIFFLAGS:
7180 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
7181
7182 BCE_LOCK(sc);
7183
7184 /* Check if the interface is up. */
7185 if (ifp->if_flags & IFF_UP) {
7186 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7187 /* Change promiscuous/multicast flags as necessary. */
7188 bce_set_rx_mode(sc);
7189 } else {
7190 /* Start the HW */
7191 bce_init_locked(sc);
7192 }
7193 } else {
7194 /* The interface is down, check if the driver is running. */
7195 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7196 bce_stop(sc);
7197
7198 /* If MFW is running, partially reinitialize the controller. */
7199 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
7200 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
7201 bce_chipinit(sc);
7202 bce_mgmt_init_locked(sc);
7203 }
7204 }
7205 }
7206
7207 BCE_UNLOCK(sc);
7208 break;
7209
7210 /* Add/Delete multicast address */
7211 case SIOCADDMULTI:
7212 case SIOCDELMULTI:
7213 DBPRINT(sc, BCE_VERBOSE_MISC,
7214 "Received SIOCADDMULTI/SIOCDELMULTI\n");
7215
7216 BCE_LOCK(sc);
7217 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
7218 bce_set_rx_mode(sc);
7219 BCE_UNLOCK(sc);
7220
7221 break;
7222
7223 /* Set/Get Interface media */
7224 case SIOCSIFMEDIA:
7225 case SIOCGIFMEDIA:
7226 DBPRINT(sc, BCE_VERBOSE_MISC,
7227 "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
7228
7229 mii = device_get_softc(sc->bce_miibus);
7230 error = ifmedia_ioctl(ifp, ifr,
7231 &mii->mii_media, command);
7232 break;
7233
7234 /* Set interface capability */
7235 case SIOCSIFCAP:
7236 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
7237 DBPRINT(sc, BCE_INFO_MISC,
7238 "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
7239
7240 /* Toggle the TX checksum capabilities enable flag. */
7241 if (mask & IFCAP_TXCSUM &&
7242 ifp->if_capabilities & IFCAP_TXCSUM) {
7243 ifp->if_capenable ^= IFCAP_TXCSUM;
7244 if (IFCAP_TXCSUM & ifp->if_capenable)
7245 ifp->if_hwassist |= BCE_IF_HWASSIST;
7246 else
7247 ifp->if_hwassist &= ~BCE_IF_HWASSIST;
7248 }
7249
7250 /* Toggle the RX checksum capabilities enable flag. */
7251 if (mask & IFCAP_RXCSUM &&
7252 ifp->if_capabilities & IFCAP_RXCSUM)
7253 ifp->if_capenable ^= IFCAP_RXCSUM;
7254
7255 /* Toggle the TSO capabilities enable flag. */
7256 if (bce_tso_enable && (mask & IFCAP_TSO4) &&
7257 ifp->if_capabilities & IFCAP_TSO4) {
7258 ifp->if_capenable ^= IFCAP_TSO4;
7259 if (IFCAP_TSO4 & ifp->if_capenable)
7260 ifp->if_hwassist |= CSUM_TSO;
7261 else
7262 ifp->if_hwassist &= ~CSUM_TSO;
7263 }
7264
7265 if (mask & IFCAP_VLAN_HWCSUM &&
7266 ifp->if_capabilities & IFCAP_VLAN_HWCSUM)
7267 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
7268
7269 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
7270 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
7271 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
7272 /*
7273 * Don't actually disable VLAN tag stripping as
7274 * management firmware (ASF/IPMI/UMP) requires the
7275 * feature. If VLAN tag stripping is disabled, the driver
7276 * will manually reconstruct the VLAN frame by
7277 * appending the stripped VLAN tag.
7278 */
7279 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
7280 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
7281 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
7282 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
7283 == 0)
7284 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
7285 }
7286 VLAN_CAPABILITIES(ifp);
7287 break;
7288 default:
7289 /* We don't know how to handle the IOCTL, pass it on. */
7290 error = ether_ioctl(ifp, command, data);
7291 break;
7292 }
7293
7294 DBEXIT(BCE_VERBOSE_MISC);
7295 return(error);
7296}
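
/*
 * Illustrative sketch (not part of the driver): the SIOCSIFCAP idiom used
 * in bce_ioctl() above.  XOR-ing the requested capability set against the
 * currently enabled set yields only the bits the caller wants to change;
 * each bit is then toggled only if the hardware actually supports it.  The
 * ex_toggle_caps() name is hypothetical.
 */
static void
ex_toggle_caps(struct ifnet *ifp, int reqcap)
{
	int mask = reqcap ^ ifp->if_capenable;

	/* Toggle only changed bits that the interface supports. */
	if ((mask & IFCAP_TXCSUM) && (ifp->if_capabilities & IFCAP_TXCSUM))
		ifp->if_capenable ^= IFCAP_TXCSUM;
	if ((mask & IFCAP_RXCSUM) && (ifp->if_capabilities & IFCAP_RXCSUM))
		ifp->if_capenable ^= IFCAP_RXCSUM;
}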
7297
7298
7299/****************************************************************************/
7300/* Transmit timeout handler. */
7301/* */
7302/* Returns: */
7303/* Nothing. */
7304/****************************************************************************/
7305static void
7306bce_watchdog(struct bce_softc *sc)
7307{
7308 DBENTER(BCE_EXTREME_SEND);
7309
7310 BCE_LOCK_ASSERT(sc);
7311
7312 /* If the watchdog timer hasn't expired then just exit. */
7313 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
7314 goto bce_watchdog_exit;
7315
7316 /* If pause frames are active then don't reset the hardware. */
7317 /* ToDo: Should we reset the timer here? */
7318 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
7319 goto bce_watchdog_exit;
7320
7321 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
7322 __FILE__, __LINE__);
7323
7324 DBRUNMSG(BCE_INFO,
7325 bce_dump_driver_state(sc);
7326 bce_dump_status_block(sc);
7327 bce_dump_stats_block(sc);
7328 bce_dump_ftqs(sc);
7329 bce_dump_txp_state(sc, 0);
7330 bce_dump_rxp_state(sc, 0);
7331 bce_dump_tpat_state(sc, 0);
7332 bce_dump_cp_state(sc, 0);
7333 bce_dump_com_state(sc, 0));
7334
7335 DBRUN(bce_breakpoint(sc));
7336
7337 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
7338
7339 bce_init_locked(sc);
7340 sc->bce_ifp->if_oerrors++;
7341
7342bce_watchdog_exit:
7343 DBEXIT(BCE_EXTREME_SEND);
7344}
7345
7346
7347/*
7348 * Interrupt handler.
7349 */
7350/****************************************************************************/
7351/* Main interrupt entry point. Verifies that the controller generated the */
7352/* interrupt and then calls a separate routine to handle the various */
7353/* interrupt causes (PHY, TX, RX). */
7354/* */
7355/* Returns: */
7356/* Nothing. */
7357/****************************************************************************/
7358static void
7359bce_intr(void *xsc)
7360{
7361 struct bce_softc *sc;
7362 struct ifnet *ifp;
7363 u32 status_attn_bits;
7364 u16 hw_rx_cons, hw_tx_cons;
7365
7366 sc = xsc;
7367 ifp = sc->bce_ifp;
7368
7369 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7370 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
7371 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc));
7372
7373 BCE_LOCK(sc);
7374
7375 DBRUN(sc->interrupts_generated++);
7376
7377 /* Synchronize before we read from the interface's status block */
7378 bus_dmamap_sync(sc->status_tag, sc->status_map,
7379 BUS_DMASYNC_POSTREAD);
7380
7381 /*
7382 * If the hardware status block index
7383 * matches the last value read by the
7384 * driver and we haven't asserted our
7385 * interrupt then there's nothing to do.
7386 */
7387 if ((sc->status_block->status_idx == sc->last_status_idx) &&
7388 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
7389 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
7390 DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
7391 __FUNCTION__);
7392 goto bce_intr_exit;
7393 }
7394
7395 /* Ack the interrupt and stop others from occurring. */
7396 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
7397 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
7398 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
7399
7400 /* Check if the hardware has finished any work. */
7401 hw_rx_cons = bce_get_hw_rx_cons(sc);
7402 hw_tx_cons = bce_get_hw_tx_cons(sc);
7403
7404 /* Keep processing data as long as there is work to do. */
7405 for (;;) {
7406
7407 status_attn_bits = sc->status_block->status_attn_bits;
7408
7409 DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
7410 BCE_PRINTF("Simulating unexpected status attention "
7411 "bit set.");
7412 sc->unexpected_attention_sim_count++;
7413 status_attn_bits = status_attn_bits |
7414 STATUS_ATTN_BITS_PARITY_ERROR);
7415
7416 /* Was it a link change interrupt? */
7417 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
7418 (sc->status_block->status_attn_bits_ack &
7419 STATUS_ATTN_BITS_LINK_STATE)) {
7420 bce_phy_intr(sc);
7421
7422 /* Clear transient updates during link state change. */
7423 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command |
7424 BCE_HC_COMMAND_COAL_NOW_WO_INT);
7425 REG_RD(sc, BCE_HC_COMMAND);
7426 }
7427
7428 /* If any other attention is asserted, the chip is toast. */
7429 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
7430 (sc->status_block->status_attn_bits_ack &
7431 ~STATUS_ATTN_BITS_LINK_STATE))) {
7432
7433 sc->unexpected_attention_count++;
7434
7435 BCE_PRINTF("%s(%d): Fatal attention detected: "
7436 "0x%08X\n", __FILE__, __LINE__,
7437 sc->status_block->status_attn_bits);
7438
7439 DBRUNMSG(BCE_FATAL,
7440 if (unexpected_attention_sim_control == 0)
7441 bce_breakpoint(sc));
7442
7443 bce_init_locked(sc);
7444 goto bce_intr_exit;
7445 }
7446
7447 /* Check for any completed RX frames. */
7448 if (hw_rx_cons != sc->hw_rx_cons)
7449 bce_rx_intr(sc);
7450
7451 /* Check for any completed TX frames. */
7452 if (hw_tx_cons != sc->hw_tx_cons)
7453 bce_tx_intr(sc);
7454
7455 /* Save status block index value for the next interrupt. */
7456 sc->last_status_idx = sc->status_block->status_idx;
7457
7458 /*
7459 * Prevent speculative reads from getting
7460 * ahead of the status block.
7461 */
7462 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
7463 BUS_SPACE_BARRIER_READ);
7464
7465 /*
7466 * If there's no work left then exit the
7467 * interrupt service routine.
7468 */
7469 hw_rx_cons = bce_get_hw_rx_cons(sc);
7470 hw_tx_cons = bce_get_hw_tx_cons(sc);
7471
7472 if ((hw_rx_cons == sc->hw_rx_cons) &&
7473 (hw_tx_cons == sc->hw_tx_cons))
7474 break;
7475
7476 }
7477
7478 bus_dmamap_sync(sc->status_tag, sc->status_map,
7479 BUS_DMASYNC_PREREAD);
7480
7481 /* Re-enable interrupts. */
7482 bce_enable_intr(sc, 0);
7483
7484 /* Handle any frames that arrived while handling the interrupt. */
7485 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
7486 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
7487 bce_start_locked(ifp);
7488
7489bce_intr_exit:
7490 BCE_UNLOCK(sc);
7491
7492 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7493}
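
/*
 * Illustrative sketch (not part of the driver): the "run until quiescent"
 * pattern used by bce_intr() above.  After each pass the hardware consumer
 * indices are re-read and the loop exits only when neither has advanced,
 * so completions that arrive while the interrupt is being serviced are
 * handled before interrupts are re-enabled.  The ex_* names and callback
 * signatures are hypothetical.
 */
struct ex_isr_state {
	u16	hw_rx_cons;		/* last RX consumer index processed */
	u16	hw_tx_cons;		/* last TX consumer index processed */
};

static void
ex_isr_drain(struct ex_isr_state *st,
    u16 (*read_rx_cons)(void), u16 (*read_tx_cons)(void),
    void (*rx_work)(void), void (*tx_work)(void))
{
	u16 rx_cons = read_rx_cons();
	u16 tx_cons = read_tx_cons();

	for (;;) {
		if (rx_cons != st->hw_rx_cons) {
			rx_work();
			st->hw_rx_cons = rx_cons;
		}
		if (tx_cons != st->hw_tx_cons) {
			tx_work();
			st->hw_tx_cons = tx_cons;
		}

		/* Pick up any work completed while we were busy. */
		rx_cons = read_rx_cons();
		tx_cons = read_tx_cons();
		if (rx_cons == st->hw_rx_cons && tx_cons == st->hw_tx_cons)
			break;
	}
}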
7494
7495
7496/****************************************************************************/
7497/* Programs the various packet receive modes (broadcast and multicast). */
7498/* */
7499/* Returns: */
7500/* Nothing. */
7501/****************************************************************************/
7502static void
7503bce_set_rx_mode(struct bce_softc *sc)
7504{
7505 struct ifnet *ifp;
7506 struct ifmultiaddr *ifma;
7507 u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
7508 u32 rx_mode, sort_mode;
7509 int h, i;
7510
7511 DBENTER(BCE_VERBOSE_MISC);
7512
7513 BCE_LOCK_ASSERT(sc);
7514
7515 ifp = sc->bce_ifp;
7516
7517 /* Initialize receive mode default settings. */
7518 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
7519 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
7520 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
7521
7522 /*
7523 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
7524 * be enabled.
7525 */
7526 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
7527 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
7528 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
7529
7530 /*
7531 * Check for promiscuous, all multicast, or selected
7532 * multicast address filtering.
7533 */
7534 if (ifp->if_flags & IFF_PROMISC) {
7535 DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
7536
7537 /* Enable promiscuous mode. */
7538 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
7539 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
7540 } else if (ifp->if_flags & IFF_ALLMULTI) {
7541 DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
7542
7543 /* Enable all multicast addresses. */
7544 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
7545 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
7546 }
7547 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
7548 } else {
7549 /* Accept one or more multicast(s). */
7550 DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
7551
7552 if_maddr_rlock(ifp);
7553 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
7554 if (ifma->ifma_addr->sa_family != AF_LINK)
7555 continue;
7556 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
7557 ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
7558 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
7559 }
7560 if_maddr_runlock(ifp);
7561
7562 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
7563 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
7564
7565 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
7566 }
7567
7568 /* Only make changes if the receive mode has actually changed. */
7569 if (rx_mode != sc->rx_mode) {
7570 DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: "
7571 "0x%08X\n", rx_mode);
7572
7573 sc->rx_mode = rx_mode;
7574 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
7575 }
7576
7577 /* Disable and clear the existing sort before enabling a new sort. */
7578 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
7579 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
7580 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
7581
7582 DBEXIT(BCE_VERBOSE_MISC);
7583}
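
/*
 * Illustrative sketch (not part of the driver): how bce_set_rx_mode()
 * above maps a multicast MAC address to one bit in the eight 32-bit hash
 * registers.  The low byte of the little-endian CRC32 selects the register
 * (top three bits) and the bit within it (low five bits).  The helper name
 * is hypothetical.
 */
static void
ex_mc_hash_set(u32 hashes[NUM_MC_HASH_REGISTERS], const u8 *mac)
{
	int h;

	h = ether_crc32_le(mac, ETHER_ADDR_LEN) & 0xFF;
	hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
}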
7584
7585
7586/****************************************************************************/
7587/* Called periodically to update statistics from the controller's */
7588/* statistics block. */
7589/* */
7590/* Returns: */
7591/* Nothing. */
7592/****************************************************************************/
7593static void
7594bce_stats_update(struct bce_softc *sc)
7595{
7596 struct ifnet *ifp;
7597 struct statistics_block *stats;
7598
7599 DBENTER(BCE_EXTREME_MISC);
7600
7601 ifp = sc->bce_ifp;
7602
7603 stats = (struct statistics_block *) sc->stats_block;
7604
7605 /*
7606 * Certain controllers don't report
7607 * carrier sense errors correctly.
7608 * See errata E11_5708CA0_1165.
7609 */
7610 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
7611 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
7612 ifp->if_oerrors +=
7613 (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
7614
7615 /*
7616 * Update the sysctl statistics from the
7617 * hardware statistics.
7618 */
7619 sc->stat_IfHCInOctets =
7620 ((u64) stats->stat_IfHCInOctets_hi << 32) +
7621 (u64) stats->stat_IfHCInOctets_lo;
7622
7623 sc->stat_IfHCInBadOctets =
7624 ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
7625 (u64) stats->stat_IfHCInBadOctets_lo;
7626
7627 sc->stat_IfHCOutOctets =
7628 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
7629 (u64) stats->stat_IfHCOutOctets_lo;
7630
7631 sc->stat_IfHCOutBadOctets =
7632 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
7633 (u64) stats->stat_IfHCOutBadOctets_lo;
7634
7635 sc->stat_IfHCInUcastPkts =
7636 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
7637 (u64) stats->stat_IfHCInUcastPkts_lo;
7638
7639 sc->stat_IfHCInMulticastPkts =
7640 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
7641 (u64) stats->stat_IfHCInMulticastPkts_lo;
7642
7643 sc->stat_IfHCInBroadcastPkts =
7644 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
7645 (u64) stats->stat_IfHCInBroadcastPkts_lo;
7646
7647 sc->stat_IfHCOutUcastPkts =
7648 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
7649 (u64) stats->stat_IfHCOutUcastPkts_lo;
7650
7651 sc->stat_IfHCOutMulticastPkts =
7652 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
7653 (u64) stats->stat_IfHCOutMulticastPkts_lo;
7654
7655 sc->stat_IfHCOutBroadcastPkts =
7656 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
7657 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
7658
7659 /* ToDo: Preserve counters beyond 32 bits? */
7660 /* ToDo: Read the statistics from auto-clear regs? */
7661
7662 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
7663 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
7664
7665 sc->stat_Dot3StatsCarrierSenseErrors =
7666 stats->stat_Dot3StatsCarrierSenseErrors;
7667
7668 sc->stat_Dot3StatsFCSErrors =
7669 stats->stat_Dot3StatsFCSErrors;
7670
7671 sc->stat_Dot3StatsAlignmentErrors =
7672 stats->stat_Dot3StatsAlignmentErrors;
7673
7674 sc->stat_Dot3StatsSingleCollisionFrames =
7675 stats->stat_Dot3StatsSingleCollisionFrames;
7676
7677 sc->stat_Dot3StatsMultipleCollisionFrames =
7678 stats->stat_Dot3StatsMultipleCollisionFrames;
7679
7680 sc->stat_Dot3StatsDeferredTransmissions =
7681 stats->stat_Dot3StatsDeferredTransmissions;
7682
7683 sc->stat_Dot3StatsExcessiveCollisions =
7684 stats->stat_Dot3StatsExcessiveCollisions;
7685
7686 sc->stat_Dot3StatsLateCollisions =
7687 stats->stat_Dot3StatsLateCollisions;
7688
7689 sc->stat_EtherStatsCollisions =
7690 stats->stat_EtherStatsCollisions;
7691
7692 sc->stat_EtherStatsFragments =
7693 stats->stat_EtherStatsFragments;
7694
7695 sc->stat_EtherStatsJabbers =
7696 stats->stat_EtherStatsJabbers;
7697
7698 sc->stat_EtherStatsUndersizePkts =
7699 stats->stat_EtherStatsUndersizePkts;
7700
7701 sc->stat_EtherStatsOversizePkts =
7702 stats->stat_EtherStatsOversizePkts;
7703
7704 sc->stat_EtherStatsPktsRx64Octets =
7705 stats->stat_EtherStatsPktsRx64Octets;
7706
7707 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
7708 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
7709
7710 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
7711 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
7712
7713 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
7714 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
7715
7716 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
7717 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
7718
7719 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
7720 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
7721
7722 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
7723 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
7724
7725 sc->stat_EtherStatsPktsTx64Octets =
7726 stats->stat_EtherStatsPktsTx64Octets;
7727
7728 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
7729 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
7730
7731 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
7732 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
7733
7734 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
7735 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
7736
7737 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
7738 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
7739
7740 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
7741 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
7742
7743 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
7744 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
7745
7746 sc->stat_XonPauseFramesReceived =
7747 stats->stat_XonPauseFramesReceived;
7748
7749 sc->stat_XoffPauseFramesReceived =
7750 stats->stat_XoffPauseFramesReceived;
7751
7752 sc->stat_OutXonSent =
7753 stats->stat_OutXonSent;
7754
7755 sc->stat_OutXoffSent =
7756 stats->stat_OutXoffSent;
7757
7758 sc->stat_FlowControlDone =
7759 stats->stat_FlowControlDone;
7760
7761 sc->stat_MacControlFramesReceived =
7762 stats->stat_MacControlFramesReceived;
7763
7764 sc->stat_XoffStateEntered =
7765 stats->stat_XoffStateEntered;
7766
7767 sc->stat_IfInFramesL2FilterDiscards =
7768 stats->stat_IfInFramesL2FilterDiscards;
7769
7770 sc->stat_IfInRuleCheckerDiscards =
7771 stats->stat_IfInRuleCheckerDiscards;
7772
7773 sc->stat_IfInFTQDiscards =
7774 stats->stat_IfInFTQDiscards;
7775
7776 sc->stat_IfInMBUFDiscards =
7777 stats->stat_IfInMBUFDiscards;
7778
7779 sc->stat_IfInRuleCheckerP4Hit =
7780 stats->stat_IfInRuleCheckerP4Hit;
7781
7782 sc->stat_CatchupInRuleCheckerDiscards =
7783 stats->stat_CatchupInRuleCheckerDiscards;
7784
7785 sc->stat_CatchupInFTQDiscards =
7786 stats->stat_CatchupInFTQDiscards;
7787
7788 sc->stat_CatchupInMBUFDiscards =
7789 stats->stat_CatchupInMBUFDiscards;
7790
7791 sc->stat_CatchupInRuleCheckerP4Hit =
7792 stats->stat_CatchupInRuleCheckerP4Hit;
7793
7794 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
7795
7796 /*
7797 * Update the interface statistics from the
7798 * hardware statistics.
7799 */
7800 ifp->if_collisions =
7801 (u_long) sc->stat_EtherStatsCollisions;
7802
7803 /* ToDo: This method loses soft errors. */
7804 ifp->if_ierrors =
7805 (u_long) sc->stat_EtherStatsUndersizePkts +
7806 (u_long) sc->stat_EtherStatsOversizePkts +
7807 (u_long) sc->stat_IfInMBUFDiscards +
7808 (u_long) sc->stat_Dot3StatsAlignmentErrors +
7809 (u_long) sc->stat_Dot3StatsFCSErrors +
7810 (u_long) sc->stat_IfInRuleCheckerDiscards +
7811 (u_long) sc->stat_IfInFTQDiscards +
7812 (u_long) sc->com_no_buffers;
7813
7814 /* ToDo: This method loses soft errors. */
7815 ifp->if_oerrors =
7816 (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
7817 (u_long) sc->stat_Dot3StatsExcessiveCollisions +
7818 (u_long) sc->stat_Dot3StatsLateCollisions;
7819
7820 /* ToDo: Add additional statistics? */
7821
7822 DBEXIT(BCE_EXTREME_MISC);
7823}
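
/*
 * Illustrative sketch (not part of the driver): how the 64-bit hardware
 * counters read in bce_stats_update() above are assembled from their
 * _hi/_lo 32-bit halves.  The helper name is hypothetical.
 */
static __inline u64
ex_stat64(u32 hi, u32 lo)
{
	return (((u64) hi << 32) + (u64) lo);
}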
7824
7825
7826/****************************************************************************/
7827/* Periodic function to notify the bootcode that the driver is still */
7828/* present. */
7829/* */
7830/* Returns: */
7831/* Nothing. */
7832/****************************************************************************/
7833static void
7834bce_pulse(void *xsc)
7835{
7836 struct bce_softc *sc = xsc;
7837 u32 msg;
7838
7839 DBENTER(BCE_EXTREME_MISC);
7840
7841 BCE_LOCK_ASSERT(sc);
7842
7843 /* Tell the firmware that the driver is still running. */
7844 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
7845 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
7846
7847 /* Update the bootcode condition. */
7848 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
7849
7850 /* Report whether the bootcode still knows the driver is running. */
7851 if (bootverbose) {
7852 if (sc->bce_drv_cardiac_arrest == FALSE) {
7853 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
7854 sc->bce_drv_cardiac_arrest = TRUE;
7855 BCE_PRINTF("%s(): Warning: bootcode "
7856 "thinks driver is absent! "
7857 "(bc_state = 0x%08X)\n",
7858 __FUNCTION__, sc->bc_state);
7859 }
7860 } else {
7861 /*
7862 * Not supported by all bootcode versions.
7863 * (v5.0.11+ and v5.2.1+) Older bootcode
7864 * will require the driver to reset the
7865 * controller to clear this condition.
7866 */
7867 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
7868 sc->bce_drv_cardiac_arrest = FALSE;
7869 BCE_PRINTF("%s(): Bootcode found the "
7870 "driver pulse! (bc_state = 0x%08X)\n",
7871 __FUNCTION__, sc->bc_state);
7872 }
7873 }
7874 }
7875
7876
7877 /* Schedule the next pulse. */
7878 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
7879
7880 DBEXIT(BCE_EXTREME_MISC);
7881}
7882
7883
7884/****************************************************************************/
7885/* Periodic function to perform maintenance tasks. */
7886/* */
7887/* Returns: */
7888/* Nothing. */
7889/****************************************************************************/
7890static void
7891bce_tick(void *xsc)
7892{
7893 struct bce_softc *sc = xsc;
7894 struct mii_data *mii;
7895 struct ifnet *ifp;
7896
7897 ifp = sc->bce_ifp;
7898
7899 DBENTER(BCE_EXTREME_MISC);
7900
7901 BCE_LOCK_ASSERT(sc);
7902
7903 /* Schedule the next tick. */
7904 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
7905
7906 /* Update the statistics from the hardware statistics block. */
7907 bce_stats_update(sc);
7908
7909 /* Top off the receive and page chains. */
7910#ifdef BCE_JUMBO_HDRSPLIT
7911 bce_fill_pg_chain(sc);
7912#endif
7913 bce_fill_rx_chain(sc);
7914
7915 /* Check that the chip hasn't hung. */
7916 bce_watchdog(sc);
7917
7918 /* If the link is already up then we're done. */
7919 if (sc->bce_link_up == TRUE)
7920 goto bce_tick_exit;
7921
7922 /* Link is down. Check what the PHY's doing. */
7923 mii = device_get_softc(sc->bce_miibus);
7924 mii_tick(mii);
7925
7926 /* Check if the link has come up. */
7927 if ((mii->mii_media_status & IFM_ACTIVE) &&
7928 (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
7929 DBPRINT(sc, BCE_VERBOSE_MISC,
7930 "%s(): Link up!\n", __FUNCTION__);
7931 sc->bce_link_up = TRUE;
7932 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
7933 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
7934 IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
7935 bootverbose)
7936 BCE_PRINTF("Gigabit link up!\n");
7937
7938 /* Now that link is up, handle any outstanding TX traffic. */
7939 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7940 DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found "
7941 "pending TX traffic.\n", __FUNCTION__);
7942 bce_start_locked(ifp);
7943 }
7944 }
7945
7946bce_tick_exit:
7947 DBEXIT(BCE_EXTREME_MISC);
7948 return;
7949}
7950
7951
7952#ifdef BCE_DEBUG
7953/****************************************************************************/
7954/* Allows the driver state to be dumped through the sysctl interface. */
7955/* */
7956/* Returns: */
7957/* 0 for success, positive value for failure. */
7958/****************************************************************************/
7959static int
7960bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
7961{
7962 int error;
7963 int result;
7964 struct bce_softc *sc;
7965
7966 result = -1;
7967 error = sysctl_handle_int(oidp, &result, 0, req);
7968
7969 if (error || !req->newptr)
7970 return (error);
7971
7972 if (result == 1) {
7973 sc = (struct bce_softc *)arg1;
7974 bce_dump_driver_state(sc);
7975 }
7976
7977 return error;
7978}
7979
7980
7981/****************************************************************************/
7982/* Allows the hardware state to be dumped through the sysctl interface. */
7983/* */
7984/* Returns: */
7985/* 0 for success, positive value for failure. */
7986/****************************************************************************/
7987static int
7988bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
7989{
7990 int error;
7991 int result;
7992 struct bce_softc *sc;
7993
7994 result = -1;
7995 error = sysctl_handle_int(oidp, &result, 0, req);
7996
7997 if (error || !req->newptr)
7998 return (error);
7999
8000 if (result == 1) {
8001 sc = (struct bce_softc *)arg1;
8002 bce_dump_hw_state(sc);
8003 }
8004
8005 return error;
8006}
8007
8008
8009/****************************************************************************/
8010/* Allows the status block to be dumped through the sysctl interface. */
8011/* */
8012/* Returns: */
8013/* 0 for success, positive value for failure. */
8014/****************************************************************************/
8015static int
8016bce_sysctl_status_block(SYSCTL_HANDLER_ARGS)
8017{
8018 int error;
8019 int result;
8020 struct bce_softc *sc;
8021
8022 result = -1;
8023 error = sysctl_handle_int(oidp, &result, 0, req);
8024
8025 if (error || !req->newptr)
8026 return (error);
8027
8028 if (result == 1) {
8029 sc = (struct bce_softc *)arg1;
8030 bce_dump_status_block(sc);
8031 }
8032
8033 return error;
8034}
8035
8036
8037/****************************************************************************/
8038/* Allows the stats block to be dumped through the sysctl interface. */
8039/* */
8040/* Returns: */
8041/* 0 for success, positive value for failure. */
8042/****************************************************************************/
8043static int
8044bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS)
8045{
8046 int error;
8047 int result;
8048 struct bce_softc *sc;
8049
8050 result = -1;
8051 error = sysctl_handle_int(oidp, &result, 0, req);
8052
8053 if (error || !req->newptr)
8054 return (error);
8055
8056 if (result == 1) {
8057 sc = (struct bce_softc *)arg1;
8058 bce_dump_stats_block(sc);
8059 }
8060
8061 return error;
8062}
8063
8064
8065/****************************************************************************/
8066/* Allows the stat counters to be cleared without unloading/reloading the */
8067/* driver. */
8068/* */
8069/* Returns: */
8070/* 0 for success, positive value for failure. */
8071/****************************************************************************/
8072static int
8073bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
8074{
8075 int error;
8076 int result;
8077 struct bce_softc *sc;
8078
8079 result = -1;
8080 error = sysctl_handle_int(oidp, &result, 0, req);
8081
8082 if (error || !req->newptr)
8083 return (error);
8084
8085 if (result == 1) {
8086 sc = (struct bce_softc *)arg1;
8087
8088 /* Clear the internal H/W statistics counters. */
8089 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
8090
8091 /* Reset the driver maintained statistics. */
8092 sc->interrupts_rx =
8093 sc->interrupts_tx = 0;
8094 sc->tso_frames_requested =
8095 sc->tso_frames_completed =
8096 sc->tso_frames_failed = 0;
8097 sc->rx_empty_count =
8098 sc->tx_full_count = 0;
8099 sc->rx_low_watermark = USABLE_RX_BD;
8100 sc->tx_hi_watermark = 0;
8101 sc->l2fhdr_error_count =
8102 sc->l2fhdr_error_sim_count = 0;
8103 sc->mbuf_alloc_failed_count =
8104 sc->mbuf_alloc_failed_sim_count = 0;
8105 sc->dma_map_addr_rx_failed_count =
8106 sc->dma_map_addr_tx_failed_count = 0;
8107 sc->mbuf_frag_count = 0;
8108 sc->csum_offload_tcp_udp =
8109 sc->csum_offload_ip = 0;
8110 sc->vlan_tagged_frames_rcvd =
8111 sc->vlan_tagged_frames_stripped = 0;
8112
8113 /* Clear firmware maintained statistics. */
8114 REG_WR_IND(sc, 0x120084, 0);
8115 }
8116
8117 return error;
8118}
8119
8120
8121/****************************************************************************/
8122/* Allows the bootcode state to be dumped through the sysctl interface. */
8123/* */
8124/* Returns: */
8125/* 0 for success, positive value for failure. */
8126/****************************************************************************/
8127static int
8128bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
8129{
8130 int error;
8131 int result;
8132 struct bce_softc *sc;
8133
8134 result = -1;
8135 error = sysctl_handle_int(oidp, &result, 0, req);
8136
8137 if (error || !req->newptr)
8138 return (error);
8139
8140 if (result == 1) {
8141 sc = (struct bce_softc *)arg1;
8142 bce_dump_bc_state(sc);
8143 }
8144
8145 return error;
8146}
8147
8148
8149/****************************************************************************/
8150/* Provides a sysctl interface to allow dumping the RX BD chain. */
8151/* */
8152/* Returns: */
8153/* 0 for success, positive value for failure. */
8154/****************************************************************************/
8155static int
8156bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
8157{
8158 int error;
8159 int result;
8160 struct bce_softc *sc;
8161
8162 result = -1;
8163 error = sysctl_handle_int(oidp, &result, 0, req);
8164
8165 if (error || !req->newptr)
8166 return (error);
8167
8168 if (result == 1) {
8169 sc = (struct bce_softc *)arg1;
8170 bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD);
8171 }
8172
8173 return error;
8174}
8175
8176
8177/****************************************************************************/
8178/* Provides a sysctl interface to allow dumping the RX MBUF chain. */
8179/* */
8180/* Returns: */
8181/* 0 for success, positive value for failure. */
8182/****************************************************************************/
8183static int
8184bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS)
8185{
8186 int error;
8187 int result;
8188 struct bce_softc *sc;
8189
8190 result = -1;
8191 error = sysctl_handle_int(oidp, &result, 0, req);
8192
8193 if (error || !req->newptr)
8194 return (error);
8195
8196 if (result == 1) {
8197 sc = (struct bce_softc *)arg1;
8198 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
8199 }
8200
8201 return error;
8202}
8203
8204
8205/****************************************************************************/
8206/* Provides a sysctl interface to allow dumping the TX chain. */
8207/* */
8208/* Returns: */
8209/* 0 for success, positive value for failure. */
8210/****************************************************************************/
8211static int
8212bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
8213{
8214 int error;
8215 int result;
8216 struct bce_softc *sc;
8217
8218 result = -1;
8219 error = sysctl_handle_int(oidp, &result, 0, req);
8220
8221 if (error || !req->newptr)
8222 return (error);
8223
8224 if (result == 1) {
8225 sc = (struct bce_softc *)arg1;
8226 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
8227 }
8228
8229 return error;
8230}
8231
8232
8233#ifdef BCE_JUMBO_HDRSPLIT
8234/****************************************************************************/
8235/* Provides a sysctl interface to allow dumping the page chain. */
8236/* */
8237/* Returns: */
8238/* 0 for success, positive value for failure. */
8239/****************************************************************************/
8240static int
8241bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
8242{
8243 int error;
8244 int result;
8245 struct bce_softc *sc;
8246
8247 result = -1;
8248 error = sysctl_handle_int(oidp, &result, 0, req);
8249
8250 if (error || !req->newptr)
8251 return (error);
8252
8253 if (result == 1) {
8254 sc = (struct bce_softc *)arg1;
8255 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
8256 }
8257
8258 return error;
8259}
8260#endif
8261
8262/****************************************************************************/
8263/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */
8264/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8265/* */
8266/* Returns: */
8267/* 0 for success, positive value for failure. */
8268/****************************************************************************/
8269static int
8270bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
8271{
8272 struct bce_softc *sc = (struct bce_softc *)arg1;
8273 int error;
8274 u32 result;
8275 u32 val[1];
8276 u8 *data = (u8 *) val;
8277
8278 result = -1;
8279 error = sysctl_handle_int(oidp, &result, 0, req);
8280 if (error || (req->newptr == NULL))
8281 return (error);
8282
8283 bce_nvram_read(sc, result, data, 4);
8284 BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
8285
8286 return (error);
8287}
8288
8289
8290/****************************************************************************/
8291/* Provides a sysctl interface to allow reading arbitrary registers in the */
8292/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8293/* */
8294/* Returns: */
8295/* 0 for success, positive value for failure. */
8296/****************************************************************************/
8297static int
8298bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
8299{
8300 struct bce_softc *sc = (struct bce_softc *)arg1;
8301 int error;
8302 u32 val, result;
8303
8304 result = -1;
8305 error = sysctl_handle_int(oidp, &result, 0, req);
8306 if (error || (req->newptr == NULL))
8307 return (error);
8308
8309 /* Make sure the register is accessible. */
8310 if (result < 0x8000) {
8311 val = REG_RD(sc, result);
8312 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
8313 } else if (result < 0x0280000) {
8314 val = REG_RD_IND(sc, result);
8315 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
8316 }
8317
8318 return (error);
8319}
8320
8321
8322/****************************************************************************/
8323/* Provides a sysctl interface to allow reading arbitrary PHY registers in */
8324/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8325/* */
8326/* Returns: */
8327/* 0 for success, positive value for failure. */
8328/****************************************************************************/
8329static int
8330bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
8331{
8332 struct bce_softc *sc;
8333 device_t dev;
8334 int error, result;
8335 u16 val;
8336
8337 result = -1;
8338 error = sysctl_handle_int(oidp, &result, 0, req);
8339 if (error || (req->newptr == NULL))
8340 return (error);
8341
8342 /* Make sure the register is accessible. */
8343 if (result < 0x20) {
8344 sc = (struct bce_softc *)arg1;
8345 dev = sc->bce_dev;
8346 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
8347 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
8348 }
8349 return (error);
8350}
8351
8352
8353static int
8354sysctl_nvram_dump(SYSCTL_HANDLER_ARGS)
8355{
8356 struct bce_softc *sc = (struct bce_softc *)arg1;
8357 int error, i;
8358
8359 if (sc->nvram_buf == NULL) {
8360 sc->nvram_buf = malloc(sc->bce_flash_size,
8361 M_TEMP, M_ZERO | M_WAITOK);
8362 }
8363 if (sc->nvram_buf == NULL) {
8364 return(ENOMEM);
8365 }
8366 if (req->oldlen == sc->bce_flash_size) {
8367 for (i = 0; i < sc->bce_flash_size; i++) {
8368 bce_nvram_read(sc, i, &sc->nvram_buf[i], 1);
8369 }
8370 }
8371
8372 error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size);
8373
8374 return error;
8375}
8376
8377#ifdef BCE_NVRAM_WRITE_SUPPORT
8378static int
8379sysctl_nvram_write(SYSCTL_HANDLER_ARGS)
8380{
8381 struct bce_softc *sc = (struct bce_softc *)arg1;
8382 int error;
8383
8384 if (sc->nvram_buf == NULL) {
8385 sc->nvram_buf = malloc(sc->bce_flash_size,
8386 M_TEMP, M_ZERO | M_WAITOK);
8387 }
8388 if (sc->nvram_buf == NULL) {
8389 return(ENOMEM);
8390 }
8391 bzero(sc->nvram_buf, sc->bce_flash_size);
8392 error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size);
8393
8394 if (req->newlen == sc->bce_flash_size) {
8395 bce_nvram_write(sc, 0, sc->nvram_buf , sc->bce_flash_size);
8396 }
8397
8398
8399 return error;
8400}
8401#endif
8402
8403
8404/****************************************************************************/
8405/* Provides a sysctl interface to allow reading a CID. */
8406/* */
8407/* Returns: */
8408/* 0 for success, positive value for failure. */
8409/****************************************************************************/
8410static int
8411bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
8412{
8413 struct bce_softc *sc;
8414 int error, result;
8415
8416 result = -1;
8417 error = sysctl_handle_int(oidp, &result, 0, req);
8418 if (error || (req->newptr == NULL))
8419 return (error);
8420
8421 /* Make sure the register is accessible. */
8422 if (result <= TX_CID) {
8423 sc = (struct bce_softc *)arg1;
8424 bce_dump_ctx(sc, result);
8425 }
8426
8427 return (error);
8428}
8429
8430
8431/****************************************************************************/
8432/* Provides a sysctl interface to force the driver to dump state and */
8433/* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8434/* */
8435/* Returns: */
8436/* 0 for success, positive value for failure. */
8437/****************************************************************************/
8438static int
8439bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
8440{
8441 int error;
8442 int result;
8443 struct bce_softc *sc;
8444
8445 result = -1;
8446 error = sysctl_handle_int(oidp, &result, 0, req);
8447
8448 if (error || !req->newptr)
8449 return (error);
8450
8451 if (result == 1) {
8452 sc = (struct bce_softc *)arg1;
8453 bce_breakpoint(sc);
8454 }
8455
8456 return error;
8457}
8458#endif
8459
8460
8461/****************************************************************************/
8462/* Adds any sysctl parameters for tuning or debugging purposes. */
8463/* */
8464/* Returns: */
8465/* 0 for success, positive value for failure. */
8466/****************************************************************************/
8467static void
8468bce_add_sysctls(struct bce_softc *sc)
8469{
8470 struct sysctl_ctx_list *ctx;
8471 struct sysctl_oid_list *children;
8472
8473 DBENTER(BCE_VERBOSE_MISC);
8474
8475 ctx = device_get_sysctl_ctx(sc->bce_dev);
8476 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
8477
8478#ifdef BCE_DEBUG
8479 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8480 "l2fhdr_error_sim_control",
8481 CTLFLAG_RW, &l2fhdr_error_sim_control,
8482 0, "Debug control to force l2fhdr errors");
8483
8484 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8485 "l2fhdr_error_sim_count",
8486 CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
8487 0, "Number of simulated l2_fhdr errors");
8488#endif
8489
8490 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8491 "l2fhdr_error_count",
8492 CTLFLAG_RD, &sc->l2fhdr_error_count,
8493 0, "Number of l2_fhdr errors");
8494
8495#ifdef BCE_DEBUG
8496 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8497 "mbuf_alloc_failed_sim_control",
8498 CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
8499 0, "Debug control to force mbuf allocation failures");
8500
8501 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8502 "mbuf_alloc_failed_sim_count",
8503 CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
8504 0, "Number of simulated mbuf cluster allocation failures");
8505#endif
8506
8507 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8508 "mbuf_alloc_failed_count",
8509 CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
8510 0, "Number of mbuf allocation failures");
8511
8512 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8513 "mbuf_frag_count",
8514 CTLFLAG_RD, &sc->mbuf_frag_count,
8515 0, "Number of fragmented mbufs");
8516
8517#ifdef BCE_DEBUG
8518 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8519 "dma_map_addr_failed_sim_control",
8520 CTLFLAG_RW, &dma_map_addr_failed_sim_control,
8521 0, "Debug control to force DMA mapping failures");
8522
8523 /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
8524 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8525 "dma_map_addr_failed_sim_count",
8526 CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
8527 0, "Number of simulated DMA mapping failures");
8528
8529#endif
8530
8531 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8532 "dma_map_addr_rx_failed_count",
8533 CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
8534 0, "Number of RX DMA mapping failures");
8535
8536 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8537 "dma_map_addr_tx_failed_count",
8538 CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
8539 0, "Number of TX DMA mapping failures");
8540
8541#ifdef BCE_DEBUG
8542 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8543 "unexpected_attention_sim_control",
8544 CTLFLAG_RW, &unexpected_attention_sim_control,
8545 0, "Debug control to simulate unexpected attentions");
8546
8547 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8548 "unexpected_attention_sim_count",
8549 CTLFLAG_RW, &sc->unexpected_attention_sim_count,
8550 0, "Number of simulated unexpected attentions");
8551#endif
8552
8553 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8554 "unexpected_attention_count",
8555 CTLFLAG_RW, &sc->unexpected_attention_count,
8556 0, "Number of unexpected attentions");
8557
8558#ifdef BCE_DEBUG
8559 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8560 "debug_bootcode_running_failure",
8561 CTLFLAG_RW, &bootcode_running_failure_sim_control,
8562 0, "Debug control to force bootcode running failures");
8563
8564 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8565 "rx_low_watermark",
8566 CTLFLAG_RD, &sc->rx_low_watermark,
8567 0, "Lowest level of free rx_bd's");
8568
8569 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8570 "rx_empty_count",
8571 CTLFLAG_RD, &sc->rx_empty_count,
8572 0, "Number of times the RX chain was empty");
8573
8574 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8575 "tx_hi_watermark",
8576 CTLFLAG_RD, &sc->tx_hi_watermark,
8577 0, "Highest level of used tx_bd's");
8578
8579 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8580 "tx_full_count",
8581 CTLFLAG_RD, &sc->tx_full_count,
8582 0, "Number of times the TX chain was full");
8583
8584 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8585 "tso_frames_requested",
8586 CTLFLAG_RD, &sc->tso_frames_requested,
8587 0, "Number of TSO frames requested");
8588
8589 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8590 "tso_frames_completed",
8591 CTLFLAG_RD, &sc->tso_frames_completed,
8592 0, "Number of TSO frames completed");
8593
8594 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8595 "tso_frames_failed",
8596 CTLFLAG_RD, &sc->tso_frames_failed,
8597 0, "Number of TSO frames failed");
8598
8599 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8600 "csum_offload_ip",
8601 CTLFLAG_RD, &sc->csum_offload_ip,
8602 0, "Number of IP checksum offload frames");
8603
8604 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8605 "csum_offload_tcp_udp",
8606 CTLFLAG_RD, &sc->csum_offload_tcp_udp,
8607 0, "Number of TCP/UDP checksum offload frames");
8608
8609 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8610 "vlan_tagged_frames_rcvd",
8611 CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd,
8612 0, "Number of VLAN tagged frames received");
8613
8614 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8615 "vlan_tagged_frames_stripped",
8616 CTLFLAG_RD, &sc->vlan_tagged_frames_stripped,
8617 0, "Number of VLAN tagged frames stripped");
8618
8619 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8620 "interrupts_rx",
8621 CTLFLAG_RD, &sc->interrupts_rx,
8622 0, "Number of RX interrupts");
8623
8624 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8625 "interrupts_tx",
8626 CTLFLAG_RD, &sc->interrupts_tx,
8627 0, "Number of TX interrupts");
8628 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8629 "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD,
8630 (void *)sc, 0,
8631 sysctl_nvram_dump, "S", "");
8632#ifdef BCE_NVRAM_WRITE_SUPPORT
8633 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8634 "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR,
8635 (void *)sc, 0,
8636 sysctl_nvram_write, "S", "");
8637#endif
8638#endif
8639
8640 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8641 "stat_IfHcInOctets",
8642 CTLFLAG_RD, &sc->stat_IfHCInOctets,
8643 "Bytes received");
8644
8645 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8646 "stat_IfHCInBadOctets",
8647 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
8648 "Bad bytes received");
8649
8650 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8651 "stat_IfHCOutOctets",
8652 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
8653 "Bytes sent");
8654
8655 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8656 "stat_IfHCOutBadOctets",
8657 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
8658 "Bad bytes sent");
8659
8660 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8661 "stat_IfHCInUcastPkts",
8662 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
8663 "Unicast packets received");
8664
8665 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8666 "stat_IfHCInMulticastPkts",
8667 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
8668 "Multicast packets received");
8669
8670 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8671 "stat_IfHCInBroadcastPkts",
8672 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
8673 "Broadcast packets received");
8674
8675 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8676 "stat_IfHCOutUcastPkts",
8677 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
8678 "Unicast packets sent");
8679
8680 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8681 "stat_IfHCOutMulticastPkts",
8682 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
8683 "Multicast packets sent");
8684
8685 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8686 "stat_IfHCOutBroadcastPkts",
8687 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
8688 "Broadcast packets sent");
8689
8690 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8691 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
8692 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
8693 0, "Internal MAC transmit errors");
8694
8695 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8696 "stat_Dot3StatsCarrierSenseErrors",
8697 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
8698 0, "Carrier sense errors");
8699
8700 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8701 "stat_Dot3StatsFCSErrors",
8702 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
8703 0, "Frame check sequence errors");
8704
8705 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8706 "stat_Dot3StatsAlignmentErrors",
8707 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
8708 0, "Alignment errors");
8709
8710 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8711 "stat_Dot3StatsSingleCollisionFrames",
8712 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
8713 0, "Single Collision Frames");
8714
8715 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8716 "stat_Dot3StatsMultipleCollisionFrames",
8717 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
8718 0, "Multiple Collision Frames");
8719
8720 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8721 "stat_Dot3StatsDeferredTransmissions",
8722 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
8723 0, "Deferred Transmissions");
8724
8725 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8726 "stat_Dot3StatsExcessiveCollisions",
8727 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
8728 0, "Excessive Collisions");
8729
8730 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8731 "stat_Dot3StatsLateCollisions",
8732 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
8733 0, "Late Collisions");
8734
8735 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8736 "stat_EtherStatsCollisions",
8737 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
8738 0, "Collisions");
8739
8740 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8741 "stat_EtherStatsFragments",
8742 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
8743 0, "Fragments");
8744
8745 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8746 "stat_EtherStatsJabbers",
8747 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
8748 0, "Jabbers");
8749
8750 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8751 "stat_EtherStatsUndersizePkts",
8752 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
8753 0, "Undersize packets");
8754
8755 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8756 "stat_EtherStatsOversizePkts",
8757 CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts,
8758	    0, "Oversize packets");
8759
8760 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8761 "stat_EtherStatsPktsRx64Octets",
8762 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
8763	    0, "64 byte packets received");
8764
8765 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8766 "stat_EtherStatsPktsRx65Octetsto127Octets",
8767 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
8768	    0, "65 to 127 byte packets received");
8769
8770 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8771 "stat_EtherStatsPktsRx128Octetsto255Octets",
8772 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
8773	    0, "128 to 255 byte packets received");
8774
8775 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8776 "stat_EtherStatsPktsRx256Octetsto511Octets",
8777 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
8778	    0, "256 to 511 byte packets received");
8779
8780 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8781 "stat_EtherStatsPktsRx512Octetsto1023Octets",
8782 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
8783	    0, "512 to 1023 byte packets received");
8784
8785 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8786 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
8787 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
8788	    0, "1024 to 1522 byte packets received");
8789
8790 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8791 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
8792 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
8793	    0, "1523 to 9022 byte packets received");
8794
8795 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8796 "stat_EtherStatsPktsTx64Octets",
8797 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
8798	    0, "64 byte packets sent");
8799
8800 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8801 "stat_EtherStatsPktsTx65Octetsto127Octets",
8802 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
8803	    0, "65 to 127 byte packets sent");
8804
8805 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8806 "stat_EtherStatsPktsTx128Octetsto255Octets",
8807 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
8808	    0, "128 to 255 byte packets sent");
8809
8810 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8811 "stat_EtherStatsPktsTx256Octetsto511Octets",
8812 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
8813	    0, "256 to 511 byte packets sent");
8814
8815 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8816 "stat_EtherStatsPktsTx512Octetsto1023Octets",
8817 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
8818	    0, "512 to 1023 byte packets sent");
8819
8820 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8821 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
8822 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
8823	    0, "1024 to 1522 byte packets sent");
8824
8825 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8826 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
8827 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
8828	    0, "1523 to 9022 byte packets sent");
8829
8830 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8831 "stat_XonPauseFramesReceived",
8832 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
8833	    0, "XON pause frames received");
8834
8835 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8836 "stat_XoffPauseFramesReceived",
8837 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
8838 0, "XOFF pause frames received");
8839
8840 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8841 "stat_OutXonSent",
8842 CTLFLAG_RD, &sc->stat_OutXonSent,
8843 0, "XON pause frames sent");
8844
8845 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8846 "stat_OutXoffSent",
8847 CTLFLAG_RD, &sc->stat_OutXoffSent,
8848 0, "XOFF pause frames sent");
8849
8850 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8851 "stat_FlowControlDone",
8852 CTLFLAG_RD, &sc->stat_FlowControlDone,
8853 0, "Flow control done");
8854
8855 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8856 "stat_MacControlFramesReceived",
8857 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
8858 0, "MAC control frames received");
8859
8860 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8861 "stat_XoffStateEntered",
8862 CTLFLAG_RD, &sc->stat_XoffStateEntered,
8863 0, "XOFF state entered");
8864
8865 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8866 "stat_IfInFramesL2FilterDiscards",
8867 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
8868 0, "Received L2 packets discarded");
8869
8870 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8871 "stat_IfInRuleCheckerDiscards",
8872 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
8873 0, "Received packets discarded by rule");
8874
8875 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8876 "stat_IfInFTQDiscards",
8877 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
8878 0, "Received packet FTQ discards");
8879
8880 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8881 "stat_IfInMBUFDiscards",
8882 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
8883 0, "Received packets discarded due to lack "
8884 "of controller buffer memory");
8885
8886 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8887 "stat_IfInRuleCheckerP4Hit",
8888 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
8889 0, "Received packets rule checker hits");
8890
8891 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8892 "stat_CatchupInRuleCheckerDiscards",
8893 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
8894 0, "Received packets discarded in Catchup path");
8895
8896 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8897 "stat_CatchupInFTQDiscards",
8898 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
8899 0, "Received packets discarded in FTQ in Catchup path");
8900
8901 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8902 "stat_CatchupInMBUFDiscards",
8903 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
8904 0, "Received packets discarded in controller "
8905 "buffer memory in Catchup path");
8906
8907 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8908 "stat_CatchupInRuleCheckerP4Hit",
8909 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
8910 0, "Received packets rule checker hits in Catchup path");
8911
8912 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8913 "com_no_buffers",
8914 CTLFLAG_RD, &sc->com_no_buffers,
8915 0, "Valid packets received but no RX buffers available");
8916
8917#ifdef BCE_DEBUG
8918 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8919 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
8920 (void *)sc, 0,
8921	    bce_sysctl_driver_state, "I", "Driver state information");
8922
8923 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8924 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
8925 (void *)sc, 0,
8926 bce_sysctl_hw_state, "I", "Hardware state information");
8927
8928 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8929 "status_block", CTLTYPE_INT | CTLFLAG_RW,
8930 (void *)sc, 0,
8931 bce_sysctl_status_block, "I", "Dump status block");
8932
8933 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8934 "stats_block", CTLTYPE_INT | CTLFLAG_RW,
8935 (void *)sc, 0,
8936 bce_sysctl_stats_block, "I", "Dump statistics block");
8937
8938 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8939 "stats_clear", CTLTYPE_INT | CTLFLAG_RW,
8940 (void *)sc, 0,
8941 bce_sysctl_stats_clear, "I", "Clear statistics block");
8942
8943 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8944 "bc_state", CTLTYPE_INT | CTLFLAG_RW,
8945 (void *)sc, 0,
8946 bce_sysctl_bc_state, "I", "Bootcode state information");
8947
8948 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8949 "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW,
8950 (void *)sc, 0,
8951 bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain");
8952
8953 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8954 "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW,
8955 (void *)sc, 0,
8956 bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain");
8957
8958 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8959 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
8960 (void *)sc, 0,
8961 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
8962
8963#ifdef BCE_JUMBO_HDRSPLIT
8964 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8965 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
8966 (void *)sc, 0,
8967 bce_sysctl_dump_pg_chain, "I", "Dump page chain");
8968#endif
8969 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8970 "dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
8971 (void *)sc, 0,
8972 bce_sysctl_dump_ctx, "I", "Dump context memory");
8973
8974 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8975 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
8976 (void *)sc, 0,
8977 bce_sysctl_breakpoint, "I", "Driver breakpoint");
8978
8979 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8980 "reg_read", CTLTYPE_INT | CTLFLAG_RW,
8981 (void *)sc, 0,
8982 bce_sysctl_reg_read, "I", "Register read");
8983
8984 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8985 "nvram_read", CTLTYPE_INT | CTLFLAG_RW,
8986 (void *)sc, 0,
8987 bce_sysctl_nvram_read, "I", "NVRAM read");
8988
8989 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8990 "phy_read", CTLTYPE_INT | CTLFLAG_RW,
8991 (void *)sc, 0,
8992 bce_sysctl_phy_read, "I", "PHY register read");
8993
8994#endif
8995
8996 DBEXIT(BCE_VERBOSE_MISC);
8997}
8998
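/*
 * Usage sketch (assumes unit 0 and the usual device sysctl tree): the
 * statistics nodes registered above hang off the device's sysctl tree,
 * so from userland they would be read as, e.g.,
 *
 *	sysctl dev.bce.0.stat_IfHcInOctets
 *
 * and, in a BCE_DEBUG build, writing one of the debug nodes, e.g.
 *
 *	sysctl dev.bce.0.driver_state=1
 *
 * runs the matching handler (bce_sysctl_driver_state in this case) and
 * dumps the requested state to the console.
 */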
8999
9000/****************************************************************************/
9001/* BCE Debug Routines */
9002/****************************************************************************/
9003#ifdef BCE_DEBUG
9004
9005/****************************************************************************/
9006/* Freezes the controller to allow for a cohesive state dump. */
9007/* */
9008/* Returns: */
9009/* Nothing. */
9010/****************************************************************************/
9011static __attribute__ ((noinline)) void
9012bce_freeze_controller(struct bce_softc *sc)
9013{
9014 u32 val;
9015 val = REG_RD(sc, BCE_MISC_COMMAND);
9016 val |= BCE_MISC_COMMAND_DISABLE_ALL;
9017 REG_WR(sc, BCE_MISC_COMMAND, val);
9018}
9019
9020
9021/****************************************************************************/
9022/* Unfreezes the controller after a freeze operation. This may not always */
9023/* work and the controller will require a reset! */
9024/* */
9025/* Returns: */
9026/* Nothing. */
9027/****************************************************************************/
9028static __attribute__ ((noinline)) void
9029bce_unfreeze_controller(struct bce_softc *sc)
9030{
9031 u32 val;
9032 val = REG_RD(sc, BCE_MISC_COMMAND);
9033 val |= BCE_MISC_COMMAND_ENABLE_ALL;
9034 REG_WR(sc, BCE_MISC_COMMAND, val);
9035}
9036
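/*
 * Minimal usage sketch for the freeze/unfreeze pair: freeze the
 * controller so the dump routines below see a self-consistent snapshot,
 * dump whatever is of interest, then attempt to unfreeze.  As the
 * header comment above warns, the unfreeze may fail and leave the
 * controller needing a reset.
 *
 *	bce_freeze_controller(sc);
 *	bce_dump_driver_state(sc);
 *	bce_dump_hw_state(sc);
 *	bce_unfreeze_controller(sc);
 */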
9037
9038/****************************************************************************/
9039/* Prints out Ethernet frame information from an mbuf. */
9040/* */
9041/* Partially decode an Ethernet frame to look at some important headers. */
9042/* */
9043/* Returns: */
9044/* Nothing. */
9045/****************************************************************************/
9046static __attribute__ ((noinline)) void
9047bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
9048{
9049 struct ether_vlan_header *eh;
9050 u16 etype;
9051 int ehlen;
9052 struct ip *ip;
9053 struct tcphdr *th;
9054 struct udphdr *uh;
9055 struct arphdr *ah;
9056
9057 BCE_PRINTF(
9058 "-----------------------------"
9059 " Frame Decode "
9060 "-----------------------------\n");
9061
9062 eh = mtod(m, struct ether_vlan_header *);
9063
9064 /* Handle VLAN encapsulation if present. */
9065 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
9066 etype = ntohs(eh->evl_proto);
9067 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9068 } else {
9069 etype = ntohs(eh->evl_encap_proto);
9070 ehlen = ETHER_HDR_LEN;
9071 }
9072
9073 /* ToDo: Add VLAN output. */
9074 BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
9075 eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);
9076
9077 switch (etype) {
9078 case ETHERTYPE_IP:
9079 ip = (struct ip *)(m->m_data + ehlen);
9080 BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, "
9081 "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n",
9082 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
9083 ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));
9084
9085 switch (ip->ip_p) {
9086 case IPPROTO_TCP:
9087 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
9088 BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = "
9089 "%d bytes, flags = 0x%b, csum = 0x%04X\n",
9090 ntohs(th->th_dport), ntohs(th->th_sport),
9091 (th->th_off << 2), th->th_flags,
9092 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST"
9093 "\02SYN\01FIN", ntohs(th->th_sum));
9094 break;
9095 case IPPROTO_UDP:
9096 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
9097 BCE_PRINTF("-udp: dest = %d, src = %d, len = %d "
9098 "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport),
9099 ntohs(uh->uh_sport), ntohs(uh->uh_ulen),
9100 ntohs(uh->uh_sum));
9101 break;
9102 case IPPROTO_ICMP:
9103 BCE_PRINTF("icmp:\n");
9104 break;
9105 default:
9106 BCE_PRINTF("----: Other IP protocol.\n");
9107 }
9108 break;
9109 case ETHERTYPE_IPV6:
9110 BCE_PRINTF("ipv6: No decode supported.\n");
9111 break;
9112 case ETHERTYPE_ARP:
9113 BCE_PRINTF("-arp: ");
9114 ah = (struct arphdr *) (m->m_data + ehlen);
9115 switch (ntohs(ah->ar_op)) {
9116 case ARPOP_REVREQUEST:
9117 printf("reverse ARP request\n");
9118 break;
9119 case ARPOP_REVREPLY:
9120 printf("reverse ARP reply\n");
9121 break;
9122 case ARPOP_REQUEST:
9123 printf("ARP request\n");
9124 break;
9125 case ARPOP_REPLY:
9126 printf("ARP reply\n");
9127 break;
9128 default:
9129 printf("other ARP operation\n");
9130 }
9131 break;
9132 default:
9133 BCE_PRINTF("----: Other protocol.\n");
9134 }
9135
9136 BCE_PRINTF(
9137 "-----------------------------"
9138 "--------------"
9139 "-----------------------------\n");
9140}
9141
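/*
 * Usage sketch: bce_dump_enet() reads the Ethernet/IP/TCP headers
 * straight out of the first mbuf's data area, so a debug caller would
 * hand it an mbuf whose leading data holds a complete frame header,
 * e.g. (m_head is only an illustrative name):
 *
 *	bce_dump_enet(sc, m_head);
 */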
9142
9143/****************************************************************************/
9144/* Prints out information about an mbuf. */
9145/* */
9146/* Returns: */
9147/* Nothing. */
9148/****************************************************************************/
9149static __attribute__ ((noinline)) void
9150bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
9151{
9152 struct mbuf *mp = m;
9153
9154 if (m == NULL) {
9155 BCE_PRINTF("mbuf: null pointer\n");
9156 return;
9157 }
9158
9159 while (mp) {
9160 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, "
9161 "m_data = %p\n", mp, mp->m_len, mp->m_flags,
9162 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data);
9163
9164 if (mp->m_flags & M_PKTHDR) {
9165 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, "
9166 "csum_flags = %b\n", mp->m_pkthdr.len,
9167 mp->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG"
9168 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
9169 "\22M_PROMISC\23M_NOFREE",
9170 mp->m_pkthdr.csum_flags,
9171 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
9172 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
9173 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
9174 "\14CSUM_PSEUDO_HDR");
9175 }
9176
9177 if (mp->m_flags & M_EXT) {
9178 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
9179 mp->m_ext.ext_buf, mp->m_ext.ext_size);
9180 switch (mp->m_ext.ext_type) {
9181 case EXT_CLUSTER:
9182 printf("EXT_CLUSTER\n"); break;
9183 case EXT_SFBUF:
9184 printf("EXT_SFBUF\n"); break;
9185 case EXT_JUMBO9:
9186 printf("EXT_JUMBO9\n"); break;
9187 case EXT_JUMBO16:
9188 printf("EXT_JUMBO16\n"); break;
9189 case EXT_PACKET:
9190 printf("EXT_PACKET\n"); break;
9191 case EXT_MBUF:
9192 printf("EXT_MBUF\n"); break;
9193 case EXT_NET_DRV:
9194 printf("EXT_NET_DRV\n"); break;
9195 case EXT_MOD_TYPE:
9196				printf("EXT_MOD_TYPE\n"); break;
9197 case EXT_DISPOSABLE:
9198 printf("EXT_DISPOSABLE\n"); break;
9199 case EXT_EXTREF:
9200 printf("EXT_EXTREF\n"); break;
9201 default:
9202 printf("UNKNOWN\n");
9203 }
9204 }
9205
9206 mp = mp->m_next;
9207 }
9208}
9209
9210
9211/****************************************************************************/
9212/* Prints out the mbufs in the TX mbuf chain. */
9213/* */
9214/* Returns: */
9215/* Nothing. */
9216/****************************************************************************/
9217static __attribute__ ((noinline)) void
9218bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
9219{
9220 struct mbuf *m;
9221
9222 BCE_PRINTF(
9223 "----------------------------"
9224 " tx mbuf data "
9225 "----------------------------\n");
9226
9227 for (int i = 0; i < count; i++) {
9228 m = sc->tx_mbuf_ptr[chain_prod];
9229 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
9230 bce_dump_mbuf(sc, m);
9231 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
9232 }
9233
9234 BCE_PRINTF(
9235 "----------------------------"
9236 "----------------"
9237 "----------------------------\n");
9238}
9239
9240
9241/****************************************************************************/
9242/* Prints out the mbufs in the RX mbuf chain. */
9243/* */
9244/* Returns: */
9245/* Nothing. */
9246/****************************************************************************/
9247static __attribute__ ((noinline)) void
9248bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
9249{
9250 struct mbuf *m;
9251
9252 BCE_PRINTF(
9253 "----------------------------"
9254 " rx mbuf data "
9255 "----------------------------\n");
9256
9257 for (int i = 0; i < count; i++) {
9258 m = sc->rx_mbuf_ptr[chain_prod];
9259 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
9260 bce_dump_mbuf(sc, m);
9261 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
9262 }
9263
9264
9265 BCE_PRINTF(
9266 "----------------------------"
9267 "----------------"
9268 "----------------------------\n");
9269}
9270
9271
9272#ifdef BCE_JUMBO_HDRSPLIT
9273/****************************************************************************/
9274/* Prints out the mbufs in the mbuf page chain. */
9275/* */
9276/* Returns: */
9277/* Nothing. */
9278/****************************************************************************/
9279static __attribute__ ((noinline)) void
9280bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
9281{
9282 struct mbuf *m;
9283
9284 BCE_PRINTF(
9285 "----------------------------"
9286 " pg mbuf data "
9287 "----------------------------\n");
9288
9289 for (int i = 0; i < count; i++) {
9290 m = sc->pg_mbuf_ptr[chain_prod];
9291 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
9292 bce_dump_mbuf(sc, m);
9293 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
9294 }
9295
9296
9297 BCE_PRINTF(
9298 "----------------------------"
9299 "----------------"
9300 "----------------------------\n");
9301}
9302#endif
9303
9304
9305/****************************************************************************/
9306/* Prints out a tx_bd structure. */
9307/* */
9308/* Returns: */
9309/* Nothing. */
9310/****************************************************************************/
9311static __attribute__ ((noinline)) void
9312bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
9313{
9314 int i = 0;
9315
9316 if (idx > MAX_TX_BD)
9317 /* Index out of range. */
9318 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
9319 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
9320 /* TX Chain page pointer. */
9321 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
9322 "pointer\n", idx, txbd->tx_bd_haddr_hi,
9323 txbd->tx_bd_haddr_lo);
9324 else {
9325 /* Normal tx_bd entry. */
9326 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
9327 "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = "
9328 "0x%04X (", idx, txbd->tx_bd_haddr_hi,
9329 txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes,
9330 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
9331
9332 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) {
9333 if (i>0)
9334 printf("|");
9335 printf("CONN_FAULT");
9336 i++;
9337 }
9338
9339 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) {
9340 if (i>0)
9341 printf("|");
9342 printf("TCP_UDP_CKSUM");
9343 i++;
9344 }
9345
9346 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) {
9347 if (i>0)
9348 printf("|");
9349 printf("IP_CKSUM");
9350 i++;
9351 }
9352
9353 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) {
9354 if (i>0)
9355 printf("|");
9356 printf("VLAN");
9357 i++;
9358 }
9359
9360 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) {
9361 if (i>0)
9362 printf("|");
9363 printf("COAL_NOW");
9364 i++;
9365 }
9366
9367 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) {
9368 if (i>0)
9369 printf("|");
9370 printf("DONT_GEN_CRC");
9371 i++;
9372 }
9373
9374 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) {
9375 if (i>0)
9376 printf("|");
9377 printf("START");
9378 i++;
9379 }
9380
9381 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) {
9382 if (i>0)
9383 printf("|");
9384 printf("END");
9385 i++;
9386 }
9387
9388 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) {
9389 if (i>0)
9390 printf("|");
9391 printf("LSO");
9392 i++;
9393 }
9394
9395 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) {
9396 if (i>0)
9397 printf("|");
9398 printf("SW_OPTION=%d", ((txbd->tx_bd_flags &
9399 TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++;
9400 }
9401
9402 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) {
9403 if (i>0)
9404 printf("|");
9405 printf("SW_FLAGS");
9406 i++;
9407 }
9408
9409 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) {
9410 if (i>0)
9411 printf("|");
9412			printf("SNAP)\n");
9413 } else {
9414 printf(")\n");
9415 }
9416 }
9417}
9418
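/*
 * Note on the special cases above: the last tx_bd of every chain page is
 * not a real buffer descriptor but a pointer to the next page, which is
 * why bce_dump_txbd() decodes it separately.  bce_dump_tx_chain() below
 * simply walks the producer index and calls this routine for each entry.
 */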
9419
9420/****************************************************************************/
9421/* Prints out a rx_bd structure. */
9422/* */
9423/* Returns: */
9424/* Nothing. */
9425/****************************************************************************/
9426static __attribute__ ((noinline)) void
9427bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
9428{
9429 if (idx > MAX_RX_BD)
9430 /* Index out of range. */
9431 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
9432 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
9433 /* RX Chain page pointer. */
9434 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
9435 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
9436 rxbd->rx_bd_haddr_lo);
9437 else
9438 /* Normal rx_bd entry. */
9439 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
9440 "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi,
9441 rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len,
9442 rxbd->rx_bd_flags);
9443}
9444
9445
9446#ifdef BCE_JUMBO_HDRSPLIT
9447/****************************************************************************/
9448/* Prints out a rx_bd structure in the page chain. */
9449/* */
9450/* Returns: */
9451/* Nothing. */
9452/****************************************************************************/
9453static __attribute__ ((noinline)) void
9454bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
9455{
9456 if (idx > MAX_PG_BD)
9457 /* Index out of range. */
9458 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
9459 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
9460 /* Page Chain page pointer. */
9461		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
9462 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
9463 else
9464 /* Normal rx_bd entry. */
9465 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
9466 "flags = 0x%08X\n", idx,
9467 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
9468 pgbd->rx_bd_len, pgbd->rx_bd_flags);
9469}
9470#endif
9471
9472
9473/****************************************************************************/
9474/* Prints out a l2_fhdr structure. */
9475/* */
9476/* Returns: */
9477/* Nothing. */
9478/****************************************************************************/
9479static __attribute__ ((noinline)) void
9480bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
9481{
9482 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
9483 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
9484 "tcp_udp_xsum = 0x%04X\n", idx,
9485 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
9486 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
9487 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
9488}
9489
9490
9491/****************************************************************************/
9492/* Prints out context memory info. (Only useful for CID 0 to 16.) */
9493/* */
9494/* Returns: */
9495/* Nothing. */
9496/****************************************************************************/
9497static __attribute__ ((noinline)) void
9498bce_dump_ctx(struct bce_softc *sc, u16 cid)
9499{
9500 if (cid > TX_CID) {
9501 BCE_PRINTF(" Unknown CID\n");
9502 return;
9503 }
9504
9505 BCE_PRINTF(
9506 "----------------------------"
9507 " CTX Data "
9508 "----------------------------\n");
9509
9510 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid);
9511
9512 if (cid == RX_CID) {
9513 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
9514 "producer index\n",
9515 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
9516 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host "
9517 "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
9518 BCE_L2CTX_RX_HOST_BSEQ));
9519 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
9520 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
9521 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
9522 "descriptor address\n",
9523 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
9524 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
9525 "descriptor address\n",
9526 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
9527 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer "
9528 "index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9529 BCE_L2CTX_RX_NX_BDIDX));
9530 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
9531 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9532 BCE_L2CTX_RX_HOST_PG_BDIDX));
9533 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
9534 "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid),
9535 BCE_L2CTX_RX_PG_BUF_SIZE));
9536 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
9537 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid),
9538 BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
9539 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
9540 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid),
9541 BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
9542 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
9543 "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9544 BCE_L2CTX_RX_NX_PG_BDIDX));
9545 } else if (cid == TX_CID) {
9546 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9547 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9548 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
9549 CTX_RD(sc, GET_CID_ADDR(cid),
9550 BCE_L2CTX_TX_TYPE_XI));
9551			BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE_XI) ctx "
9552 "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid),
9553 BCE_L2CTX_TX_CMD_TYPE_XI));
9554 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) "
9555 "h/w buffer descriptor address\n",
9556 CTX_RD(sc, GET_CID_ADDR(cid),
9557 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
9558 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) "
9559 "h/w buffer descriptor address\n",
9560 CTX_RD(sc, GET_CID_ADDR(cid),
9561 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
9562 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) "
9563 "host producer index\n",
9564 CTX_RD(sc, GET_CID_ADDR(cid),
9565 BCE_L2CTX_TX_HOST_BIDX_XI));
9566 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) "
9567 "host byte sequence\n",
9568 CTX_RD(sc, GET_CID_ADDR(cid),
9569 BCE_L2CTX_TX_HOST_BSEQ_XI));
9570 } else {
9571 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
9572 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
9573 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
9574 CTX_RD(sc, GET_CID_ADDR(cid),
9575 BCE_L2CTX_TX_CMD_TYPE));
9576 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) "
9577 "h/w buffer descriptor address\n",
9578 CTX_RD(sc, GET_CID_ADDR(cid),
9579 BCE_L2CTX_TX_TBDR_BHADDR_HI));
9580 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) "
9581 "h/w buffer descriptor address\n",
9582 CTX_RD(sc, GET_CID_ADDR(cid),
9583 BCE_L2CTX_TX_TBDR_BHADDR_LO));
9584 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host "
9585 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9586 BCE_L2CTX_TX_HOST_BIDX));
9587 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
9588 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
9589 BCE_L2CTX_TX_HOST_BSEQ));
9590 }
9591 }
9592
9593 BCE_PRINTF(
9594 "----------------------------"
9595 " Raw CTX "
9596 "----------------------------\n");
9597
9598 for (int i = 0x0; i < 0x300; i += 0x10) {
9599 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
9600 CTX_RD(sc, GET_CID_ADDR(cid), i),
9601 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
9602 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
9603 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
9604 }
9605
9606
9607 BCE_PRINTF(
9608 "----------------------------"
9609 "----------------"
9610 "----------------------------\n");
9611}
9612
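/*
 * Usage sketch: only the RX and TX context IDs are decoded, so the
 * expected calls are
 *
 *	bce_dump_ctx(sc, RX_CID);
 *	bce_dump_ctx(sc, TX_CID);
 *
 * CIDs above TX_CID are rejected outright; any other CID still gets the
 * raw context dump at the end.
 */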
9613
9614/****************************************************************************/
9615/* Prints out the FTQ data. */
9616/* */
9617/* Returns: */
9618/* Nothing. */
9619/****************************************************************************/
9620static __attribute__ ((noinline)) void
9621bce_dump_ftqs(struct bce_softc *sc)
9622{
9623 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;
9624
9625 BCE_PRINTF(
9626 "----------------------------"
9627 " FTQ Data "
9628 "----------------------------\n");
9629
9630 BCE_PRINTF(" FTQ Command Control Depth_Now "
9631 "Max_Depth Valid_Cnt \n");
9632 BCE_PRINTF(" ------- ---------- ---------- ---------- "
9633 "---------- ----------\n");
9634
9635 /* Setup the generic statistic counters for the FTQ valid count. */
9636 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
9637 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) |
9638 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) |
9639 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
9640 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9641
9642 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) |
9643 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) |
9644 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) |
9645 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
9646 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);
9647
9648 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) |
9649 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) |
9650 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) |
9651 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
9652 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);
9653
9654 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) |
9655 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) |
9656 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) |
9657 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
9658 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);
9659
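	/*
	 * Mapping sketch (inferred from the reads that follow, not from a
	 * datasheet): each byte of the GEN_SEL_0..3 selectors picks the
	 * queue whose valid count appears in the matching
	 * BCE_HC_STAT_GEN_STAT0..15 register, low byte first.  That lines
	 * GEN_STAT0..15 up with RLUPQ, RXPQ, RXPCQ, RV2PPQ, RV2PMQ, RV2PTQ,
	 * RDMAQ, TSCHQ, TBDRQ, TXPQ, TDMAQ, TPATQ, TASQ, COMXQ, COMTQ and
	 * COMQ, in that order.
	 */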
9660 /* Input queue to the Receive Lookup state machine */
9661 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
9662 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
9663 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
9664 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
9665 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9666 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9667 cmd, ctl, cur_depth, max_depth, valid_cnt);
9668
9669 /* Input queue to the Receive Processor */
9670 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
9671 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
9672 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
9673 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
9674 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9675 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9676 cmd, ctl, cur_depth, max_depth, valid_cnt);
9677
9678	/* Input queue to the Receive Processor */
9679 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
9680 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
9681 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
9682 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
9683 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9684 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9685 cmd, ctl, cur_depth, max_depth, valid_cnt);
9686
9687 /* Input queue to the Receive Virtual to Physical state machine */
9688 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
9689 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
9690 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
9691 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
9692 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9693 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9694 cmd, ctl, cur_depth, max_depth, valid_cnt);
9695
9696	/* Input queue to the Receive Virtual to Physical state machine */
9697 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
9698 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
9699 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
9700 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
9701 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
9702 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9703 cmd, ctl, cur_depth, max_depth, valid_cnt);
9704
9705 /* Input queue to the Receive Virtual to Physical state machine */
9706 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
9707 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
9708 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
9709 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
9710 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
9711 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9712 cmd, ctl, cur_depth, max_depth, valid_cnt);
9713
9714 /* Input queue to the Receive DMA state machine */
9715 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
9716 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
9717 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9718 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9719 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
9720 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9721 cmd, ctl, cur_depth, max_depth, valid_cnt);
9722
9723 /* Input queue to the Transmit Scheduler state machine */
9724 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
9725 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
9726 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
9727 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
9728 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
9729 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9730 cmd, ctl, cur_depth, max_depth, valid_cnt);
9731
9732 /* Input queue to the Transmit Buffer Descriptor state machine */
9733 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
9734 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
9735 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
9736 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
9737 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
9738 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9739 cmd, ctl, cur_depth, max_depth, valid_cnt);
9740
9741 /* Input queue to the Transmit Processor */
9742 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
9743 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
9744 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
9745 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
9746 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
9747 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9748 cmd, ctl, cur_depth, max_depth, valid_cnt);
9749
9750 /* Input queue to the Transmit DMA state machine */
9751 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
9752 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
9753 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9754 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9755 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
9756 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9757 cmd, ctl, cur_depth, max_depth, valid_cnt);
9758
9759 /* Input queue to the Transmit Patch-Up Processor */
9760 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
9761 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
9762 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
9763 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
9764 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
9765 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9766 cmd, ctl, cur_depth, max_depth, valid_cnt);
9767
9768 /* Input queue to the Transmit Assembler state machine */
9769 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
9770 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
9771 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
9772 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
9773 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
9774 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9775 cmd, ctl, cur_depth, max_depth, valid_cnt);
9776
9777 /* Input queue to the Completion Processor */
9778 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
9779 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
9780 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
9781 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
9782 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
9783 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9784 cmd, ctl, cur_depth, max_depth, valid_cnt);
9785
9786 /* Input queue to the Completion Processor */
9787 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
9788 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
9789 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
9790 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
9791 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
9792 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9793 cmd, ctl, cur_depth, max_depth, valid_cnt);
9794
9795 /* Input queue to the Completion Processor */
9796 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
9797 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
9798 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
9799 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
9800 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
9801	BCE_PRINTF(" COM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9802 cmd, ctl, cur_depth, max_depth, valid_cnt);
9803
9804 /* Setup the generic statistic counters for the FTQ valid count. */
9805 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) |
9806 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) |
9807 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
9808
9809 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9810 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
9811 val = val |
9812 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI <<
9813 24);
9814 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9815
9816 /* Input queue to the Management Control Processor */
9817 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
9818 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
9819 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9820 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9821 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9822 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9823 cmd, ctl, cur_depth, max_depth, valid_cnt);
9824
9825 /* Input queue to the Command Processor */
9826 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
9827 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
9828 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9829 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9830 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9831 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9832 cmd, ctl, cur_depth, max_depth, valid_cnt);
9833
9834 /* Input queue to the Completion Scheduler state machine */
9835 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
9836 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
9837 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
9838 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
9839 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9840 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9841 cmd, ctl, cur_depth, max_depth, valid_cnt);
9842
9843 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9844 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9845 /* Input queue to the RV2P Command Scheduler */
9846 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
9847 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
9848 cur_depth = (ctl & 0xFFC00000) >> 22;
9849 max_depth = (ctl & 0x003FF000) >> 12;
9850 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9851 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9852 cmd, ctl, cur_depth, max_depth, valid_cnt);
9853 }
9854
9855 BCE_PRINTF(
9856 "----------------------------"
9857 "----------------"
9858 "----------------------------\n");
9859}
9860
9861
9862/****************************************************************************/
9863/* Prints out the TX chain. */
9864/* */
9865/* Returns: */
9866/* Nothing. */
9867/****************************************************************************/
9868static __attribute__ ((noinline)) void
9869bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
9870{
9871 struct tx_bd *txbd;
9872
9873 /* First some info about the tx_bd chain structure. */
9874 BCE_PRINTF(
9875 "----------------------------"
9876 " tx_bd chain "
9877 "----------------------------\n");
9878
9879 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n",
9880 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
9881 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
9882 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
9883 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);
9884
9885 BCE_PRINTF(
9886 "----------------------------"
9887 " tx_bd data "
9888 "----------------------------\n");
9889
9890 /* Now print out a decoded list of TX buffer descriptors. */
9891 for (int i = 0; i < count; i++) {
9892 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
9893 bce_dump_txbd(sc, tx_prod, txbd);
9894 tx_prod++;
9895 }
9896
9897 BCE_PRINTF(
9898 "----------------------------"
9899 "----------------"
9900 "----------------------------\n");
9901}
9902
9903
9904/****************************************************************************/
9905/* Prints out the RX chain. */
9906/* */
9907/* Returns: */
9908/* Nothing. */
9909/****************************************************************************/
9910static __attribute__ ((noinline)) void
9911bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count)
9912{
9913 struct rx_bd *rxbd;
9914
9915 /* First some info about the rx_bd chain structure. */
9916 BCE_PRINTF(
9917 "----------------------------"
9918 " rx_bd chain "
9919 "----------------------------\n");
9920
9921 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n",
9922 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
9923
9924 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9925 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
9926
9927 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);
9928
9929 BCE_PRINTF(
9930 "----------------------------"
9931 " rx_bd data "
9932 "----------------------------\n");
9933
9934 /* Now print out the rx_bd's themselves. */
9935 for (int i = 0; i < count; i++) {
9936 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
9937 bce_dump_rxbd(sc, rx_prod, rxbd);
9938 rx_prod = RX_CHAIN_IDX(rx_prod + 1);
9939 }
9940
9941 BCE_PRINTF(
9942 "----------------------------"
9943 "----------------"
9944 "----------------------------\n");
9945}
9946
9947
9948#ifdef BCE_JUMBO_HDRSPLIT
9949/****************************************************************************/
9950/* Prints out the page chain. */
9951/* */
9952/* Returns: */
9953/* Nothing. */
9954/****************************************************************************/
9955static __attribute__ ((noinline)) void
9956bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
9957{
9958 struct rx_bd *pgbd;
9959
9960 /* First some info about the page chain structure. */
9961 BCE_PRINTF(
9962 "----------------------------"
9963 " page chain "
9964 "----------------------------\n");
9965
9966 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n",
9967 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
9968
9969	BCE_PRINTF("pg_bd per page = 0x%08X, usable pg_bd per page = 0x%08X\n",
9970 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
9971
9972	BCE_PRINTF("total pg_bd = 0x%08X, max_pg_bd = 0x%08X\n",
9973 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
9974
9975 BCE_PRINTF(
9976 "----------------------------"
9977 " page data "
9978 "----------------------------\n");
9979
9980 /* Now print out the rx_bd's themselves. */
9981 for (int i = 0; i < count; i++) {
9982 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
9983 bce_dump_pgbd(sc, pg_prod, pgbd);
9984 pg_prod = PG_CHAIN_IDX(pg_prod + 1);
9985 }
9986
9987 BCE_PRINTF(
9988 "----------------------------"
9989 "----------------"
9990 "----------------------------\n");
9991}
9992#endif
9993
9994
9995#define BCE_PRINT_RX_CONS(arg) \
9996if (sblk->status_rx_quick_consumer_index##arg) \
9997 BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \
9998 sblk->status_rx_quick_consumer_index##arg, (u16) \
9999 RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \
10000 arg);
10001
10002
10003#define BCE_PRINT_TX_CONS(arg) \
10004if (sblk->status_tx_quick_consumer_index##arg) \
10005 BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \
10006 sblk->status_tx_quick_consumer_index##arg, (u16) \
10007 TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \
10008 arg);
10009
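/*
 * Expansion sketch: the ## token pasting means BCE_PRINT_RX_CONS(0)
 * becomes roughly
 *
 *	if (sblk->status_rx_quick_consumer_index0)
 *		BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n",
 *		    sblk->status_rx_quick_consumer_index0,
 *		    (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0),
 *		    0);
 *
 * so only the consumer indices that are non-zero get printed.
 */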
10010/****************************************************************************/
10011/* Prints out the status block from host memory. */
10012/* */
10013/* Returns: */
10014/* Nothing. */
10015/****************************************************************************/
10016static __attribute__ ((noinline)) void
10017bce_dump_status_block(struct bce_softc *sc)
10018{
10019 struct status_block *sblk;
10020
10021 sblk = sc->status_block;
10022
10023 BCE_PRINTF(
10024 "----------------------------"
10025 " Status Block "
10026 "----------------------------\n");
10027
10028	/* These indices are used for normal L2 drivers. */
10029 BCE_PRINTF(" 0x%08X - attn_bits\n",
10030 sblk->status_attn_bits);
10031
10032 BCE_PRINTF(" 0x%08X - attn_bits_ack\n",
10033 sblk->status_attn_bits_ack);
10034
10035 BCE_PRINT_RX_CONS(0);
10036	BCE_PRINT_TX_CONS(0);
10037
10038 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx);
10039
10040	/* These indices are not used for normal L2 drivers. */
10041 BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3);
10042 BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6);
10043 BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9);
10044 BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12);
10045 BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15);
10046
10047 BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3);
10048
10049 if (sblk->status_completion_producer_index ||
10050 sblk->status_cmd_consumer_index)
10051 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n",
10052 sblk->status_completion_producer_index,
10053 sblk->status_cmd_consumer_index);
10054
10055 BCE_PRINTF(
10056 "----------------------------"
10057 "----------------"
10058 "----------------------------\n");
10059}
10060
10061
10062#define BCE_PRINT_64BIT_STAT(arg) \
10063if (sblk->arg##_lo || sblk->arg##_hi) \
10064 BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \
10065 sblk->arg##_lo, #arg);
10066
10067#define BCE_PRINT_32BIT_STAT(arg) \
10068if (sblk->arg) \
10069 BCE_PRINTF(" 0x%08X : %s\n", \
10070 sblk->arg, #arg);
10071
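/*
 * Expansion sketch: BCE_PRINT_64BIT_STAT(stat_IfHCInOctets) becomes
 *
 *	if (sblk->stat_IfHCInOctets_lo || sblk->stat_IfHCInOctets_hi)
 *		BCE_PRINTF("0x%08X:%08X : %s\n", sblk->stat_IfHCInOctets_hi,
 *		    sblk->stat_IfHCInOctets_lo, "stat_IfHCInOctets");
 *
 * which is why the stats block dump below only shows non-zero counters.
 */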
10072/****************************************************************************/
10073/* Prints out the statistics block from host memory. */
10074/* */
10075/* Returns: */
10076/* Nothing. */
10077/****************************************************************************/
10078static __attribute__ ((noinline)) void
10079bce_dump_stats_block(struct bce_softc *sc)
10080{
10081 struct statistics_block *sblk;
10082
10083 sblk = sc->stats_block;
10084
10085 BCE_PRINTF(
10086 "---------------"
10087 " Stats Block (All Stats Not Shown Are 0) "
10088 "---------------\n");
10089
10090 BCE_PRINT_64BIT_STAT(stat_IfHCInOctets);
10091 BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets);
10092 BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets);
10093 BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets);
10094 BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts);
10095 BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts);
10096 BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts);
10097 BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts);
10098 BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts);
10099 BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts);
10100 BCE_PRINT_32BIT_STAT(
10101 stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
10102 BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors);
10103 BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors);
10104 BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors);
10105 BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames);
10106 BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames);
10107 BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions);
10108 BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions);
10109 BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions);
10110 BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions);
10111 BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments);
10112 BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers);
10113 BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts);
10114 BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts);
10115 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets);
10116 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets);
10117 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets);
10118 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets);
10119 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets);
10120 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets);
10121 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets);
10122 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets);
10123 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets);
10124 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets);
10125 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets);
10126 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets);
10127 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets);
10128 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets);
10129 BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived);
10130 BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived);
10131 BCE_PRINT_32BIT_STAT(stat_OutXonSent);
10132 BCE_PRINT_32BIT_STAT(stat_OutXoffSent);
10133 BCE_PRINT_32BIT_STAT(stat_FlowControlDone);
10134 BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived);
10135 BCE_PRINT_32BIT_STAT(stat_XoffStateEntered);
10136 BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards);
10137 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards);
10138 BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards);
10139 BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards);
10140 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit);
10141 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards);
10142 BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards);
10143 BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards);
10144 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit);
10145
10146 BCE_PRINTF(
10147 "----------------------------"
10148 "----------------"
10149 "----------------------------\n");
10150}
10151
10152
10153/****************************************************************************/
10154/* Prints out a summary of the driver state. */
10155/* */
10156/* Returns: */
10157/* Nothing. */
10158/****************************************************************************/
10159static __attribute__ ((noinline)) void
10160bce_dump_driver_state(struct bce_softc *sc)
10161{
10162 u32 val_hi, val_lo;
10163
10164 BCE_PRINTF(
10165 "-----------------------------"
10166 " Driver State "
10167 "-----------------------------\n");
10168
10169 val_hi = BCE_ADDR_HI(sc);
10170 val_lo = BCE_ADDR_LO(sc);
10171 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual "
10172 "address\n", val_hi, val_lo);
10173
10174 val_hi = BCE_ADDR_HI(sc->bce_vhandle);
10175 val_lo = BCE_ADDR_LO(sc->bce_vhandle);
10176 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual "
10177 "address\n", val_hi, val_lo);
10178
10179 val_hi = BCE_ADDR_HI(sc->status_block);
10180 val_lo = BCE_ADDR_LO(sc->status_block);
10181 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block "
10182 "virtual address\n", val_hi, val_lo);
10183
10184 val_hi = BCE_ADDR_HI(sc->stats_block);
10185 val_lo = BCE_ADDR_LO(sc->stats_block);
10186 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block "
10187 "virtual address\n", val_hi, val_lo);
10188
10189 val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
10190 val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
10191 BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
10192	    "virtual address\n", val_hi, val_lo);
10193
10194 val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
10195 val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
10196 BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
10197 "virtual address\n", val_hi, val_lo);
10198
10199#ifdef BCE_JUMBO_HDRSPLIT
10200 val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
10201 val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
10202 BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain "
10203 "virtual address\n", val_hi, val_lo);
10204#endif
10205
10206 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
10207 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
10208 BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
10209 "virtual address\n", val_hi, val_lo);
10210
10211 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
10212 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
10213 BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
10214 "virtual address\n", val_hi, val_lo);
10215
10216#ifdef BCE_JUMBO_HDRSPLIT
10217 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
10218 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
10219 BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain "
10220 "virtual address\n", val_hi, val_lo);
10221#endif
10222
10223 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) "
10224 "h/w intrs\n", sc->interrupts_generated);
10225
10226 BCE_PRINTF(" 0x%08X - (sc->interrupts_rx) "
10227 "rx interrupts handled\n", sc->interrupts_rx);
10228
10229 BCE_PRINTF(" 0x%08X - (sc->interrupts_tx) "
10230 "tx interrupts handled\n", sc->interrupts_tx);
10231
10232 BCE_PRINTF(" 0x%08X - (sc->phy_interrupts) "
10233 "phy interrupts handled\n", sc->phy_interrupts);
10234
10235 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) "
10236 "status block index\n", sc->last_status_idx);
10237
10238 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer "
10239 "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
10240
10241 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer "
10242 "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
10243
10244 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer "
10245 "byte seq index\n", sc->tx_prod_bseq);
10246
10247 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx "
10248 "mbufs allocated\n", sc->debug_tx_mbuf_alloc);
10249
10250 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used "
10251 "tx_bd's\n", sc->used_tx_bd);
10252
10253 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi "
10254 "watermark\n", sc->tx_hi_watermark, sc->max_tx_bd);
10255
10256 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer "
10257 "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
10258
10259 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer "
10260 "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
10261
10262 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer "
10263 "byte seq index\n", sc->rx_prod_bseq);
10264
10265 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx "
10266 "mbufs allocated\n", sc->debug_rx_mbuf_alloc);
10267
10268 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free "
10269 "rx_bd's\n", sc->free_rx_bd);
10270
10271#ifdef BCE_JUMBO_HDRSPLIT
10272 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer "
10273 "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
10274
10275 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer "
10276 "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
10277
10278 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page "
10279 "mbufs allocated\n", sc->debug_pg_mbuf_alloc);
10280
10281 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page "
10282 "rx_bd's\n", sc->free_pg_bd);
10283
10284 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low "
10285 "watermark\n", sc->pg_low_watermark, sc->max_pg_bd);
10286#endif
10287
10288 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) "
10289 "mbuf alloc failures\n", sc->mbuf_alloc_failed_count);
10290
10291 BCE_PRINTF(" 0x%08X - (sc->bce_flags) "
10292 "bce mac flags\n", sc->bce_flags);
10293
10294 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) "
10295 "bce phy flags\n", sc->bce_phy_flags);
10296
10297 BCE_PRINTF(
10298 "----------------------------"
10299 "----------------"
10300 "----------------------------\n");
10301}
10302
10303
10304/****************************************************************************/
10305/* Prints out the hardware state through a summary of important registers,  */
10306/* followed by a complete register dump. */
10307/* */
10308/* Returns: */
10309/* Nothing. */
10310/****************************************************************************/
10311static __attribute__ ((noinline)) void
10312bce_dump_hw_state(struct bce_softc *sc)
10313{
10314 u32 val;
10315
10316 BCE_PRINTF(
10317 "----------------------------"
10318 " Hardware State "
10319 "----------------------------\n");
10320
10321 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10322
10323 val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
10324 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
10325 val, BCE_MISC_ENABLE_STATUS_BITS);
10326
10327 val = REG_RD(sc, BCE_DMA_STATUS);
10328 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n",
10329 val, BCE_DMA_STATUS);
10330
10331 val = REG_RD(sc, BCE_CTX_STATUS);
10332 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n",
10333 val, BCE_CTX_STATUS);
10334
10335 val = REG_RD(sc, BCE_EMAC_STATUS);
10336 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n",
10337 val, BCE_EMAC_STATUS);
10338
10339 val = REG_RD(sc, BCE_RPM_STATUS);
10340 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n",
10341 val, BCE_RPM_STATUS);
10342
10343 /* ToDo: Create a #define for this constant. */
10344 val = REG_RD(sc, 0x2004);
10345 BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n",
10346 val, 0x2004);
10347
10348 val = REG_RD(sc, BCE_RV2P_STATUS);
10349 BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n",
10350 val, BCE_RV2P_STATUS);
10351
10352 /* ToDo: Create a #define for this constant. */
10353 val = REG_RD(sc, 0x2c04);
10354 BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n",
10355 val, 0x2c04);
10356
10357 val = REG_RD(sc, BCE_TBDR_STATUS);
10358 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n",
10359 val, BCE_TBDR_STATUS);
10360
10361 val = REG_RD(sc, BCE_TDMA_STATUS);
10362 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n",
10363 val, BCE_TDMA_STATUS);
10364
10365 val = REG_RD(sc, BCE_HC_STATUS);
10366 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n",
10367 val, BCE_HC_STATUS);
10368
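	/*
	 * The per-processor CPU state registers below are read through the
	 * indirect register interface (REG_RD_IND) rather than directly.
	 */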
10369 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10370 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
10371 val, BCE_TXP_CPU_STATE);
10372
10373 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10374 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
10375 val, BCE_TPAT_CPU_STATE);
10376
10377 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10378 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
10379 val, BCE_RXP_CPU_STATE);
10380
10381 val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10382 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
10383 val, BCE_COM_CPU_STATE);
10384
10385 val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
10386 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n",
10387 val, BCE_MCP_CPU_STATE);
10388
10389 val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10390 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
10391 val, BCE_CP_CPU_STATE);
10392
10393 BCE_PRINTF(
10394 "----------------------------"
10395 "----------------"
10396 "----------------------------\n");
10397
10398 BCE_PRINTF(
10399 "----------------------------"
10400 " Register Dump "
10401 "----------------------------\n");
10402
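	/* Dump the memory-mapped register space from 0x400 through 0x7ff0. */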
10403 for (int i = 0x400; i < 0x8000; i += 0x10) {
10404 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10405 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10406 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10407 }
10408
10409 BCE_PRINTF(
10410 "----------------------------"
10411 "----------------"
10412 "----------------------------\n");
10413}
10414
10415
10416/****************************************************************************/
10417/* Prints out the mailbox queue registers. */
10418/* */
10419/* Returns: */
10420/* Nothing. */
10421/****************************************************************************/
10422static __attribute__ ((noinline)) void
10423bce_dump_mq_regs(struct bce_softc *sc)
10424{
10425 BCE_PRINTF(
10426 "----------------------------"
10427 " MQ Regs "
10428 "----------------------------\n");
10429
10430 BCE_PRINTF(
10431 "----------------------------"
10432 "----------------"
10433 "----------------------------\n");
10434
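	/* Dump the mailbox queue registers from 0x3c00 through 0x3ff0. */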
10435 for (int i = 0x3c00; i < 0x4000; i += 0x10) {
10436 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10437 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10438 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10439 }
10440
10441 BCE_PRINTF(
10442 "----------------------------"
10443 "----------------"
10444 "----------------------------\n");
10445}
10446
10447
10448/****************************************************************************/
10449/* Prints out the bootcode state. */
10450/* */
10451/* Returns: */
10452/* Nothing. */
10453/****************************************************************************/
10454static __attribute__ ((noinline)) void
10455bce_dump_bc_state(struct bce_softc *sc)
10456{
10457 u32 val;
10458
10459 BCE_PRINTF(
10460 "----------------------------"
10461 " Bootcode State "
10462 "----------------------------\n");
10463
10464 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10465
10466 val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE);
10467 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
10468 val, BCE_BC_RESET_TYPE);
10469
10470 val = bce_shmem_rd(sc, BCE_BC_STATE);
10471 BCE_PRINTF("0x%08X - (0x%06X) state\n",
10472 val, BCE_BC_STATE);
10473
10474 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
10475 BCE_PRINTF("0x%08X - (0x%06X) condition\n",
10476 val, BCE_BC_STATE_CONDITION);
10477
10478 val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD);
10479 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
10480 val, BCE_BC_STATE_DEBUG_CMD);
10481
10482 BCE_PRINTF(
10483 "----------------------------"
10484 "----------------"
10485 "----------------------------\n");
10486}
10487
10488
10489/****************************************************************************/
10490/* Prints out the TXP processor state. */
10491/* */
10492/* Returns: */
10493/* Nothing. */
10494/****************************************************************************/
10495static __attribute__ ((noinline)) void
10496bce_dump_txp_state(struct bce_softc *sc, int regs)
10497{
10498 u32 val;
10499 u32 fw_version[3];
10500
10501 BCE_PRINTF(
10502 "----------------------------"
10503 " TXP State "
10504 "----------------------------\n");
10505
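	/*
	 * Read the firmware version string (three 32-bit words, assumed to
	 * be NUL-terminated) from TXP scratch memory; htonl() restores the
	 * byte order so it prints correctly on little-endian hosts.
	 */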
10506 for (int i = 0; i < 3; i++)
10507 fw_version[i] = htonl(REG_RD_IND(sc,
10508 (BCE_TXP_SCRATCH + 0x10 + i * 4)));
10509 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10510
10511 val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
10512 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n",
10513 val, BCE_TXP_CPU_MODE);
10514
10515 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10516 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
10517 val, BCE_TXP_CPU_STATE);
10518
10519 val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
10520 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n",
10521 val, BCE_TXP_CPU_EVENT_MASK);
10522
10523 if (regs) {
10524 BCE_PRINTF(
10525 "----------------------------"
10526 " Register Dump "
10527 "----------------------------\n");
10528
10529 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
10530 /* Skip the big blank spaces */
10531			if (i < 0x45400 || i > 0x5ffff)
10532 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10533 "0x%08X 0x%08X\n", i,
10534 REG_RD_IND(sc, i),
10535 REG_RD_IND(sc, i + 0x4),
10536 REG_RD_IND(sc, i + 0x8),
10537 REG_RD_IND(sc, i + 0xC));
10538 }
10539 }
10540
10541 BCE_PRINTF(
10542 "----------------------------"
10543 "----------------"
10544 "----------------------------\n");
10545}
10546
10547
10548/****************************************************************************/
10549/* Prints out the RXP processor state. */
10550/* */
10551/* Returns: */
10552/* Nothing. */
10553/****************************************************************************/
10554static __attribute__ ((noinline)) void
10555bce_dump_rxp_state(struct bce_softc *sc, int regs)
10556{
10557 u32 val;
10558 u32 fw_version[3];
10559
10560 BCE_PRINTF(
10561 "----------------------------"
10562 " RXP State "
10563 "----------------------------\n");
10564
10565 for (int i = 0; i < 3; i++)
10566 fw_version[i] = htonl(REG_RD_IND(sc,
10567 (BCE_RXP_SCRATCH + 0x10 + i * 4)));
10568
10569 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10570
10571 val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
10572 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n",
10573 val, BCE_RXP_CPU_MODE);
10574
10575 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10576 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
10577 val, BCE_RXP_CPU_STATE);
10578
10579 val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
10580 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n",
10581 val, BCE_RXP_CPU_EVENT_MASK);
10582
10583 if (regs) {
10584 BCE_PRINTF(
10585 "----------------------------"
10586 " Register Dump "
10587 "----------------------------\n");
10588
10589 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
10590			/* Skip the big blank spaces */
10591			if (i < 0xc5400 || i > 0xdffff)
10592 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10593 "0x%08X 0x%08X\n", i,
10594 REG_RD_IND(sc, i),
10595 REG_RD_IND(sc, i + 0x4),
10596 REG_RD_IND(sc, i + 0x8),
10597 REG_RD_IND(sc, i + 0xC));
10598 }
10599 }
10600
10601 BCE_PRINTF(
10602 "----------------------------"
10603 "----------------"
10604 "----------------------------\n");
10605}
10606
10607
10608/****************************************************************************/
10609/* Prints out the TPAT processor state. */
10610/* */
10611/* Returns: */
10612/* Nothing. */
10613/****************************************************************************/
10614static __attribute__ ((noinline)) void
10615bce_dump_tpat_state(struct bce_softc *sc, int regs)
10616{
10617 u32 val;
10618 u32 fw_version[3];
10619
10620 BCE_PRINTF(
10621 "----------------------------"
10622 " TPAT State "
10623 "----------------------------\n");
10624
10625 for (int i = 0; i < 3; i++)
10626 fw_version[i] = htonl(REG_RD_IND(sc,
10627 (BCE_TPAT_SCRATCH + 0x410 + i * 4)));
10628
10629 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10630
10631 val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
10632 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n",
10633 val, BCE_TPAT_CPU_MODE);
10634
10635 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10636 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
10637 val, BCE_TPAT_CPU_STATE);
10638
10639 val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
10640 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n",
10641 val, BCE_TPAT_CPU_EVENT_MASK);
10642
10643 if (regs) {
10644 BCE_PRINTF(
10645 "----------------------------"
10646 " Register Dump "
10647 "----------------------------\n");
10648
10649 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
10650 /* Skip the big blank spaces */
10651			if (i < 0x85400 || i > 0x9ffff)
10652 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10653 "0x%08X 0x%08X\n", i,
10654 REG_RD_IND(sc, i),
10655 REG_RD_IND(sc, i + 0x4),
10656 REG_RD_IND(sc, i + 0x8),
10657 REG_RD_IND(sc, i + 0xC));
10658 }
10659 }
10660
10661 BCE_PRINTF(
10662 "----------------------------"
10663 "----------------"
10664 "----------------------------\n");
10665}
10666
10667
10668/****************************************************************************/
10669/* Prints out the Command Processor (CP) state.                              */
10670/* */
10671/* Returns: */
10672/* Nothing. */
10673/****************************************************************************/
10674static __attribute__ ((noinline)) void
10675bce_dump_cp_state(struct bce_softc *sc, int regs)
10676{
10677 u32 val;
10678 u32 fw_version[3];
10679
10680 BCE_PRINTF(
10681 "----------------------------"
10682 " CP State "
10683 "----------------------------\n");
10684
10685 for (int i = 0; i < 3; i++)
10686 fw_version[i] = htonl(REG_RD_IND(sc,
10687 (BCE_CP_SCRATCH + 0x10 + i * 4)));
10688
10689 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10690
10691 val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
10692 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n",
10693 val, BCE_CP_CPU_MODE);
10694
10695 val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10696 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
10697 val, BCE_CP_CPU_STATE);
10698
10699 val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
10700 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
10701 BCE_CP_CPU_EVENT_MASK);
10702
10703 if (regs) {
10704 BCE_PRINTF(
10705 "----------------------------"
10706 " Register Dump "
10707 "----------------------------\n");
10708
10709 for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
10710 /* Skip the big blank spaces */
10711			if (i < 0x185400 || i > 0x19ffff)
10712 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10713 "0x%08X 0x%08X\n", i,
10714 REG_RD_IND(sc, i),
10715 REG_RD_IND(sc, i + 0x4),
10716 REG_RD_IND(sc, i + 0x8),
10717 REG_RD_IND(sc, i + 0xC));
10718 }
10719 }
10720
10721 BCE_PRINTF(
10722 "----------------------------"
10723 "----------------"
10724 "----------------------------\n");
10725}
10726
10727
10728/****************************************************************************/
10729/* Prints out the Completion Processor (COM) state.                          */
10730/* */
10731/* Returns: */
10732/* Nothing. */
10733/****************************************************************************/
10734static __attribute__ ((noinline)) void
10735bce_dump_com_state(struct bce_softc *sc, int regs)
10736{
10737 u32 val;
10738 u32 fw_version[4];
10739
10740 BCE_PRINTF(
10741 "----------------------------"
10742 " COM State "
10743 "----------------------------\n");
10744
10745 for (int i = 0; i < 3; i++)
10746 fw_version[i] = htonl(REG_RD_IND(sc,
10747 (BCE_COM_SCRATCH + 0x10 + i * 4)));
10748
10749 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10750
10751 val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
10752 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n",
10753 val, BCE_COM_CPU_MODE);
10754
10755 val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10756 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
10757 val, BCE_COM_CPU_STATE);
10758
10759 val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
10760 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
10761 BCE_COM_CPU_EVENT_MASK);
10762
10763 if (regs) {
10764 BCE_PRINTF(
10765 "----------------------------"
10766 " Register Dump "
10767 "----------------------------\n");
10768
10769 for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
10770 BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
10771 "0x%08X 0x%08X\n", i,
10772 REG_RD_IND(sc, i),
10773 REG_RD_IND(sc, i + 0x4),
10774 REG_RD_IND(sc, i + 0x8),
10775 REG_RD_IND(sc, i + 0xC));
10776 }
10777 }
10778
10779 BCE_PRINTF(
10780 "----------------------------"
10781 "----------------"
10782 "----------------------------\n");
10783}
10784
10785
10786/****************************************************************************/
10787/* Prints out the Receive Virtual 2 Physical (RV2P) state. */
10788/* */
10789/* Returns: */
10790/* Nothing. */
10791/****************************************************************************/
10792static __attribute__ ((noinline)) void
10793bce_dump_rv2p_state(struct bce_softc *sc)
10794{
10795 u32 val, pc1, pc2, fw_ver_high, fw_ver_low;
10796
10797 BCE_PRINTF(
10798 "----------------------------"
10799 " RV2P State "
10800 "----------------------------\n");
10801
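	/*
	 * The firmware version is read from RV2P instruction memory below,
	 * so both processors are stalled first and resumed once the reads
	 * have completed.
	 */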
10802 /* Stall the RV2P processors. */
10803 val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
10804 val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2;
10805 REG_WR_IND(sc, BCE_RV2P_CONFIG, val);
10806
10807 /* Read the firmware version. */
10808 val = 0x00000001;
10809 REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
10810 fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
10811 fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
10812 BCE_RV2P_INSTR_HIGH_HIGH;
10813 BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n",
10814 fw_ver_high, fw_ver_low);
10815
10816 val = 0x00000001;
10817 REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
10818 fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
10819 fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
10820 BCE_RV2P_INSTR_HIGH_HIGH;
10821 BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n",
10822 fw_ver_high, fw_ver_low);
10823
10824 /* Resume the RV2P processors. */
10825 val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
10826 val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2);
10827 REG_WR_IND(sc, BCE_RV2P_CONFIG, val);
10828
10829 /* Fetch the program counter value. */
10830 val = 0x68007800;
10831 REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
10832 val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
10833 pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
10834 pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
10835 BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1);
10836 BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2);
10837
10838 /* Fetch the program counter value again to see if it is advancing. */
10839 val = 0x68007800;
10840 REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
10841 val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
10842 pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
10843 pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
10844 BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1);
10845 BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2);
10846
10847 BCE_PRINTF(
10848 "----------------------------"
10849 "----------------"
10850 "----------------------------\n");
10851}
10852
10853
10854/****************************************************************************/
10855/* Prints out the driver state and then enters the debugger. */
10856/* */
10857/* Returns: */
10858/* Nothing. */
10859/****************************************************************************/
10860static __attribute__ ((noinline)) void
10861bce_breakpoint(struct bce_softc *sc)
10862{
10863
10864 /*
10865 * Unreachable code to silence compiler warnings
10866 * about unused functions.
10867 */
10868 if (0) {
10869 bce_freeze_controller(sc);
10870 bce_unfreeze_controller(sc);
10871 bce_dump_enet(sc, NULL);
10872 bce_dump_txbd(sc, 0, NULL);
10873 bce_dump_rxbd(sc, 0, NULL);
10874 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
10875 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
10876 bce_dump_l2fhdr(sc, 0, NULL);
10877 bce_dump_ctx(sc, RX_CID);
10878 bce_dump_ftqs(sc);
10879 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
10880 bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD);
10881 bce_dump_status_block(sc);
10882 bce_dump_stats_block(sc);
10883 bce_dump_driver_state(sc);
10884 bce_dump_hw_state(sc);
10885 bce_dump_bc_state(sc);
10886 bce_dump_txp_state(sc, 0);
10887 bce_dump_rxp_state(sc, 0);
10888 bce_dump_tpat_state(sc, 0);
10889 bce_dump_cp_state(sc, 0);
10890 bce_dump_com_state(sc, 0);
10891 bce_dump_rv2p_state(sc);
10892
10893#ifdef BCE_JUMBO_HDRSPLIT
10894 bce_dump_pgbd(sc, 0, NULL);
10895 bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
10896 bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
10897#endif
10898 }
10899
10900 bce_dump_status_block(sc);
10901 bce_dump_driver_state(sc);
10902
10903 /* Call the debugger. */
10904 breakpoint();
10905
10906 return;
10907}
10908#endif
10909