if_bce.c revision 186169
1/*-
2 * Copyright (c) 2006-2008 Broadcom Corporation
3 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 *    may be used to endorse or promote products derived from this software
16 *    without specific prior written consent.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/bce/if_bce.c 186169 2008-12-16 05:03:22Z delphij $");
33
34/*
35 * The following controllers are supported by this driver:
36 *   BCM5706C A2, A3
37 *   BCM5706S A2, A3
38 *   BCM5708C B1, B2
39 *   BCM5708S B1, B2
40 *   BCM5709C A1, C0
41 *   BCM5716  C0
42 *
43 * The following controllers are not supported by this driver:
44 *   BCM5706C A0, A1 (pre-production)
45 *   BCM5706S A0, A1 (pre-production)
46 *   BCM5708C A0, B0 (pre-production)
47 *   BCM5708S A0, B0 (pre-production)
48 *   BCM5709C A0  B0, B1, B2 (pre-production)
49 *   BCM5709S A0, A1, B0, B1, B2, C0 (pre-production)
50 */
51
52#include "opt_bce.h"
53
54#include <dev/bce/if_bcereg.h>
55#include <dev/bce/if_bcefw.h>
56
57/****************************************************************************/
58/* BCE Debug Options                                                        */
59/****************************************************************************/
60#ifdef BCE_DEBUG
61	u32 bce_debug = BCE_WARN;
62
63	/*          0 = Never              */
64	/*          1 = 1 in 2,147,483,648 */
65	/*        256 = 1 in     8,388,608 */
66	/*       2048 = 1 in     1,048,576 */
67	/*      65536 = 1 in        32,768 */
68	/*    1048576 = 1 in         2,048 */
69	/*  268435456 =	1 in             8 */
70	/*  536870912 = 1 in             4 */
71	/* 1073741824 = 1 in             2 */
72
73	/* Controls how often the l2_fhdr frame error check will fail. */
74	int bce_debug_l2fhdr_status_check = 0;
75
76	/* Controls how often the unexpected attention check will fail. */
77	int bce_debug_unexpected_attention = 0;
78
79	/* Controls how often to simulate an mbuf allocation failure. */
80	int bce_debug_mbuf_allocation_failure = 0;
81
82	/* Controls how often to simulate a DMA mapping failure. */
83	int bce_debug_dma_map_addr_failure = 0;
84
85	/* Controls how often to simulate a bootcode failure. */
86	int bce_debug_bootcode_running_failure = 0;
87#endif
88
89/****************************************************************************/
90/* BCE Build Time Options                                                   */
91/****************************************************************************/
92#define BCE_USE_SPLIT_HEADER 1
93/* #define BCE_NVRAM_WRITE_SUPPORT 1 */
94
95
96/****************************************************************************/
97/* PCI Device ID Table                                                      */
98/*                                                                          */
99/* Used by bce_probe() to identify the devices supported by this driver.    */
100/****************************************************************************/
101#define BCE_DEVDESC_MAX		64
102
103static struct bce_type bce_devs[] = {
104	/* BCM5706C Controllers and OEM boards. */
105	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
106		"HP NC370T Multifunction Gigabit Server Adapter" },
107	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
108		"HP NC370i Multifunction Gigabit Server Adapter" },
109	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
110		"Broadcom NetXtreme II BCM5706 1000Base-T" },
111
112	/* BCM5706S controllers and OEM boards. */
113	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
114		"HP NC370F Multifunction Gigabit Server Adapter" },
115	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
116		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
117
118	/* BCM5708C controllers and OEM boards. */
119	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
120		"Broadcom NetXtreme II BCM5708 1000Base-T" },
121
122	/* BCM5708S controllers and OEM boards. */
123	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
124		"Broadcom NetXtreme II BCM5708 1000Base-SX" },
125
126	/* BCM5709C controllers and OEM boards. */
127	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
128		"Broadcom NetXtreme II BCM5709 1000Base-T" },
129
130	/* BCM5709S controllers and OEM boards. */
131	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
132		"Broadcom NetXtreme II BCM5709 1000Base-SX" },
133
134	/* BCM5716 controllers and OEM boards. */
135	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,  PCI_ANY_ID,  PCI_ANY_ID,
136		"Broadcom NetXtreme II BCM5716 1000Base-T" },
137
138	{ 0, 0, 0, 0, NULL }
139};
140
141
142/****************************************************************************/
143/* Supported Flash NVRAM device data.                                       */
144/****************************************************************************/
145static struct flash_spec flash_table[] =
146{
147#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
148#define NONBUFFERED_FLAGS	(BCE_NV_WREN)
149
150	/* Slow EEPROM */
151	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
152	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
153	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
154	 "EEPROM - slow"},
155	/* Expansion entry 0001 */
156	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
157	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159	 "Entry 0001"},
160	/* Saifun SA25F010 (non-buffered flash) */
161	/* strap, cfg1, & write1 need updates */
162	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
163	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
165	 "Non-buffered flash (128kB)"},
166	/* Saifun SA25F020 (non-buffered flash) */
167	/* strap, cfg1, & write1 need updates */
168	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
169	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
171	 "Non-buffered flash (256kB)"},
172	/* Expansion entry 0100 */
173	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
174	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176	 "Entry 0100"},
177	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
178	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
179	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
180	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
181	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
182	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
183	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
184	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
185	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
186	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
187	/* Saifun SA25F005 (non-buffered flash) */
188	/* strap, cfg1, & write1 need updates */
189	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
190	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
192	 "Non-buffered flash (64kB)"},
193	/* Fast EEPROM */
194	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
195	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
196	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
197	 "EEPROM - fast"},
198	/* Expansion entry 1001 */
199	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
200	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202	 "Entry 1001"},
203	/* Expansion entry 1010 */
204	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
205	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
206	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
207	 "Entry 1010"},
208	/* ATMEL AT45DB011B (buffered flash) */
209	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
210	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
211	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
212	 "Buffered flash (128kB)"},
213	/* Expansion entry 1100 */
214	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
215	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
217	 "Entry 1100"},
218	/* Expansion entry 1101 */
219	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
220	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
221	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
222	 "Entry 1101"},
223	/* Ateml Expansion entry 1110 */
224	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
225	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
226	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
227	 "Entry 1110 (Atmel)"},
228	/* ATMEL AT45DB021B (buffered flash) */
229	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
230	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
231	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
232	 "Buffered flash (256kB)"},
233};
234
235/*
236 * The BCM5709 controllers transparently handle the
237 * differences between Atmel 264 byte pages and all
238 * flash devices which use 256 byte pages, so no
239 * logical-to-physical mapping is required in the
240 * driver.
241 */
242static struct flash_spec flash_5709 = {
243	.flags		= BCE_NV_BUFFERED,
244	.page_bits	= BCM5709_FLASH_PAGE_BITS,
245	.page_size	= BCM5709_FLASH_PAGE_SIZE,
246	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
247	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
248	.name		= "5709/5716 buffered flash (256kB)",
249};
250
251
252/****************************************************************************/
253/* FreeBSD device entry points.                                             */
254/****************************************************************************/
255static int  bce_probe				(device_t);
256static int  bce_attach				(device_t);
257static int  bce_detach				(device_t);
258static int  bce_shutdown			(device_t);
259
260
261/****************************************************************************/
262/* BCE Debug Data Structure Dump Routines                                   */
263/****************************************************************************/
264#ifdef BCE_DEBUG
265static u32	bce_reg_rd				(struct bce_softc *, u32);
266static void	bce_reg_wr				(struct bce_softc *, u32, u32);
267static void	bce_reg_wr16			(struct bce_softc *, u32, u16);
268static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
269static void bce_dump_enet           (struct bce_softc *, struct mbuf *);
270static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
271static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
272static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
273#ifdef BCE_USE_SPLIT_HEADER
274static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
275#endif
276static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
277static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
278#ifdef BCE_USE_SPLIT_HEADER
279static void bce_dump_pgbd			(struct bce_softc *, int, struct rx_bd *);
280#endif
281static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
282static void bce_dump_ctx			(struct bce_softc *, u16);
283static void bce_dump_ftqs			(struct bce_softc *);
284static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
285static void bce_dump_rx_chain		(struct bce_softc *, u16, int);
286#ifdef BCE_USE_SPLIT_HEADER
287static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
288#endif
289static void bce_dump_status_block	(struct bce_softc *);
290static void bce_dump_stats_block	(struct bce_softc *);
291static void bce_dump_driver_state	(struct bce_softc *);
292static void bce_dump_hw_state		(struct bce_softc *);
293static void bce_dump_mq_regs        (struct bce_softc *);
294static void bce_dump_bc_state		(struct bce_softc *);
295static void bce_dump_txp_state		(struct bce_softc *, int);
296static void bce_dump_rxp_state		(struct bce_softc *, int);
297static void bce_dump_tpat_state		(struct bce_softc *, int);
298static void bce_dump_cp_state		(struct bce_softc *, int);
299static void bce_dump_com_state		(struct bce_softc *, int);
300static void bce_breakpoint			(struct bce_softc *);
301#endif
302
303
304/****************************************************************************/
305/* BCE Register/Memory Access Routines                                      */
306/****************************************************************************/
307static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
308static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
309static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
310static int  bce_miibus_read_reg		(device_t, int, int);
311static int  bce_miibus_write_reg	(device_t, int, int, int);
312static void bce_miibus_statchg		(device_t);
313
314
315/****************************************************************************/
316/* BCE NVRAM Access Routines                                                */
317/****************************************************************************/
318static int  bce_acquire_nvram_lock	(struct bce_softc *);
319static int  bce_release_nvram_lock	(struct bce_softc *);
320static void bce_enable_nvram_access	(struct bce_softc *);
321static void	bce_disable_nvram_access(struct bce_softc *);
322static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
323static int  bce_init_nvram			(struct bce_softc *);
324static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
325static int  bce_nvram_test			(struct bce_softc *);
326#ifdef BCE_NVRAM_WRITE_SUPPORT
327static int  bce_enable_nvram_write	(struct bce_softc *);
328static void bce_disable_nvram_write	(struct bce_softc *);
329static int  bce_nvram_erase_page	(struct bce_softc *, u32);
330static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
331static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
332#endif
333
334/****************************************************************************/
335/*                                                                          */
336/****************************************************************************/
337static void bce_get_media			(struct bce_softc *);
338static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
339static int  bce_dma_alloc			(device_t);
340static void bce_dma_free			(struct bce_softc *);
341static void bce_release_resources	(struct bce_softc *);
342
343/****************************************************************************/
344/* BCE Firmware Synchronization and Load                                    */
345/****************************************************************************/
346static int  bce_fw_sync				(struct bce_softc *, u32);
347static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
348static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
349static void bce_init_rxp_cpu		(struct bce_softc *);
350static void bce_init_txp_cpu 		(struct bce_softc *);
351static void bce_init_tpat_cpu		(struct bce_softc *);
352static void bce_init_cp_cpu		  	(struct bce_softc *);
353static void bce_init_com_cpu	  	(struct bce_softc *);
354static void bce_init_cpus			(struct bce_softc *);
355
356static void	bce_print_adapter_info	(struct bce_softc *);
357static void bce_probe_pci_caps		(device_t, struct bce_softc *);
358static void bce_stop				(struct bce_softc *);
359static int  bce_reset				(struct bce_softc *, u32);
360static int  bce_chipinit 			(struct bce_softc *);
361static int  bce_blockinit 			(struct bce_softc *);
362
363static int  bce_init_tx_chain		(struct bce_softc *);
364static void bce_free_tx_chain		(struct bce_softc *);
365
366static int  bce_get_rx_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
367static int  bce_init_rx_chain		(struct bce_softc *);
368static void bce_fill_rx_chain		(struct bce_softc *);
369static void bce_free_rx_chain		(struct bce_softc *);
370
371#ifdef BCE_USE_SPLIT_HEADER
372static int  bce_get_pg_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *);
373static int  bce_init_pg_chain		(struct bce_softc *);
374static void bce_fill_pg_chain		(struct bce_softc *);
375static void bce_free_pg_chain		(struct bce_softc *);
376#endif
377
378static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
379static void bce_start_locked		(struct ifnet *);
380static void bce_start				(struct ifnet *);
381static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
382static void bce_watchdog			(struct bce_softc *);
383static int  bce_ifmedia_upd			(struct ifnet *);
384static void bce_ifmedia_upd_locked	(struct ifnet *);
385static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
386static void bce_init_locked			(struct bce_softc *);
387static void bce_init				(void *);
388static void bce_mgmt_init_locked	(struct bce_softc *sc);
389
390static void bce_init_ctx			(struct bce_softc *);
391static void bce_get_mac_addr		(struct bce_softc *);
392static void bce_set_mac_addr		(struct bce_softc *);
393static void bce_phy_intr			(struct bce_softc *);
394static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
395static void bce_rx_intr				(struct bce_softc *);
396static void bce_tx_intr				(struct bce_softc *);
397static void bce_disable_intr		(struct bce_softc *);
398static void bce_enable_intr			(struct bce_softc *, int);
399
400static void bce_intr				(void *);
401static void bce_set_rx_mode			(struct bce_softc *);
402static void bce_stats_update		(struct bce_softc *);
403static void bce_tick				(void *);
404static void bce_pulse				(void *);
405static void bce_add_sysctls			(struct bce_softc *);
406
407
408/****************************************************************************/
409/* FreeBSD device dispatch table.                                           */
410/****************************************************************************/
411static device_method_t bce_methods[] = {
412	/* Device interface (device_if.h) */
413	DEVMETHOD(device_probe,		bce_probe),
414	DEVMETHOD(device_attach,	bce_attach),
415	DEVMETHOD(device_detach,	bce_detach),
416	DEVMETHOD(device_shutdown,	bce_shutdown),
417/* Supported by device interface but not used here. */
418/*	DEVMETHOD(device_identify,	bce_identify),      */
419/*	DEVMETHOD(device_suspend,	bce_suspend),       */
420/*	DEVMETHOD(device_resume,	bce_resume),        */
421/*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
422
423	/* Bus interface (bus_if.h) */
424	DEVMETHOD(bus_print_child,	bus_generic_print_child),
425	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
426
427	/* MII interface (miibus_if.h) */
428	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
429	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
430	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
431/* Supported by MII interface but not used here.       */
432/*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
433/*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
434
435	{ 0, 0 }
436};
437
438static driver_t bce_driver = {
439	"bce",
440	bce_methods,
441	sizeof(struct bce_softc)
442};
443
444static devclass_t bce_devclass;
445
446MODULE_DEPEND(bce, pci, 1, 1, 1);
447MODULE_DEPEND(bce, ether, 1, 1, 1);
448MODULE_DEPEND(bce, miibus, 1, 1, 1);
449
450DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
451DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
452
453
454/****************************************************************************/
455/* Tunable device values                                                    */
456/****************************************************************************/
457SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
458
459/* Allowable values are TRUE or FALSE */
460static int bce_tso_enable = TRUE;
461TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
462SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
463"TSO Enable/Disable");
464
465/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
466/* ToDo: Add MSI-X support. */
467static int bce_msi_enable = 1;
468TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
469SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
470"MSI-X|MSI|INTx selector");
471
472/* ToDo: Add tunable to enable/disable strict MTU handling. */
473/* Currently allows "loose" RX MTU checking (i.e. sets the  */
474/* H/W RX MTU to the size of the largest receive buffer, or */
475/* 2048 bytes).                                             */
476
477
478/****************************************************************************/
479/* Device probe function.                                                   */
480/*                                                                          */
481/* Compares the device to the driver's list of supported devices and        */
482/* reports back to the OS whether this is the right driver for the device.  */
483/*                                                                          */
484/* Returns:                                                                 */
485/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
486/****************************************************************************/
487static int
488bce_probe(device_t dev)
489{
490	struct bce_type *t;
491	struct bce_softc *sc;
492	char *descbuf;
493	u16 vid = 0, did = 0, svid = 0, sdid = 0;
494
495	t = bce_devs;
496
497	sc = device_get_softc(dev);
498	bzero(sc, sizeof(struct bce_softc));
499	sc->bce_unit = device_get_unit(dev);
500	sc->bce_dev = dev;
501
502	/* Get the data for the device to be probed. */
503	vid  = pci_get_vendor(dev);
504	did  = pci_get_device(dev);
505	svid = pci_get_subvendor(dev);
506	sdid = pci_get_subdevice(dev);
507
508	DBPRINT(sc, BCE_EXTREME_LOAD,
509		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
510		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
511
512	/* Look through the list of known devices for a match. */
513	while(t->bce_name != NULL) {
514
515		if ((vid == t->bce_vid) && (did == t->bce_did) &&
516			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
517			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
518
519			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
520
521			if (descbuf == NULL)
522				return(ENOMEM);
523
524			/* Print out the device identity. */
525			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
526				t->bce_name,
527			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
528			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
529
530			device_set_desc_copy(dev, descbuf);
531			free(descbuf, M_TEMP);
532			return(BUS_PROBE_DEFAULT);
533		}
534		t++;
535	}
536
537	return(ENXIO);
538}
539
540
541/****************************************************************************/
542/* PCI Capabilities Probe Function.                                         */
543/*                                                                          */
544/* Walks the PCI capabiites list for the device to find what features are   */
545/* supported.                                                               */
546/*                                                                          */
547/* Returns:                                                                 */
548/*   None.                                                                  */
549/****************************************************************************/
550static void
551bce_print_adapter_info(struct bce_softc *sc)
552{
553	DBENTER(BCE_VERBOSE_LOAD);
554
555	BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
556	printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
557		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
558
559	/* Bus info. */
560	if (sc->bce_flags & BCE_PCIE_FLAG) {
561		printf("Bus (PCIe x%d, ", sc->link_width);
562		switch (sc->link_speed) {
563			case 1: printf("2.5Gbps); "); break;
564			case 2:	printf("5Gbps); "); break;
565			default: printf("Unknown link speed); ");
566		}
567	} else {
568		printf("Bus (PCI%s, %s, %dMHz); ",
569			((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
570			((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
571			sc->bus_speed_mhz);
572	}
573
574	/* Firmware version and device features. */
575	printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
576#ifdef BCE_USE_SPLIT_HEADER
577	printf("SPLT ");
578#endif
579	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
580		printf("MFW ");
581	if (sc->bce_flags & BCE_USING_MSI_FLAG)
582		printf("MSI ");
583	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
584		printf("MSI-X ");
585	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
586		printf("2.5G ");
587	printf(")\n");
588
589	DBEXIT(BCE_VERBOSE_LOAD);
590}
591
592
593/****************************************************************************/
594/* PCI Capabilities Probe Function.                                         */
595/*                                                                          */
596/* Walks the PCI capabiites list for the device to find what features are   */
597/* supported.                                                               */
598/*                                                                          */
599/* Returns:                                                                 */
600/*   None.                                                                  */
601/****************************************************************************/
602static void
603bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
604{
605	u32 reg;
606
607	DBENTER(BCE_VERBOSE_LOAD);
608
609	/* Check if PCI-X capability is enabled. */
610	if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
611		if (reg != 0)
612			sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
613	}
614
615	/* Check if PCIe capability is enabled. */
616	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
617		if (reg != 0) {
618			u16 link_status = pci_read_config(dev, reg + 0x12, 2);
619			DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = 0x%08X\n",
620				link_status);
621			sc->link_speed = link_status & 0xf;
622			sc->link_width = (link_status >> 4) & 0x3f;
623			sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
624			sc->bce_flags |= BCE_PCIE_FLAG;
625		}
626	}
627
628	/* Check if MSI capability is enabled. */
629	if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
630		if (reg != 0)
631			sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
632	}
633
634	/* Check if MSI-X capability is enabled. */
635	if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
636		if (reg != 0)
637			sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
638	}
639
640	DBEXIT(BCE_VERBOSE_LOAD);
641}
642
643
644/****************************************************************************/
645/* Device attach function.                                                  */
646/*                                                                          */
647/* Allocates device resources, performs secondary chip identification,      */
648/* resets and initializes the hardware, and initializes driver instance     */
649/* variables.                                                               */
650/*                                                                          */
651/* Returns:                                                                 */
652/*   0 on success, positive value on failure.                               */
653/****************************************************************************/
654static int
655bce_attach(device_t dev)
656{
657	struct bce_softc *sc;
658	struct ifnet *ifp;
659	u32 val;
660	int error, rid, rc = 0;
661
662	sc = device_get_softc(dev);
663	sc->bce_dev = dev;
664
665	DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
666
667	sc->bce_unit = device_get_unit(dev);
668
669	/* Set initial device and PHY flags */
670	sc->bce_flags = 0;
671	sc->bce_phy_flags = 0;
672
673	pci_enable_busmaster(dev);
674
675	/* Allocate PCI memory resources. */
676	rid = PCIR_BAR(0);
677	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
678		&rid, RF_ACTIVE);
679
680	if (sc->bce_res_mem == NULL) {
681		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
682			__FILE__, __LINE__);
683		rc = ENXIO;
684		goto bce_attach_fail;
685	}
686
687	/* Get various resource handles. */
688	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
689	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
690	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
691
692	bce_probe_pci_caps(dev, sc);
693
694	rid = 1;
695#if 0
696	/* Try allocating MSI-X interrupts. */
697	if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
698		(bce_msi_enable >= 2) &&
699		((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
700		&rid, RF_ACTIVE)) != NULL)) {
701
702		msi_needed = sc->bce_msi_count = 1;
703
704		if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
705			(sc->bce_msi_count != msi_needed)) {
706			BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
707				"Received = %d, error = %d\n", __FILE__, __LINE__,
708				msi_needed, sc->bce_msi_count, error);
709			sc->bce_msi_count = 0;
710			pci_release_msi(dev);
711			bus_release_resource(dev, SYS_RES_MEMORY, rid,
712				sc->bce_res_irq);
713			sc->bce_res_irq = NULL;
714		} else {
715			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
716				__FUNCTION__);
717			sc->bce_flags |= BCE_USING_MSIX_FLAG;
718			sc->bce_intr = bce_intr;
719		}
720	}
721#endif
722
723	/* Try allocating a MSI interrupt. */
724	if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
725		(bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
726		sc->bce_msi_count = 1;
727		if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
728			BCE_PRINTF("%s(%d): MSI allocation failed! error = %d\n",
729				__FILE__, __LINE__, error);
730			sc->bce_msi_count = 0;
731			pci_release_msi(dev);
732		} else {
733			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI interrupt.\n",
734				__FUNCTION__);
735			sc->bce_flags |= BCE_USING_MSI_FLAG;
736			if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
737				(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
738				sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
739			sc->bce_irq_rid = 1;
740			sc->bce_intr = bce_intr;
741		}
742	}
743
744	/* Try allocating a legacy interrupt. */
745	if (sc->bce_msi_count == 0) {
746		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
747			__FUNCTION__);
748		rid = 0;
749		sc->bce_intr = bce_intr;
750	}
751
752	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
753		&rid, RF_SHAREABLE | RF_ACTIVE);
754
755	sc->bce_irq_rid = rid;
756
757	/* Report any IRQ allocation errors. */
758	if (sc->bce_res_irq == NULL) {
759		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
760			__FILE__, __LINE__);
761		rc = ENXIO;
762		goto bce_attach_fail;
763	}
764
765	/* Initialize mutex for the current device instance. */
766	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
767
768	/*
769	 * Configure byte swap and enable indirect register access.
770	 * Rely on CPU to do target byte swapping on big endian systems.
771	 * Access to registers outside of PCI configurtion space are not
772	 * valid until this is done.
773	 */
774	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
775			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
776			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
777
778	/* Save ASIC revsion info. */
779	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
780
781	/* Weed out any non-production controller revisions. */
782	switch(BCE_CHIP_ID(sc)) {
783		case BCE_CHIP_ID_5706_A0:
784		case BCE_CHIP_ID_5706_A1:
785		case BCE_CHIP_ID_5708_A0:
786		case BCE_CHIP_ID_5708_B0:
787		case BCE_CHIP_ID_5709_A0:
788		case BCE_CHIP_ID_5709_B0:
789		case BCE_CHIP_ID_5709_B1:
790		case BCE_CHIP_ID_5709_B2:
791			BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
792				__FILE__, __LINE__,
793				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
794			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
795			rc = ENODEV;
796			goto bce_attach_fail;
797	}
798
799	/*
800	 * The embedded PCIe to PCI-X bridge (EPB)
801	 * in the 5708 cannot address memory above
802	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
803	 */
804	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
805		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
806	else
807		sc->max_bus_addr = BUS_SPACE_MAXADDR;
808
809	/*
810	 * Find the base address for shared memory access.
811	 * Newer versions of bootcode use a signature and offset
812	 * while older versions use a fixed address.
813	 */
814	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
815	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
816		/* Multi-port devices use different offsets in shared memory. */
817		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
818			(pci_get_function(sc->bce_dev) << 2));
819	else
820		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
821
822	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
823		__FUNCTION__, sc->bce_shmem_base);
824
825	/* Fetch the bootcode revision. */
826	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
827		BCE_DEV_INFO_BC_REV);
828
829	/* Check if any management firmware is running. */
830	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
831	if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED))
832		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
833
834	/* Get PCI bus information (speed and type). */
835	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
836	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
837		u32 clkreg;
838
839		sc->bce_flags |= BCE_PCIX_FLAG;
840
841		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
842
843		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
844		switch (clkreg) {
845		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
846			sc->bus_speed_mhz = 133;
847			break;
848
849		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
850			sc->bus_speed_mhz = 100;
851			break;
852
853		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
854		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
855			sc->bus_speed_mhz = 66;
856			break;
857
858		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
859		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
860			sc->bus_speed_mhz = 50;
861			break;
862
863		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
864		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
865		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
866			sc->bus_speed_mhz = 33;
867			break;
868		}
869	} else {
870		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
871			sc->bus_speed_mhz = 66;
872		else
873			sc->bus_speed_mhz = 33;
874	}
875
876	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
877		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
878
879	/* Reset the controller and announce to bootcode that driver is present. */
880	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
881		BCE_PRINTF("%s(%d): Controller reset failed!\n",
882			__FILE__, __LINE__);
883		rc = ENXIO;
884		goto bce_attach_fail;
885	}
886
887	/* Initialize the controller. */
888	if (bce_chipinit(sc)) {
889		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
890			__FILE__, __LINE__);
891		rc = ENXIO;
892		goto bce_attach_fail;
893	}
894
895	/* Perform NVRAM test. */
896	if (bce_nvram_test(sc)) {
897		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
898			__FILE__, __LINE__);
899		rc = ENXIO;
900		goto bce_attach_fail;
901	}
902
903	/* Fetch the permanent Ethernet MAC address. */
904	bce_get_mac_addr(sc);
905
906	/*
907	 * Trip points control how many BDs
908	 * should be ready before generating an
909	 * interrupt while ticks control how long
910	 * a BD can sit in the chain before
911	 * generating an interrupt.  Set the default
912	 * values for the RX and TX chains.
913	 */
914
915#ifdef BCE_DEBUG
916	/* Force more frequent interrupts. */
917	sc->bce_tx_quick_cons_trip_int = 1;
918	sc->bce_tx_quick_cons_trip     = 1;
919	sc->bce_tx_ticks_int           = 0;
920	sc->bce_tx_ticks               = 0;
921
922	sc->bce_rx_quick_cons_trip_int = 1;
923	sc->bce_rx_quick_cons_trip     = 1;
924	sc->bce_rx_ticks_int           = 0;
925	sc->bce_rx_ticks               = 0;
926#else
927	/* Improve throughput at the expense of increased latency. */
928	sc->bce_tx_quick_cons_trip_int = 20;
929	sc->bce_tx_quick_cons_trip     = 20;
930	sc->bce_tx_ticks_int           = 80;
931	sc->bce_tx_ticks               = 80;
932
933	sc->bce_rx_quick_cons_trip_int = 6;
934	sc->bce_rx_quick_cons_trip     = 6;
935	sc->bce_rx_ticks_int           = 18;
936	sc->bce_rx_ticks               = 18;
937#endif
938
939	/* Update statistics once every second. */
940	sc->bce_stats_ticks = 1000000 & 0xffff00;
941
942	/* Find the media type for the adapter. */
943	bce_get_media(sc);
944
945	/* Store data needed by PHY driver for backplane applications */
946	sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
947		BCE_SHARED_HW_CFG_CONFIG);
948	sc->bce_port_hw_cfg   = REG_RD_IND(sc, sc->bce_shmem_base +
949		BCE_PORT_HW_CFG_CONFIG);
950
951	/* Allocate DMA memory resources. */
952	if (bce_dma_alloc(dev)) {
953		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
954		    __FILE__, __LINE__);
955		rc = ENXIO;
956		goto bce_attach_fail;
957	}
958
959	/* Allocate an ifnet structure. */
960	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
961	if (ifp == NULL) {
962		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
963			__FILE__, __LINE__);
964		rc = ENXIO;
965		goto bce_attach_fail;
966	}
967
968	/* Initialize the ifnet interface. */
969	ifp->if_softc        = sc;
970	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
971	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
972	ifp->if_ioctl        = bce_ioctl;
973	ifp->if_start        = bce_start;
974	ifp->if_init         = bce_init;
975	ifp->if_mtu          = ETHERMTU;
976
977	if (bce_tso_enable) {
978		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
979		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
980	} else {
981		ifp->if_hwassist = BCE_IF_HWASSIST;
982		ifp->if_capabilities = BCE_IF_CAPABILITIES;
983	}
984
985	ifp->if_capenable    = ifp->if_capabilities;
986
987	/*
988	 * Assume standard mbuf sizes for buffer allocation.
989	 * This may change later if the MTU size is set to
990	 * something other than 1500.
991	 */
992#ifdef BCE_USE_SPLIT_HEADER
993	sc->rx_bd_mbuf_alloc_size = MHLEN;
994	/* Make sure offset is 16 byte aligned for hardware. */
995	sc->rx_bd_mbuf_align_pad  = roundup2((MSIZE - MHLEN), 16) -
996		(MSIZE - MHLEN);
997	sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
998		sc->rx_bd_mbuf_align_pad;
999	sc->pg_bd_mbuf_alloc_size = MCLBYTES;
1000#else
1001	sc->rx_bd_mbuf_alloc_size = MCLBYTES;
1002	sc->rx_bd_mbuf_align_pad  = roundup2(MCLBYTES, 16) - MCLBYTES;
1003	sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
1004		sc->rx_bd_mbuf_align_pad;
1005#endif
1006
1007	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
1008	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1009	IFQ_SET_READY(&ifp->if_snd);
1010
1011	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1012		ifp->if_baudrate = IF_Mbps(2500ULL);
1013	else
1014		ifp->if_baudrate = IF_Mbps(1000);
1015
1016	/* Check for an MII child bus by probing the PHY. */
1017	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
1018		bce_ifmedia_sts)) {
1019		BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
1020			__FILE__, __LINE__);
1021		rc = ENXIO;
1022		goto bce_attach_fail;
1023	}
1024
1025	/* Attach to the Ethernet interface list. */
1026	ether_ifattach(ifp, sc->eaddr);
1027
1028#if __FreeBSD_version < 500000
1029	callout_init(&sc->bce_tick_callout);
1030	callout_init(&sc->bce_pulse_callout);
1031#else
1032	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1033	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1034#endif
1035
1036	/* Hookup IRQ last. */
1037	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1038		NULL, bce_intr, sc, &sc->bce_intrhand);
1039
1040	if (rc) {
1041		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1042			__FILE__, __LINE__);
1043		bce_detach(dev);
1044		goto bce_attach_exit;
1045	}
1046
1047	/*
1048	 * At this point we've acquired all the resources
1049	 * we need to run so there's no turning back, we're
1050	 * cleared for launch.
1051	 */
1052
1053	/* Print some important debugging info. */
1054	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1055
1056	/* Add the supported sysctls to the kernel. */
1057	bce_add_sysctls(sc);
1058
1059	BCE_LOCK(sc);
1060
1061	/*
1062	 * The chip reset earlier notified the bootcode that
1063	 * a driver is present.  We now need to start our pulse
1064	 * routine so that the bootcode is reminded that we're
1065	 * still running.
1066	 */
1067	bce_pulse(sc);
1068
1069	bce_mgmt_init_locked(sc);
1070	BCE_UNLOCK(sc);
1071
1072	/* Finally, print some useful adapter info */
1073	bce_print_adapter_info(sc);
1074	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1075		__FUNCTION__, sc);
1076
1077	goto bce_attach_exit;
1078
1079bce_attach_fail:
1080	bce_release_resources(sc);
1081
1082bce_attach_exit:
1083
1084	DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1085
1086	return(rc);
1087}
1088
1089
1090/****************************************************************************/
1091/* Device detach function.                                                  */
1092/*                                                                          */
1093/* Stops the controller, resets the controller, and releases resources.     */
1094/*                                                                          */
1095/* Returns:                                                                 */
1096/*   0 on success, positive value on failure.                               */
1097/****************************************************************************/
1098static int
1099bce_detach(device_t dev)
1100{
1101	struct bce_softc *sc = device_get_softc(dev);
1102	struct ifnet *ifp;
1103	u32 msg;
1104
1105	DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1106
1107	ifp = sc->bce_ifp;
1108
1109	/* Stop and reset the controller. */
1110	BCE_LOCK(sc);
1111
1112	/* Stop the pulse so the bootcode can go to driver absent state. */
1113	callout_stop(&sc->bce_pulse_callout);
1114
1115	bce_stop(sc);
1116	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1117		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1118	else
1119		msg = BCE_DRV_MSG_CODE_UNLOAD;
1120	bce_reset(sc, msg);
1121
1122	BCE_UNLOCK(sc);
1123
1124	ether_ifdetach(ifp);
1125
1126	/* If we have a child device on the MII bus remove it too. */
1127	bus_generic_detach(dev);
1128	device_delete_child(dev, sc->bce_miibus);
1129
1130	/* Release all remaining resources. */
1131	bce_release_resources(sc);
1132
1133	DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1134
1135	return(0);
1136}
1137
1138
1139/****************************************************************************/
1140/* Device shutdown function.                                                */
1141/*                                                                          */
1142/* Stops and resets the controller.                                         */
1143/*                                                                          */
1144/* Returns:                                                                 */
1145/*   0 on success, positive value on failure.                               */
1146/****************************************************************************/
1147static int
1148bce_shutdown(device_t dev)
1149{
1150	struct bce_softc *sc = device_get_softc(dev);
1151	u32 msg;
1152
1153	DBENTER(BCE_VERBOSE);
1154
1155	BCE_LOCK(sc);
1156	bce_stop(sc);
1157	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1158		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1159	else
1160		msg = BCE_DRV_MSG_CODE_UNLOAD;
1161	bce_reset(sc, msg);
1162	BCE_UNLOCK(sc);
1163
1164	DBEXIT(BCE_VERBOSE);
1165
1166	return (0);
1167}
1168
1169
1170#ifdef BCE_DEBUG
1171/****************************************************************************/
1172/* Register read.                                                           */
1173/*                                                                          */
1174/* Returns:                                                                 */
1175/*   The value of the register.                                             */
1176/****************************************************************************/
1177static u32
1178bce_reg_rd(struct bce_softc *sc, u32 offset)
1179{
1180	u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
1181	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1182		__FUNCTION__, offset, val);
1183	return val;
1184}
1185
1186
1187/****************************************************************************/
1188/* Register write (16 bit).                                                 */
1189/*                                                                          */
1190/* Returns:                                                                 */
1191/*   Nothing.                                                               */
1192/****************************************************************************/
1193static void
1194bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1195{
1196	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1197		__FUNCTION__, offset, val);
1198	bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
1199}
1200
1201
1202/****************************************************************************/
1203/* Register write.                                                          */
1204/*                                                                          */
1205/* Returns:                                                                 */
1206/*   Nothing.                                                               */
1207/****************************************************************************/
1208static void
1209bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1210{
1211	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1212		__FUNCTION__, offset, val);
1213	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
1214}
1215#endif
1216
1217/****************************************************************************/
1218/* Indirect register read.                                                  */
1219/*                                                                          */
1220/* Reads NetXtreme II registers using an index/data register pair in PCI    */
1221/* configuration space.  Using this mechanism avoids issues with posted     */
1222/* reads but is much slower than memory-mapped I/O.                         */
1223/*                                                                          */
1224/* Returns:                                                                 */
1225/*   The value of the register.                                             */
1226/****************************************************************************/
1227static u32
1228bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1229{
1230	device_t dev;
1231	dev = sc->bce_dev;
1232
1233	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1234#ifdef BCE_DEBUG
1235	{
1236		u32 val;
1237		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1238		DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1239			__FUNCTION__, offset, val);
1240		return val;
1241	}
1242#else
1243	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1244#endif
1245}
1246
1247
1248/****************************************************************************/
1249/* Indirect register write.                                                 */
1250/*                                                                          */
1251/* Writes NetXtreme II registers using an index/data register pair in PCI   */
1252/* configuration space.  Using this mechanism avoids issues with posted     */
1253/* writes but is muchh slower than memory-mapped I/O.                       */
1254/*                                                                          */
1255/* Returns:                                                                 */
1256/*   Nothing.                                                               */
1257/****************************************************************************/
1258static void
1259bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1260{
1261	device_t dev;
1262	dev = sc->bce_dev;
1263
1264	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1265		__FUNCTION__, offset, val);
1266
1267	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1268	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1269}
1270
1271
1272#ifdef BCE_DEBUG
1273/****************************************************************************/
1274/* Context memory read.                                                     */
1275/*                                                                          */
1276/* The NetXtreme II controller uses context memory to track connection      */
1277/* information for L2 and higher network protocols.                         */
1278/*                                                                          */
1279/* Returns:                                                                 */
1280/*   The requested 32 bit value of context memory.                          */
1281/****************************************************************************/
1282static u32
1283bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1284{
1285	u32 idx, offset, retry_cnt = 5, val;
1286
1287	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1288		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1289			__FUNCTION__, cid_addr));
1290
1291	offset = ctx_offset + cid_addr;
1292
1293	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1294		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1295
1296		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1297
1298		for (idx = 0; idx < retry_cnt; idx++) {
1299			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1300			if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1301				break;
1302			DELAY(5);
1303		}
1304
1305		if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1306			BCE_PRINTF("%s(%d); Unable to read CTX memory: "
1307				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1308				__FILE__, __LINE__, cid_addr, ctx_offset);
1309
1310		val = REG_RD(sc, BCE_CTX_CTX_DATA);
1311	} else {
1312		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1313		val = REG_RD(sc, BCE_CTX_DATA);
1314	}
1315
1316	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1317		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1318
1319	return(val);
1320}
1321#endif
1322
1323
1324/****************************************************************************/
1325/* Context memory write.                                                    */
1326/*                                                                          */
1327/* The NetXtreme II controller uses context memory to track connection      */
1328/* information for L2 and higher network protocols.                         */
1329/*                                                                          */
1330/* Returns:                                                                 */
1331/*   Nothing.                                                               */
1332/****************************************************************************/
1333static void
1334bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1335{
1336	u32 idx, offset = ctx_offset + cid_addr;
1337	u32 val, retry_cnt = 5;
1338
1339	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1340		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1341
1342	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1343		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1344			__FUNCTION__, cid_addr));
1345
1346	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1347		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1348
1349		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1350		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1351
1352		for (idx = 0; idx < retry_cnt; idx++) {
1353			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1354			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1355				break;
1356			DELAY(5);
1357		}
1358
1359		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1360			BCE_PRINTF("%s(%d); Unable to write CTX memory: "
1361				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1362				__FILE__, __LINE__, cid_addr, ctx_offset);
1363
1364	} else {
1365		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1366		REG_WR(sc, BCE_CTX_DATA, ctx_val);
1367	}
1368}
1369
1370
1371/****************************************************************************/
1372/* PHY register read.                                                       */
1373/*                                                                          */
1374/* Implements register reads on the MII bus.                                */
1375/*                                                                          */
1376/* Returns:                                                                 */
1377/*   The value of the register.                                             */
1378/****************************************************************************/
1379static int
1380bce_miibus_read_reg(device_t dev, int phy, int reg)
1381{
1382	struct bce_softc *sc;
1383	u32 val;
1384	int i;
1385
1386	sc = device_get_softc(dev);
1387
1388	/* Make sure we are accessing the correct PHY address. */
1389	if (phy != sc->bce_phy_addr) {
1390		DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1391		return(0);
1392	}
1393
1394	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1395		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1396		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1397
1398		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1399		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1400
1401		DELAY(40);
1402	}
1403
1404
1405	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1406		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1407		BCE_EMAC_MDIO_COMM_START_BUSY;
1408	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1409
1410	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1411		DELAY(10);
1412
1413		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1414		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1415			DELAY(5);
1416
1417			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1418			val &= BCE_EMAC_MDIO_COMM_DATA;
1419
1420			break;
1421		}
1422	}
1423
1424	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1425		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1426			__FILE__, __LINE__, phy, reg);
1427		val = 0x0;
1428	} else {
1429		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1430	}
1431
1432
1433	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1434		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1435		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1436
1437		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1438		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1439
1440		DELAY(40);
1441	}
1442
1443	DB_PRINT_PHY_REG(reg, val);
1444	return (val & 0xffff);
1445
1446}
1447
1448
1449/****************************************************************************/
1450/* PHY register write.                                                      */
1451/*                                                                          */
1452/* Implements register writes on the MII bus.                               */
1453/*                                                                          */
1454/* Returns:                                                                 */
1455/*   The value of the register.                                             */
1456/****************************************************************************/
1457static int
1458bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1459{
1460	struct bce_softc *sc;
1461	u32 val1;
1462	int i;
1463
1464	sc = device_get_softc(dev);
1465
1466	/* Make sure we are accessing the correct PHY address. */
1467	if (phy != sc->bce_phy_addr) {
1468		DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1469		return(0);
1470	}
1471
1472	DB_PRINT_PHY_REG(reg, val);
1473
1474	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1475		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1476		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1477
1478		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1479		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1480
1481		DELAY(40);
1482	}
1483
1484	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1485		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1486		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1487	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1488
1489	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1490		DELAY(10);
1491
1492		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1493		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1494			DELAY(5);
1495			break;
1496		}
1497	}
1498
1499	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1500		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1501			__FILE__, __LINE__);
1502
1503	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1504		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1505		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1506
1507		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1508		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1509
1510		DELAY(40);
1511	}
1512
1513	return 0;
1514}
1515
1516
1517/****************************************************************************/
1518/* MII bus status change.                                                   */
1519/*                                                                          */
1520/* Called by the MII bus driver when the PHY establishes link to set the    */
1521/* MAC interface registers.                                                 */
1522/*                                                                          */
1523/* Returns:                                                                 */
1524/*   Nothing.                                                               */
1525/****************************************************************************/
1526static void
1527bce_miibus_statchg(device_t dev)
1528{
1529	struct bce_softc *sc;
1530	struct mii_data *mii;
1531	int val;
1532
1533	sc = device_get_softc(dev);
1534
1535	DBENTER(BCE_VERBOSE_PHY);
1536
1537	mii = device_get_softc(sc->bce_miibus);
1538
1539	val = REG_RD(sc, BCE_EMAC_MODE);
1540	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1541		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1542		BCE_EMAC_MODE_25G);
1543
1544	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
1545	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1546	case IFM_10_T:
1547		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1548			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1549			val |= BCE_EMAC_MODE_PORT_MII_10;
1550			break;
1551		}
1552		/* fall-through */
1553	case IFM_100_TX:
1554		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1555		val |= BCE_EMAC_MODE_PORT_MII;
1556		break;
1557	case IFM_2500_SX:
1558		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1559		val |= BCE_EMAC_MODE_25G;
1560		/* fall-through */
1561	case IFM_1000_T:
1562	case IFM_1000_SX:
1563		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1564		val |= BCE_EMAC_MODE_PORT_GMII;
1565		break;
1566	default:
1567		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1568			"interface.\n");
1569		val |= BCE_EMAC_MODE_PORT_GMII;
1570	}
1571
1572	/* Set half or full duplex based on the duplicity negotiated by the PHY. */
1573	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1574		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1575		val |= BCE_EMAC_MODE_HALF_DUPLEX;
1576	} else
1577		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1578
1579	REG_WR(sc, BCE_EMAC_MODE, val);
1580
1581#if 0
1582	/* ToDo: Enable flow control support in brgphy and bge. */
1583	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1584	if (mii->mii_media_active & IFM_FLAG0)
1585		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1586	if (mii->mii_media_active & IFM_FLAG1)
1587		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1588#endif
1589
1590	DBEXIT(BCE_VERBOSE_PHY);
1591}
1592
1593
1594/****************************************************************************/
1595/* Acquire NVRAM lock.                                                      */
1596/*                                                                          */
1597/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1598/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1599/* for use by the driver.                                                   */
1600/*                                                                          */
1601/* Returns:                                                                 */
1602/*   0 on success, positive value on failure.                               */
1603/****************************************************************************/
1604static int
1605bce_acquire_nvram_lock(struct bce_softc *sc)
1606{
1607	u32 val;
1608	int j, rc = 0;
1609
1610	DBENTER(BCE_VERBOSE_NVRAM);
1611
1612	/* Request access to the flash interface. */
1613	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1614	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1615		val = REG_RD(sc, BCE_NVM_SW_ARB);
1616		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1617			break;
1618
1619		DELAY(5);
1620	}
1621
1622	if (j >= NVRAM_TIMEOUT_COUNT) {
1623		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1624		rc = EBUSY;
1625	}
1626
1627	DBEXIT(BCE_VERBOSE_NVRAM);
1628	return (rc);
1629}
1630
1631
1632/****************************************************************************/
1633/* Release NVRAM lock.                                                      */
1634/*                                                                          */
1635/* When the caller is finished accessing NVRAM the lock must be released.   */
1636/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1637/* for use by the driver.                                                   */
1638/*                                                                          */
1639/* Returns:                                                                 */
1640/*   0 on success, positive value on failure.                               */
1641/****************************************************************************/
1642static int
1643bce_release_nvram_lock(struct bce_softc *sc)
1644{
1645	u32 val;
1646	int j, rc = 0;
1647
1648	DBENTER(BCE_VERBOSE_NVRAM);
1649
1650	/*
1651	 * Relinquish nvram interface.
1652	 */
1653	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1654
1655	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1656		val = REG_RD(sc, BCE_NVM_SW_ARB);
1657		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1658			break;
1659
1660		DELAY(5);
1661	}
1662
1663	if (j >= NVRAM_TIMEOUT_COUNT) {
1664		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1665		rc = EBUSY;
1666	}
1667
1668	DBEXIT(BCE_VERBOSE_NVRAM);
1669	return (rc);
1670}
1671
1672
1673#ifdef BCE_NVRAM_WRITE_SUPPORT
1674/****************************************************************************/
1675/* Enable NVRAM write access.                                               */
1676/*                                                                          */
1677/* Before writing to NVRAM the caller must enable NVRAM writes.             */
1678/*                                                                          */
1679/* Returns:                                                                 */
1680/*   0 on success, positive value on failure.                               */
1681/****************************************************************************/
1682static int
1683bce_enable_nvram_write(struct bce_softc *sc)
1684{
1685	u32 val;
1686	int rc = 0;
1687
1688	DBENTER(BCE_VERBOSE_NVRAM);
1689
1690	val = REG_RD(sc, BCE_MISC_CFG);
1691	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1692
1693	if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
1694		int j;
1695
1696		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1697		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1698
1699		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1700			DELAY(5);
1701
1702			val = REG_RD(sc, BCE_NVM_COMMAND);
1703			if (val & BCE_NVM_COMMAND_DONE)
1704				break;
1705		}
1706
1707		if (j >= NVRAM_TIMEOUT_COUNT) {
1708			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1709			rc = EBUSY;
1710		}
1711	}
1712
1713	DBENTER(BCE_VERBOSE_NVRAM);
1714	return (rc);
1715}
1716
1717
1718/****************************************************************************/
1719/* Disable NVRAM write access.                                              */
1720/*                                                                          */
1721/* When the caller is finished writing to NVRAM write access must be        */
1722/* disabled.                                                                */
1723/*                                                                          */
1724/* Returns:                                                                 */
1725/*   Nothing.                                                               */
1726/****************************************************************************/
1727static void
1728bce_disable_nvram_write(struct bce_softc *sc)
1729{
1730	u32 val;
1731
1732	DBENTER(BCE_VERBOSE_NVRAM);
1733
1734	val = REG_RD(sc, BCE_MISC_CFG);
1735	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1736
1737	DBEXIT(BCE_VERBOSE_NVRAM);
1738
1739}
1740#endif
1741
1742
1743/****************************************************************************/
1744/* Enable NVRAM access.                                                     */
1745/*                                                                          */
1746/* Before accessing NVRAM for read or write operations the caller must      */
1747/* enabled NVRAM access.                                                    */
1748/*                                                                          */
1749/* Returns:                                                                 */
1750/*   Nothing.                                                               */
1751/****************************************************************************/
1752static void
1753bce_enable_nvram_access(struct bce_softc *sc)
1754{
1755	u32 val;
1756
1757	DBENTER(BCE_VERBOSE_NVRAM);
1758
1759	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1760	/* Enable both bits, even on read. */
1761	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1762	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1763
1764	DBEXIT(BCE_VERBOSE_NVRAM);
1765}
1766
1767
1768/****************************************************************************/
1769/* Disable NVRAM access.                                                    */
1770/*                                                                          */
1771/* When the caller is finished accessing NVRAM access must be disabled.     */
1772/*                                                                          */
1773/* Returns:                                                                 */
1774/*   Nothing.                                                               */
1775/****************************************************************************/
1776static void
1777bce_disable_nvram_access(struct bce_softc *sc)
1778{
1779	u32 val;
1780
1781	DBENTER(BCE_VERBOSE_NVRAM);
1782
1783	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1784
1785	/* Disable both bits, even after read. */
1786	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1787		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1788			BCE_NVM_ACCESS_ENABLE_WR_EN));
1789
1790	DBEXIT(BCE_VERBOSE_NVRAM);
1791}
1792
1793
1794#ifdef BCE_NVRAM_WRITE_SUPPORT
1795/****************************************************************************/
1796/* Erase NVRAM page before writing.                                         */
1797/*                                                                          */
1798/* Non-buffered flash parts require that a page be erased before it is      */
1799/* written.                                                                 */
1800/*                                                                          */
1801/* Returns:                                                                 */
1802/*   0 on success, positive value on failure.                               */
1803/****************************************************************************/
1804static int
1805bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1806{
1807	u32 cmd;
1808	int j, rc = 0;
1809
1810	DBENTER(BCE_VERBOSE_NVRAM);
1811
1812	/* Buffered flash doesn't require an erase. */
1813	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
1814		goto bce_nvram_erase_page_exit;
1815
1816	/* Build an erase command. */
1817	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1818	      BCE_NVM_COMMAND_DOIT;
1819
1820	/*
1821	 * Clear the DONE bit separately, set the NVRAM adress to erase,
1822	 * and issue the erase command.
1823	 */
1824	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1825	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1826	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1827
1828	/* Wait for completion. */
1829	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1830		u32 val;
1831
1832		DELAY(5);
1833
1834		val = REG_RD(sc, BCE_NVM_COMMAND);
1835		if (val & BCE_NVM_COMMAND_DONE)
1836			break;
1837	}
1838
1839	if (j >= NVRAM_TIMEOUT_COUNT) {
1840		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1841		rc = EBUSY;
1842	}
1843
1844bce_nvram_erase_page_exit:
1845	DBEXIT(BCE_VERBOSE_NVRAM);
1846	return (rc);
1847}
1848#endif /* BCE_NVRAM_WRITE_SUPPORT */
1849
1850
1851/****************************************************************************/
1852/* Read a dword (32 bits) from NVRAM.                                       */
1853/*                                                                          */
1854/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1855/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1856/*                                                                          */
1857/* Returns:                                                                 */
1858/*   0 on success and the 32 bit value read, positive value on failure.     */
1859/****************************************************************************/
1860static int
1861bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1862							u32 cmd_flags)
1863{
1864	u32 cmd;
1865	int i, rc = 0;
1866
1867	DBENTER(BCE_EXTREME_NVRAM);
1868
1869	/* Build the command word. */
1870	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1871
1872	/* Calculate the offset for buffered flash if translation is used. */
1873	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1874		offset = ((offset / sc->bce_flash_info->page_size) <<
1875			   sc->bce_flash_info->page_bits) +
1876			  (offset % sc->bce_flash_info->page_size);
1877	}
1878
1879	/*
1880	 * Clear the DONE bit separately, set the address to read,
1881	 * and issue the read.
1882	 */
1883	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1884	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1885	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1886
1887	/* Wait for completion. */
1888	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1889		u32 val;
1890
1891		DELAY(5);
1892
1893		val = REG_RD(sc, BCE_NVM_COMMAND);
1894		if (val & BCE_NVM_COMMAND_DONE) {
1895			val = REG_RD(sc, BCE_NVM_READ);
1896
1897			val = bce_be32toh(val);
1898			memcpy(ret_val, &val, 4);
1899			break;
1900		}
1901	}
1902
1903	/* Check for errors. */
1904	if (i >= NVRAM_TIMEOUT_COUNT) {
1905		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1906			__FILE__, __LINE__, offset);
1907		rc = EBUSY;
1908	}
1909
1910	DBEXIT(BCE_EXTREME_NVRAM);
1911	return(rc);
1912}
1913
1914
1915#ifdef BCE_NVRAM_WRITE_SUPPORT
1916/****************************************************************************/
1917/* Write a dword (32 bits) to NVRAM.                                        */
1918/*                                                                          */
1919/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1920/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1921/* enabled NVRAM write access.                                              */
1922/*                                                                          */
1923/* Returns:                                                                 */
1924/*   0 on success, positive value on failure.                               */
1925/****************************************************************************/
1926static int
1927bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1928	u32 cmd_flags)
1929{
1930	u32 cmd, val32;
1931	int j, rc = 0;
1932
1933	DBENTER(BCE_VERBOSE_NVRAM);
1934
1935	/* Build the command word. */
1936	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1937
1938	/* Calculate the offset for buffered flash if translation is used. */
1939	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1940		offset = ((offset / sc->bce_flash_info->page_size) <<
1941			  sc->bce_flash_info->page_bits) +
1942			 (offset % sc->bce_flash_info->page_size);
1943	}
1944
1945	/*
1946	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1947	 * set the NVRAM address to write, and issue the write command
1948	 */
1949	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1950	memcpy(&val32, val, 4);
1951	val32 = htobe32(val32);
1952	REG_WR(sc, BCE_NVM_WRITE, val32);
1953	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1954	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1955
1956	/* Wait for completion. */
1957	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1958		DELAY(5);
1959
1960		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1961			break;
1962	}
1963	if (j >= NVRAM_TIMEOUT_COUNT) {
1964		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1965			__FILE__, __LINE__, offset);
1966		rc = EBUSY;
1967	}
1968
1969	DBEXIT(BCE_VERBOSE_NVRAM);
1970	return (rc);
1971}
1972#endif /* BCE_NVRAM_WRITE_SUPPORT */
1973
1974
1975/****************************************************************************/
1976/* Initialize NVRAM access.                                                 */
1977/*                                                                          */
1978/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1979/* access that device.                                                      */
1980/*                                                                          */
1981/* Returns:                                                                 */
1982/*   0 on success, positive value on failure.                               */
1983/****************************************************************************/
1984static int
1985bce_init_nvram(struct bce_softc *sc)
1986{
1987	u32 val;
1988	int j, entry_count, rc = 0;
1989	struct flash_spec *flash;
1990
1991	DBENTER(BCE_VERBOSE_NVRAM);
1992
1993	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1994		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1995		sc->bce_flash_info = &flash_5709;
1996		goto bce_init_nvram_get_flash_size;
1997	}
1998
1999	/* Determine the selected interface. */
2000	val = REG_RD(sc, BCE_NVM_CFG1);
2001
2002	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2003
2004	/*
2005	 * Flash reconfiguration is required to support additional
2006	 * NVRAM devices not directly supported in hardware.
2007	 * Check if the flash interface was reconfigured
2008	 * by the bootcode.
2009	 */
2010
2011	if (val & 0x40000000) {
2012		/* Flash interface reconfigured by bootcode. */
2013
2014		DBPRINT(sc,BCE_INFO_LOAD,
2015			"bce_init_nvram(): Flash WAS reconfigured.\n");
2016
2017		for (j = 0, flash = &flash_table[0]; j < entry_count;
2018		     j++, flash++) {
2019			if ((val & FLASH_BACKUP_STRAP_MASK) ==
2020			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2021				sc->bce_flash_info = flash;
2022				break;
2023			}
2024		}
2025	} else {
2026		/* Flash interface not yet reconfigured. */
2027		u32 mask;
2028
2029		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2030			__FUNCTION__);
2031
2032		if (val & (1 << 23))
2033			mask = FLASH_BACKUP_STRAP_MASK;
2034		else
2035			mask = FLASH_STRAP_MASK;
2036
2037		/* Look for the matching NVRAM device configuration data. */
2038		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2039
2040			/* Check if the device matches any of the known devices. */
2041			if ((val & mask) == (flash->strapping & mask)) {
2042				/* Found a device match. */
2043				sc->bce_flash_info = flash;
2044
2045				/* Request access to the flash interface. */
2046				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2047					return rc;
2048
2049				/* Reconfigure the flash interface. */
2050				bce_enable_nvram_access(sc);
2051				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2052				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2053				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2054				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2055				bce_disable_nvram_access(sc);
2056				bce_release_nvram_lock(sc);
2057
2058				break;
2059			}
2060		}
2061	}
2062
2063	/* Check if a matching device was found. */
2064	if (j == entry_count) {
2065		sc->bce_flash_info = NULL;
2066		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2067			__FILE__, __LINE__);
2068		rc = ENODEV;
2069	}
2070
2071bce_init_nvram_get_flash_size:
2072	/* Write the flash config data to the shared memory interface. */
2073	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
2074	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2075	if (val)
2076		sc->bce_flash_size = val;
2077	else
2078		sc->bce_flash_size = sc->bce_flash_info->total_size;
2079
2080	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2081		__FUNCTION__, sc->bce_flash_info->name,
2082		sc->bce_flash_info->total_size);
2083
2084	DBEXIT(BCE_VERBOSE_NVRAM);
2085	return rc;
2086}
2087
2088
2089/****************************************************************************/
2090/* Read an arbitrary range of data from NVRAM.                              */
2091/*                                                                          */
2092/* Prepares the NVRAM interface for access and reads the requested data     */
2093/* into the supplied buffer.                                                */
2094/*                                                                          */
2095/* Returns:                                                                 */
2096/*   0 on success and the data read, positive value on failure.             */
2097/****************************************************************************/
2098static int
2099bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2100	int buf_size)
2101{
2102	int rc = 0;
2103	u32 cmd_flags, offset32, len32, extra;
2104
2105	DBENTER(BCE_VERBOSE_NVRAM);
2106
2107	if (buf_size == 0)
2108		goto bce_nvram_read_exit;
2109
2110	/* Request access to the flash interface. */
2111	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2112		goto bce_nvram_read_exit;
2113
2114	/* Enable access to flash interface */
2115	bce_enable_nvram_access(sc);
2116
2117	len32 = buf_size;
2118	offset32 = offset;
2119	extra = 0;
2120
2121	cmd_flags = 0;
2122
2123	if (offset32 & 3) {
2124		u8 buf[4];
2125		u32 pre_len;
2126
2127		offset32 &= ~3;
2128		pre_len = 4 - (offset & 3);
2129
2130		if (pre_len >= len32) {
2131			pre_len = len32;
2132			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2133		}
2134		else {
2135			cmd_flags = BCE_NVM_COMMAND_FIRST;
2136		}
2137
2138		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2139
2140		if (rc)
2141			return rc;
2142
2143		memcpy(ret_buf, buf + (offset & 3), pre_len);
2144
2145		offset32 += 4;
2146		ret_buf += pre_len;
2147		len32 -= pre_len;
2148	}
2149
2150	if (len32 & 3) {
2151		extra = 4 - (len32 & 3);
2152		len32 = (len32 + 4) & ~3;
2153	}
2154
2155	if (len32 == 4) {
2156		u8 buf[4];
2157
2158		if (cmd_flags)
2159			cmd_flags = BCE_NVM_COMMAND_LAST;
2160		else
2161			cmd_flags = BCE_NVM_COMMAND_FIRST |
2162				    BCE_NVM_COMMAND_LAST;
2163
2164		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2165
2166		memcpy(ret_buf, buf, 4 - extra);
2167	}
2168	else if (len32 > 0) {
2169		u8 buf[4];
2170
2171		/* Read the first word. */
2172		if (cmd_flags)
2173			cmd_flags = 0;
2174		else
2175			cmd_flags = BCE_NVM_COMMAND_FIRST;
2176
2177		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2178
2179		/* Advance to the next dword. */
2180		offset32 += 4;
2181		ret_buf += 4;
2182		len32 -= 4;
2183
2184		while (len32 > 4 && rc == 0) {
2185			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2186
2187			/* Advance to the next dword. */
2188			offset32 += 4;
2189			ret_buf += 4;
2190			len32 -= 4;
2191		}
2192
2193		if (rc)
2194			goto bce_nvram_read_locked_exit;
2195
2196		cmd_flags = BCE_NVM_COMMAND_LAST;
2197		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2198
2199		memcpy(ret_buf, buf, 4 - extra);
2200	}
2201
2202bce_nvram_read_locked_exit:
2203	/* Disable access to flash interface and release the lock. */
2204	bce_disable_nvram_access(sc);
2205	bce_release_nvram_lock(sc);
2206
2207bce_nvram_read_exit:
2208	DBEXIT(BCE_VERBOSE_NVRAM);
2209	return rc;
2210}
2211
2212
2213#ifdef BCE_NVRAM_WRITE_SUPPORT
2214/****************************************************************************/
2215/* Write an arbitrary range of data from NVRAM.                             */
2216/*                                                                          */
2217/* Prepares the NVRAM interface for write access and writes the requested   */
2218/* data from the supplied buffer.  The caller is responsible for            */
2219/* calculating any appropriate CRCs.                                        */
2220/*                                                                          */
2221/* Returns:                                                                 */
2222/*   0 on success, positive value on failure.                               */
2223/****************************************************************************/
2224static int
2225bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2226	int buf_size)
2227{
2228	u32 written, offset32, len32;
2229	u8 *buf, start[4], end[4];
2230	int rc = 0;
2231	int align_start, align_end;
2232
2233	DBENTER(BCE_VERBOSE_NVRAM);
2234
2235	buf = data_buf;
2236	offset32 = offset;
2237	len32 = buf_size;
2238	align_start = align_end = 0;
2239
2240	if ((align_start = (offset32 & 3))) {
2241		offset32 &= ~3;
2242		len32 += align_start;
2243		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2244			goto bce_nvram_write_exit;
2245	}
2246
2247	if (len32 & 3) {
2248	       	if ((len32 > 4) || !align_start) {
2249			align_end = 4 - (len32 & 3);
2250			len32 += align_end;
2251			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2252				end, 4))) {
2253				goto bce_nvram_write_exit;
2254			}
2255		}
2256	}
2257
2258	if (align_start || align_end) {
2259		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2260		if (buf == 0) {
2261			rc = ENOMEM;
2262			goto bce_nvram_write_exit;
2263		}
2264
2265		if (align_start) {
2266			memcpy(buf, start, 4);
2267		}
2268
2269		if (align_end) {
2270			memcpy(buf + len32 - 4, end, 4);
2271		}
2272		memcpy(buf + align_start, data_buf, buf_size);
2273	}
2274
2275	written = 0;
2276	while ((written < len32) && (rc == 0)) {
2277		u32 page_start, page_end, data_start, data_end;
2278		u32 addr, cmd_flags;
2279		int i;
2280		u8 flash_buffer[264];
2281
2282	    /* Find the page_start addr */
2283		page_start = offset32 + written;
2284		page_start -= (page_start % sc->bce_flash_info->page_size);
2285		/* Find the page_end addr */
2286		page_end = page_start + sc->bce_flash_info->page_size;
2287		/* Find the data_start addr */
2288		data_start = (written == 0) ? offset32 : page_start;
2289		/* Find the data_end addr */
2290		data_end = (page_end > offset32 + len32) ?
2291			(offset32 + len32) : page_end;
2292
2293		/* Request access to the flash interface. */
2294		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2295			goto bce_nvram_write_exit;
2296
2297		/* Enable access to flash interface */
2298		bce_enable_nvram_access(sc);
2299
2300		cmd_flags = BCE_NVM_COMMAND_FIRST;
2301		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2302			int j;
2303
2304			/* Read the whole page into the buffer
2305			 * (non-buffer flash only) */
2306			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2307				if (j == (sc->bce_flash_info->page_size - 4)) {
2308					cmd_flags |= BCE_NVM_COMMAND_LAST;
2309				}
2310				rc = bce_nvram_read_dword(sc,
2311					page_start + j,
2312					&flash_buffer[j],
2313					cmd_flags);
2314
2315				if (rc)
2316					goto bce_nvram_write_locked_exit;
2317
2318				cmd_flags = 0;
2319			}
2320		}
2321
2322		/* Enable writes to flash interface (unlock write-protect) */
2323		if ((rc = bce_enable_nvram_write(sc)) != 0)
2324			goto bce_nvram_write_locked_exit;
2325
2326		/* Erase the page */
2327		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2328			goto bce_nvram_write_locked_exit;
2329
2330		/* Re-enable the write again for the actual write */
2331		bce_enable_nvram_write(sc);
2332
2333		/* Loop to write back the buffer data from page_start to
2334		 * data_start */
2335		i = 0;
2336		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2337			for (addr = page_start; addr < data_start;
2338				addr += 4, i += 4) {
2339
2340				rc = bce_nvram_write_dword(sc, addr,
2341					&flash_buffer[i], cmd_flags);
2342
2343				if (rc != 0)
2344					goto bce_nvram_write_locked_exit;
2345
2346				cmd_flags = 0;
2347			}
2348		}
2349
2350		/* Loop to write the new data from data_start to data_end */
2351		for (addr = data_start; addr < data_end; addr += 4, i++) {
2352			if ((addr == page_end - 4) ||
2353				((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2354				(addr == data_end - 4))) {
2355
2356				cmd_flags |= BCE_NVM_COMMAND_LAST;
2357			}
2358			rc = bce_nvram_write_dword(sc, addr, buf,
2359				cmd_flags);
2360
2361			if (rc != 0)
2362				goto bce_nvram_write_locked_exit;
2363
2364			cmd_flags = 0;
2365			buf += 4;
2366		}
2367
2368		/* Loop to write back the buffer data from data_end
2369		 * to page_end */
2370		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2371			for (addr = data_end; addr < page_end;
2372				addr += 4, i += 4) {
2373
2374				if (addr == page_end-4) {
2375					cmd_flags = BCE_NVM_COMMAND_LAST;
2376                		}
2377				rc = bce_nvram_write_dword(sc, addr,
2378					&flash_buffer[i], cmd_flags);
2379
2380				if (rc != 0)
2381					goto bce_nvram_write_locked_exit;
2382
2383				cmd_flags = 0;
2384			}
2385		}
2386
2387		/* Disable writes to flash interface (lock write-protect) */
2388		bce_disable_nvram_write(sc);
2389
2390		/* Disable access to flash interface */
2391		bce_disable_nvram_access(sc);
2392		bce_release_nvram_lock(sc);
2393
2394		/* Increment written */
2395		written += data_end - data_start;
2396	}
2397
2398	goto bce_nvram_write_exit;
2399
2400bce_nvram_write_locked_exit:
2401		bce_disable_nvram_write(sc);
2402		bce_disable_nvram_access(sc);
2403		bce_release_nvram_lock(sc);
2404
2405bce_nvram_write_exit:
2406	if (align_start || align_end)
2407		free(buf, M_DEVBUF);
2408
2409	DBEXIT(BCE_VERBOSE_NVRAM);
2410	return (rc);
2411}
2412#endif /* BCE_NVRAM_WRITE_SUPPORT */
2413
2414
2415/****************************************************************************/
2416/* Verifies that NVRAM is accessible and contains valid data.               */
2417/*                                                                          */
2418/* Reads the configuration data from NVRAM and verifies that the CRC is     */
2419/* correct.                                                                 */
2420/*                                                                          */
2421/* Returns:                                                                 */
2422/*   0 on success, positive value on failure.                               */
2423/****************************************************************************/
2424static int
2425bce_nvram_test(struct bce_softc *sc)
2426{
2427	u32 buf[BCE_NVRAM_SIZE / 4];
2428	u8 *data = (u8 *) buf;
2429	int rc = 0;
2430	u32 magic, csum;
2431
2432	DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2433
2434	/*
2435	 * Check that the device NVRAM is valid by reading
2436	 * the magic value at offset 0.
2437	 */
2438	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2439		BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", __FILE__, __LINE__);
2440		goto bce_nvram_test_exit;
2441	}
2442
2443	/*
2444	 * Verify that offset 0 of the NVRAM contains
2445	 * a valid magic number.
2446	 */
2447    magic = bce_be32toh(buf[0]);
2448	if (magic != BCE_NVRAM_MAGIC) {
2449		rc = ENODEV;
2450		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2451			"Found: 0x%08X\n",
2452			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2453		goto bce_nvram_test_exit;
2454	}
2455
2456	/*
2457	 * Verify that the device NVRAM includes valid
2458	 * configuration data.
2459	 */
2460	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2461		BCE_PRINTF("%s(%d): Unable to read Manufacturing Information from "
2462			"NVRAM!\n", __FILE__, __LINE__);
2463		goto bce_nvram_test_exit;
2464	}
2465
2466	csum = ether_crc32_le(data, 0x100);
2467	if (csum != BCE_CRC32_RESIDUAL) {
2468		rc = ENODEV;
2469		BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2470			"Expected: 0x%08X, Found: 0x%08X\n",
2471			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2472		goto bce_nvram_test_exit;
2473	}
2474
2475	csum = ether_crc32_le(data + 0x100, 0x100);
2476	if (csum != BCE_CRC32_RESIDUAL) {
2477		rc = ENODEV;
2478		BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2479			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2480			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2481	}
2482
2483bce_nvram_test_exit:
2484	DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2485	return rc;
2486}
2487
2488
2489/****************************************************************************/
2490/* Identifies the current media type of the controller and sets the PHY     */
2491/* address.                                                                 */
2492/*                                                                          */
2493/* Returns:                                                                 */
2494/*   Nothing.                                                               */
2495/****************************************************************************/
2496static void
2497bce_get_media(struct bce_softc *sc)
2498{
2499	u32 val;
2500
2501	DBENTER(BCE_VERBOSE);
2502
2503	/* Assume PHY address for copper controllers. */
2504	sc->bce_phy_addr = 1;
2505
2506	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2507 		u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
2508		u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2509		u32 strap;
2510
2511		/*
2512		 * The BCM5709S is software configurable
2513		 * for Copper or SerDes operation.
2514		 */
2515		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2516			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for copper.\n");
2517			goto bce_get_media_exit;
2518		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2519			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for dual media.\n");
2520			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2521			goto bce_get_media_exit;
2522		}
2523
2524		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2525			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2526		else
2527			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
2528
2529		if (pci_get_function(sc->bce_dev) == 0) {
2530			switch (strap) {
2531			case 0x4:
2532			case 0x5:
2533			case 0x6:
2534				DBPRINT(sc, BCE_INFO_LOAD,
2535					"BCM5709 s/w configured for SerDes.\n");
2536				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2537			default:
2538				DBPRINT(sc, BCE_INFO_LOAD,
2539					"BCM5709 s/w configured for Copper.\n");
2540			}
2541		} else {
2542			switch (strap) {
2543			case 0x1:
2544			case 0x2:
2545			case 0x4:
2546				DBPRINT(sc, BCE_INFO_LOAD,
2547					"BCM5709 s/w configured for SerDes.\n");
2548				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2549			default:
2550				DBPRINT(sc, BCE_INFO_LOAD,
2551					"BCM5709 s/w configured for Copper.\n");
2552			}
2553		}
2554
2555	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
2556		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2557
2558	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
2559		sc->bce_flags |= BCE_NO_WOL_FLAG;
2560		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2561			sc->bce_phy_addr = 2;
2562			val = REG_RD_IND(sc, sc->bce_shmem_base +
2563				 BCE_SHARED_HW_CFG_CONFIG);
2564			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
2565				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
2566				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
2567			}
2568		}
2569	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
2570		   (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
2571		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
2572
2573bce_get_media_exit:
2574	DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
2575		"Using PHY address %d.\n", sc->bce_phy_addr);
2576
2577	DBEXIT(BCE_VERBOSE);
2578}
2579
2580
2581/****************************************************************************/
2582/* Free any DMA memory owned by the driver.                                 */
2583/*                                                                          */
2584/* Scans through each data structre that requires DMA memory and frees      */
2585/* the memory if allocated.                                                 */
2586/*                                                                          */
2587/* Returns:                                                                 */
2588/*   Nothing.                                                               */
2589/****************************************************************************/
2590static void
2591bce_dma_free(struct bce_softc *sc)
2592{
2593	int i;
2594
2595	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2596
2597	/* Free, unmap, and destroy the status block. */
2598	if (sc->status_block != NULL) {
2599		bus_dmamem_free(
2600			sc->status_tag,
2601		    sc->status_block,
2602		    sc->status_map);
2603		sc->status_block = NULL;
2604	}
2605
2606	if (sc->status_map != NULL) {
2607		bus_dmamap_unload(
2608			sc->status_tag,
2609		    sc->status_map);
2610		bus_dmamap_destroy(sc->status_tag,
2611		    sc->status_map);
2612		sc->status_map = NULL;
2613	}
2614
2615	if (sc->status_tag != NULL) {
2616		bus_dma_tag_destroy(sc->status_tag);
2617		sc->status_tag = NULL;
2618	}
2619
2620
2621	/* Free, unmap, and destroy the statistics block. */
2622	if (sc->stats_block != NULL) {
2623		bus_dmamem_free(
2624			sc->stats_tag,
2625		    sc->stats_block,
2626		    sc->stats_map);
2627		sc->stats_block = NULL;
2628	}
2629
2630	if (sc->stats_map != NULL) {
2631		bus_dmamap_unload(
2632			sc->stats_tag,
2633		    sc->stats_map);
2634		bus_dmamap_destroy(sc->stats_tag,
2635		    sc->stats_map);
2636		sc->stats_map = NULL;
2637	}
2638
2639	if (sc->stats_tag != NULL) {
2640		bus_dma_tag_destroy(sc->stats_tag);
2641		sc->stats_tag = NULL;
2642	}
2643
2644
2645	/* Free, unmap and destroy all context memory pages. */
2646	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2647		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2648		for (i = 0; i < sc->ctx_pages; i++ ) {
2649			if (sc->ctx_block[i] != NULL) {
2650				bus_dmamem_free(
2651					sc->ctx_tag,
2652				    sc->ctx_block[i],
2653				    sc->ctx_map[i]);
2654				sc->ctx_block[i] = NULL;
2655			}
2656
2657			if (sc->ctx_map[i] != NULL) {
2658				bus_dmamap_unload(
2659					sc->ctx_tag,
2660		    		sc->ctx_map[i]);
2661				bus_dmamap_destroy(
2662					sc->ctx_tag,
2663				    sc->ctx_map[i]);
2664				sc->ctx_map[i] = NULL;
2665			}
2666		}
2667
2668		/* Destroy the context memory tag. */
2669		if (sc->ctx_tag != NULL) {
2670			bus_dma_tag_destroy(sc->ctx_tag);
2671			sc->ctx_tag = NULL;
2672		}
2673	}
2674
2675
2676	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2677	for (i = 0; i < TX_PAGES; i++ ) {
2678		if (sc->tx_bd_chain[i] != NULL) {
2679			bus_dmamem_free(
2680				sc->tx_bd_chain_tag,
2681			    sc->tx_bd_chain[i],
2682			    sc->tx_bd_chain_map[i]);
2683			sc->tx_bd_chain[i] = NULL;
2684		}
2685
2686		if (sc->tx_bd_chain_map[i] != NULL) {
2687			bus_dmamap_unload(
2688				sc->tx_bd_chain_tag,
2689		    	sc->tx_bd_chain_map[i]);
2690			bus_dmamap_destroy(
2691				sc->tx_bd_chain_tag,
2692			    sc->tx_bd_chain_map[i]);
2693			sc->tx_bd_chain_map[i] = NULL;
2694		}
2695	}
2696
2697	/* Destroy the TX buffer descriptor tag. */
2698	if (sc->tx_bd_chain_tag != NULL) {
2699		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2700		sc->tx_bd_chain_tag = NULL;
2701	}
2702
2703
2704	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2705	for (i = 0; i < RX_PAGES; i++ ) {
2706		if (sc->rx_bd_chain[i] != NULL) {
2707			bus_dmamem_free(
2708				sc->rx_bd_chain_tag,
2709			    sc->rx_bd_chain[i],
2710			    sc->rx_bd_chain_map[i]);
2711			sc->rx_bd_chain[i] = NULL;
2712		}
2713
2714		if (sc->rx_bd_chain_map[i] != NULL) {
2715			bus_dmamap_unload(
2716				sc->rx_bd_chain_tag,
2717		    	sc->rx_bd_chain_map[i]);
2718			bus_dmamap_destroy(
2719				sc->rx_bd_chain_tag,
2720			    sc->rx_bd_chain_map[i]);
2721			sc->rx_bd_chain_map[i] = NULL;
2722		}
2723	}
2724
2725	/* Destroy the RX buffer descriptor tag. */
2726	if (sc->rx_bd_chain_tag != NULL) {
2727		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2728		sc->rx_bd_chain_tag = NULL;
2729	}
2730
2731
2732#ifdef BCE_USE_SPLIT_HEADER
2733	/* Free, unmap and destroy all page buffer descriptor chain pages. */
2734	for (i = 0; i < PG_PAGES; i++ ) {
2735		if (sc->pg_bd_chain[i] != NULL) {
2736			bus_dmamem_free(
2737				sc->pg_bd_chain_tag,
2738			    sc->pg_bd_chain[i],
2739			    sc->pg_bd_chain_map[i]);
2740			sc->pg_bd_chain[i] = NULL;
2741		}
2742
2743		if (sc->pg_bd_chain_map[i] != NULL) {
2744			bus_dmamap_unload(
2745				sc->pg_bd_chain_tag,
2746		    	sc->pg_bd_chain_map[i]);
2747			bus_dmamap_destroy(
2748				sc->pg_bd_chain_tag,
2749			    sc->pg_bd_chain_map[i]);
2750			sc->pg_bd_chain_map[i] = NULL;
2751		}
2752	}
2753
2754	/* Destroy the page buffer descriptor tag. */
2755	if (sc->pg_bd_chain_tag != NULL) {
2756		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2757		sc->pg_bd_chain_tag = NULL;
2758	}
2759#endif
2760
2761
2762	/* Unload and destroy the TX mbuf maps. */
2763	for (i = 0; i < TOTAL_TX_BD; i++) {
2764		if (sc->tx_mbuf_map[i] != NULL) {
2765			bus_dmamap_unload(sc->tx_mbuf_tag,
2766				sc->tx_mbuf_map[i]);
2767			bus_dmamap_destroy(sc->tx_mbuf_tag,
2768	 			sc->tx_mbuf_map[i]);
2769			sc->tx_mbuf_map[i] = NULL;
2770		}
2771	}
2772
2773	/* Destroy the TX mbuf tag. */
2774	if (sc->tx_mbuf_tag != NULL) {
2775		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2776		sc->tx_mbuf_tag = NULL;
2777	}
2778
2779	/* Unload and destroy the RX mbuf maps. */
2780	for (i = 0; i < TOTAL_RX_BD; i++) {
2781		if (sc->rx_mbuf_map[i] != NULL) {
2782			bus_dmamap_unload(sc->rx_mbuf_tag,
2783				sc->rx_mbuf_map[i]);
2784			bus_dmamap_destroy(sc->rx_mbuf_tag,
2785	 			sc->rx_mbuf_map[i]);
2786			sc->rx_mbuf_map[i] = NULL;
2787		}
2788	}
2789
2790	/* Destroy the RX mbuf tag. */
2791	if (sc->rx_mbuf_tag != NULL) {
2792		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2793		sc->rx_mbuf_tag = NULL;
2794	}
2795
2796#ifdef BCE_USE_SPLIT_HEADER
2797	/* Unload and destroy the page mbuf maps. */
2798	for (i = 0; i < TOTAL_PG_BD; i++) {
2799		if (sc->pg_mbuf_map[i] != NULL) {
2800			bus_dmamap_unload(sc->pg_mbuf_tag,
2801				sc->pg_mbuf_map[i]);
2802			bus_dmamap_destroy(sc->pg_mbuf_tag,
2803	 			sc->pg_mbuf_map[i]);
2804			sc->pg_mbuf_map[i] = NULL;
2805		}
2806	}
2807
2808	/* Destroy the page mbuf tag. */
2809	if (sc->pg_mbuf_tag != NULL) {
2810		bus_dma_tag_destroy(sc->pg_mbuf_tag);
2811		sc->pg_mbuf_tag = NULL;
2812	}
2813#endif
2814
2815	/* Destroy the parent tag */
2816	if (sc->parent_tag != NULL) {
2817		bus_dma_tag_destroy(sc->parent_tag);
2818		sc->parent_tag = NULL;
2819	}
2820
2821	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2822}
2823
2824
2825/****************************************************************************/
2826/* Get DMA memory from the OS.                                              */
2827/*                                                                          */
2828/* Validates that the OS has provided DMA buffers in response to a          */
2829/* bus_dmamap_load() call and saves the physical address of those buffers.  */
2830/* When the callback is used the OS will return 0 for the mapping function  */
2831/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2832/* failures back to the caller.                                             */
2833/*                                                                          */
2834/* Returns:                                                                 */
2835/*   Nothing.                                                               */
2836/****************************************************************************/
2837static void
2838bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2839{
2840	bus_addr_t *busaddr = arg;
2841
2842	/* Simulate a mapping failure. */
2843	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2844		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2845			__FILE__, __LINE__);
2846		error = ENOMEM);
2847
2848	/* Check for an error and signal the caller that an error occurred. */
2849	if (error) {
2850		printf("bce %s(%d): DMA mapping error! error = %d, "
2851		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2852		*busaddr = 0;
2853		return;
2854	}
2855
2856	*busaddr = segs->ds_addr;
2857	return;
2858}
2859
2860
2861/****************************************************************************/
2862/* Allocate any DMA memory needed by the driver.                            */
2863/*                                                                          */
2864/* Allocates DMA memory needed for the various global structures needed by  */
2865/* hardware.                                                                */
2866/*                                                                          */
2867/* Memory alignment requirements:                                           */
2868/* +-----------------+----------+----------+----------+----------+          */
2869/* |                 |   5706   |   5708   |   5709   |   5716   |          */
2870/* +-----------------+----------+----------+----------+----------+          */
2871/* |Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
2872/* |Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
2873/* |RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |          */
2874/* |PG Buffers       |   none   |   none   |   none   |   none   |          */
2875/* |TX Buffers       |   none   |   none   |   none   |   none   |          */
2876/* |Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |          */
2877/* +-----------------+----------+----------+----------+----------+          */
2878/*                                                                          */
2879/* (1) Must align with CPU page size (BCM_PAGE_SZIE).                       */
2880/*                                                                          */
2881/* Returns:                                                                 */
2882/*   0 for success, positive value for failure.                             */
2883/****************************************************************************/
2884static int
2885bce_dma_alloc(device_t dev)
2886{
2887	struct bce_softc *sc;
2888	int i, error, rc = 0;
2889	bus_addr_t busaddr;
2890	bus_size_t max_size, max_seg_size;
2891	int max_segments;
2892
2893	sc = device_get_softc(dev);
2894
2895	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
2896
2897	/*
2898	 * Allocate the parent bus DMA tag appropriate for PCI.
2899	 */
2900	if (bus_dma_tag_create(NULL,
2901			1,
2902			BCE_DMA_BOUNDARY,
2903			sc->max_bus_addr,
2904			BUS_SPACE_MAXADDR,
2905			NULL, NULL,
2906			MAXBSIZE,
2907			BUS_SPACE_UNRESTRICTED,
2908			BUS_SPACE_MAXSIZE_32BIT,
2909			0,
2910			NULL, NULL,
2911			&sc->parent_tag)) {
2912		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2913			__FILE__, __LINE__);
2914		rc = ENOMEM;
2915		goto bce_dma_alloc_exit;
2916	}
2917
2918	/*
2919	 * Create a DMA tag for the status block, allocate and clear the
2920	 * memory, map the memory into DMA space, and fetch the physical
2921	 * address of the block.
2922	 */
2923	if (bus_dma_tag_create(sc->parent_tag,
2924	    	BCE_DMA_ALIGN,
2925	    	BCE_DMA_BOUNDARY,
2926	    	sc->max_bus_addr,
2927	    	BUS_SPACE_MAXADDR,
2928	    	NULL, NULL,
2929	    	BCE_STATUS_BLK_SZ,
2930	    	1,
2931	    	BCE_STATUS_BLK_SZ,
2932	    	0,
2933	    	NULL, NULL,
2934	    	&sc->status_tag)) {
2935		BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2936			__FILE__, __LINE__);
2937		rc = ENOMEM;
2938		goto bce_dma_alloc_exit;
2939	}
2940
2941	if(bus_dmamem_alloc(sc->status_tag,
2942	    	(void **)&sc->status_block,
2943	    	BUS_DMA_NOWAIT,
2944	    	&sc->status_map)) {
2945		BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2946			__FILE__, __LINE__);
2947		rc = ENOMEM;
2948		goto bce_dma_alloc_exit;
2949	}
2950
2951	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2952
2953	error = bus_dmamap_load(sc->status_tag,
2954	    	sc->status_map,
2955	    	sc->status_block,
2956	    	BCE_STATUS_BLK_SZ,
2957	    	bce_dma_map_addr,
2958	    	&busaddr,
2959	    	BUS_DMA_NOWAIT);
2960
2961	if (error) {
2962		BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2963			__FILE__, __LINE__);
2964		rc = ENOMEM;
2965		goto bce_dma_alloc_exit;
2966	}
2967
2968	sc->status_block_paddr = busaddr;
2969	DBPRINT(sc, BCE_INFO, "%s(): status_block_paddr = 0x%jX\n",
2970		__FUNCTION__, (uintmax_t) sc->status_block_paddr);
2971
2972	/*
2973	 * Create a DMA tag for the statistics block, allocate and clear the
2974	 * memory, map the memory into DMA space, and fetch the physical
2975	 * address of the block.
2976	 */
2977	if (bus_dma_tag_create(sc->parent_tag,
2978	    	BCE_DMA_ALIGN,
2979	    	BCE_DMA_BOUNDARY,
2980	    	sc->max_bus_addr,
2981	    	BUS_SPACE_MAXADDR,
2982	    	NULL, NULL,
2983	    	BCE_STATS_BLK_SZ,
2984	    	1,
2985	    	BCE_STATS_BLK_SZ,
2986	    	0,
2987	    	NULL, NULL,
2988	    	&sc->stats_tag)) {
2989		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
2990			__FILE__, __LINE__);
2991		rc = ENOMEM;
2992		goto bce_dma_alloc_exit;
2993	}
2994
2995	if (bus_dmamem_alloc(sc->stats_tag,
2996	    	(void **)&sc->stats_block,
2997	    	BUS_DMA_NOWAIT,
2998	    	&sc->stats_map)) {
2999		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
3000			__FILE__, __LINE__);
3001		rc = ENOMEM;
3002		goto bce_dma_alloc_exit;
3003	}
3004
3005	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
3006
3007	error = bus_dmamap_load(sc->stats_tag,
3008	    	sc->stats_map,
3009	    	sc->stats_block,
3010	    	BCE_STATS_BLK_SZ,
3011	    	bce_dma_map_addr,
3012	    	&busaddr,
3013	    	BUS_DMA_NOWAIT);
3014
3015	if(error) {
3016		BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
3017			__FILE__, __LINE__);
3018		rc = ENOMEM;
3019		goto bce_dma_alloc_exit;
3020	}
3021
3022	sc->stats_block_paddr = busaddr;
3023	DBPRINT(sc, BCE_INFO, "%s(): stats_block_paddr = 0x%jX\n",
3024		__FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3025
3026	/* BCM5709 uses host memory as cache for context memory. */
3027	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3028		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3029		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
3030		if (sc->ctx_pages == 0)
3031			sc->ctx_pages = 1;
3032
3033		DBRUNIF((sc->ctx_pages > 512),
3034			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3035				__FILE__, __LINE__, sc->ctx_pages));
3036
3037		/*
3038		 * Create a DMA tag for the context pages,
3039		 * allocate and clear the memory, map the
3040		 * memory into DMA space, and fetch the
3041		 * physical address of the block.
3042		 */
3043		if(bus_dma_tag_create(sc->parent_tag,
3044			BCM_PAGE_SIZE,
3045		    BCE_DMA_BOUNDARY,
3046			sc->max_bus_addr,
3047			BUS_SPACE_MAXADDR,
3048			NULL, NULL,
3049			BCM_PAGE_SIZE,
3050			1,
3051			BCM_PAGE_SIZE,
3052			0,
3053			NULL, NULL,
3054			&sc->ctx_tag)) {
3055			BCE_PRINTF("%s(%d): Could not allocate CTX DMA tag!\n",
3056				__FILE__, __LINE__);
3057			rc = ENOMEM;
3058			goto bce_dma_alloc_exit;
3059		}
3060
3061		for (i = 0; i < sc->ctx_pages; i++) {
3062
3063			if(bus_dmamem_alloc(sc->ctx_tag,
3064		    		(void **)&sc->ctx_block[i],
3065	    		BUS_DMA_NOWAIT,
3066		    	&sc->ctx_map[i])) {
3067				BCE_PRINTF("%s(%d): Could not allocate CTX "
3068					"DMA memory!\n", __FILE__, __LINE__);
3069				rc = ENOMEM;
3070				goto bce_dma_alloc_exit;
3071			}
3072
3073			bzero((char *)sc->ctx_block[i], BCM_PAGE_SIZE);
3074
3075			error = bus_dmamap_load(sc->ctx_tag,
3076	    		sc->ctx_map[i],
3077	    		sc->ctx_block[i],
3078		    	BCM_PAGE_SIZE,
3079		    	bce_dma_map_addr,
3080	    		&busaddr,
3081	    		BUS_DMA_NOWAIT);
3082
3083			if (error) {
3084				BCE_PRINTF("%s(%d): Could not map CTX DMA memory!\n",
3085					__FILE__, __LINE__);
3086				rc = ENOMEM;
3087				goto bce_dma_alloc_exit;
3088			}
3089
3090			sc->ctx_paddr[i] = busaddr;
3091			DBPRINT(sc, BCE_INFO, "%s(): ctx_paddr[%d] = 0x%jX\n",
3092				__FUNCTION__, i, (uintmax_t) sc->ctx_paddr[i]);
3093		}
3094	}
3095
3096	/*
3097	 * Create a DMA tag for the TX buffer descriptor chain,
3098	 * allocate and clear the  memory, and fetch the
3099	 * physical address of the block.
3100	 */
3101	if(bus_dma_tag_create(sc->parent_tag,
3102			BCM_PAGE_SIZE,
3103		    BCE_DMA_BOUNDARY,
3104			sc->max_bus_addr,
3105			BUS_SPACE_MAXADDR,
3106			NULL, NULL,
3107			BCE_TX_CHAIN_PAGE_SZ,
3108			1,
3109			BCE_TX_CHAIN_PAGE_SZ,
3110			0,
3111			NULL, NULL,
3112			&sc->tx_bd_chain_tag)) {
3113		BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
3114			__FILE__, __LINE__);
3115		rc = ENOMEM;
3116		goto bce_dma_alloc_exit;
3117	}
3118
3119	for (i = 0; i < TX_PAGES; i++) {
3120
3121		if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
3122	    		(void **)&sc->tx_bd_chain[i],
3123	    		BUS_DMA_NOWAIT,
3124		    	&sc->tx_bd_chain_map[i])) {
3125			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3126				"chain DMA memory!\n", __FILE__, __LINE__);
3127			rc = ENOMEM;
3128			goto bce_dma_alloc_exit;
3129		}
3130
3131		error = bus_dmamap_load(sc->tx_bd_chain_tag,
3132	    		sc->tx_bd_chain_map[i],
3133	    		sc->tx_bd_chain[i],
3134		    	BCE_TX_CHAIN_PAGE_SZ,
3135		    	bce_dma_map_addr,
3136	    		&busaddr,
3137	    		BUS_DMA_NOWAIT);
3138
3139		if (error) {
3140			BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
3141				__FILE__, __LINE__);
3142			rc = ENOMEM;
3143			goto bce_dma_alloc_exit;
3144		}
3145
3146		sc->tx_bd_chain_paddr[i] = busaddr;
3147		DBPRINT(sc, BCE_INFO, "%s(): tx_bd_chain_paddr[%d] = 0x%jX\n",
3148			__FUNCTION__, i, (uintmax_t) sc->tx_bd_chain_paddr[i]);
3149	}
3150
3151	/* Check the required size before mapping to conserve resources. */
3152	if (bce_tso_enable) {
3153		max_size     = BCE_TSO_MAX_SIZE;
3154		max_segments = BCE_MAX_SEGMENTS;
3155		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3156	} else {
3157		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
3158		max_segments = BCE_MAX_SEGMENTS;
3159		max_seg_size = MCLBYTES;
3160	}
3161
3162	/* Create a DMA tag for TX mbufs. */
3163	if (bus_dma_tag_create(sc->parent_tag,
3164			1,
3165			BCE_DMA_BOUNDARY,
3166			sc->max_bus_addr,
3167			BUS_SPACE_MAXADDR,
3168			NULL, NULL,
3169			max_size,
3170			max_segments,
3171			max_seg_size,
3172			0,
3173			NULL, NULL,
3174			&sc->tx_mbuf_tag)) {
3175		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3176			__FILE__, __LINE__);
3177		rc = ENOMEM;
3178		goto bce_dma_alloc_exit;
3179	}
3180
3181	/* Create DMA maps for the TX mbufs clusters. */
3182	for (i = 0; i < TOTAL_TX_BD; i++) {
3183		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3184			&sc->tx_mbuf_map[i])) {
3185			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
3186				__FILE__, __LINE__);
3187			rc = ENOMEM;
3188			goto bce_dma_alloc_exit;
3189		}
3190	}
3191
3192	/*
3193	 * Create a DMA tag for the RX buffer descriptor chain,
3194	 * allocate and clear the memory, and fetch the physical
3195	 * address of the blocks.
3196	 */
3197	if (bus_dma_tag_create(sc->parent_tag,
3198			BCM_PAGE_SIZE,
3199			BCE_DMA_BOUNDARY,
3200			BUS_SPACE_MAXADDR,
3201			sc->max_bus_addr,
3202			NULL, NULL,
3203			BCE_RX_CHAIN_PAGE_SZ,
3204			1,
3205			BCE_RX_CHAIN_PAGE_SZ,
3206			0,
3207			NULL, NULL,
3208			&sc->rx_bd_chain_tag)) {
3209		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
3210			__FILE__, __LINE__);
3211		rc = ENOMEM;
3212		goto bce_dma_alloc_exit;
3213	}
3214
3215	for (i = 0; i < RX_PAGES; i++) {
3216
3217		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3218	    		(void **)&sc->rx_bd_chain[i],
3219	    		BUS_DMA_NOWAIT,
3220		    	&sc->rx_bd_chain_map[i])) {
3221			BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3222				"DMA memory!\n", __FILE__, __LINE__);
3223			rc = ENOMEM;
3224			goto bce_dma_alloc_exit;
3225		}
3226
3227		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3228
3229		error = bus_dmamap_load(sc->rx_bd_chain_tag,
3230	    		sc->rx_bd_chain_map[i],
3231	    		sc->rx_bd_chain[i],
3232		    	BCE_RX_CHAIN_PAGE_SZ,
3233		    	bce_dma_map_addr,
3234	    		&busaddr,
3235	    		BUS_DMA_NOWAIT);
3236
3237		if (error) {
3238			BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
3239				__FILE__, __LINE__);
3240			rc = ENOMEM;
3241			goto bce_dma_alloc_exit;
3242		}
3243
3244		sc->rx_bd_chain_paddr[i] = busaddr;
3245		DBPRINT(sc, BCE_INFO, "%s(): rx_bd_chain_paddr[%d] = 0x%jX\n",
3246			__FUNCTION__, i, (uintmax_t) sc->rx_bd_chain_paddr[i]);
3247	}
3248
3249	/*
3250	 * Create a DMA tag for RX mbufs.
3251	 */
3252#ifdef BCE_USE_SPLIT_HEADER
3253	max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3254		MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3255#else
3256	max_size = max_seg_size = MJUM9BYTES;
3257#endif
3258
3259	if (bus_dma_tag_create(sc->parent_tag,
3260			1,
3261			BCE_DMA_BOUNDARY,
3262			sc->max_bus_addr,
3263			BUS_SPACE_MAXADDR,
3264			NULL, NULL,
3265			max_size,
3266			1,
3267			max_seg_size,
3268			0,
3269			NULL, NULL,
3270	    	&sc->rx_mbuf_tag)) {
3271		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3272			__FILE__, __LINE__);
3273		rc = ENOMEM;
3274		goto bce_dma_alloc_exit;
3275	}
3276
3277	/* Create DMA maps for the RX mbuf clusters. */
3278	for (i = 0; i < TOTAL_RX_BD; i++) {
3279		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3280				&sc->rx_mbuf_map[i])) {
3281			BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
3282				__FILE__, __LINE__);
3283			rc = ENOMEM;
3284			goto bce_dma_alloc_exit;
3285		}
3286	}
3287
3288#ifdef BCE_USE_SPLIT_HEADER
3289	/*
3290	 * Create a DMA tag for the page buffer descriptor chain,
3291	 * allocate and clear the memory, and fetch the physical
3292	 * address of the blocks.
3293	 */
3294	if (bus_dma_tag_create(sc->parent_tag,
3295			BCM_PAGE_SIZE,
3296			BCE_DMA_BOUNDARY,
3297			BUS_SPACE_MAXADDR,
3298			sc->max_bus_addr,
3299			NULL, NULL,
3300			BCE_PG_CHAIN_PAGE_SZ,
3301			1,
3302			BCE_PG_CHAIN_PAGE_SZ,
3303			0,
3304			NULL, NULL,
3305			&sc->pg_bd_chain_tag)) {
3306		BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
3307			__FILE__, __LINE__);
3308		rc = ENOMEM;
3309		goto bce_dma_alloc_exit;
3310	}
3311
3312	for (i = 0; i < PG_PAGES; i++) {
3313
3314		if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3315	    		(void **)&sc->pg_bd_chain[i],
3316	    		BUS_DMA_NOWAIT,
3317		    	&sc->pg_bd_chain_map[i])) {
3318			BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
3319				"DMA memory!\n", __FILE__, __LINE__);
3320			rc = ENOMEM;
3321			goto bce_dma_alloc_exit;
3322		}
3323
3324		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
3325
3326		error = bus_dmamap_load(sc->pg_bd_chain_tag,
3327	    		sc->pg_bd_chain_map[i],
3328	    		sc->pg_bd_chain[i],
3329		    	BCE_PG_CHAIN_PAGE_SZ,
3330		    	bce_dma_map_addr,
3331	    		&busaddr,
3332	    		BUS_DMA_NOWAIT);
3333
3334		if (error) {
3335			BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
3336				__FILE__, __LINE__);
3337			rc = ENOMEM;
3338			goto bce_dma_alloc_exit;
3339		}
3340
3341		sc->pg_bd_chain_paddr[i] = busaddr;
3342		DBPRINT(sc, BCE_INFO, "%s(): pg_bd_chain_paddr[%d] = 0x%jX\n",
3343			__FUNCTION__, i, (uintmax_t) sc->pg_bd_chain_paddr[i]);
3344	}
3345
3346	/*
3347	 * Create a DMA tag for page mbufs.
3348	 */
3349	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
3350		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
3351
3352	if (bus_dma_tag_create(sc->parent_tag,
3353			1,
3354			BCE_DMA_BOUNDARY,
3355			sc->max_bus_addr,
3356			BUS_SPACE_MAXADDR,
3357			NULL, NULL,
3358			max_size,
3359			1,
3360			max_seg_size,
3361			0,
3362			NULL, NULL,
3363	    	&sc->pg_mbuf_tag)) {
3364		BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
3365			__FILE__, __LINE__);
3366		rc = ENOMEM;
3367		goto bce_dma_alloc_exit;
3368	}
3369
3370	/* Create DMA maps for the page mbuf clusters. */
3371	for (i = 0; i < TOTAL_PG_BD; i++) {
3372		if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3373				&sc->pg_mbuf_map[i])) {
3374			BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
3375				__FILE__, __LINE__);
3376			rc = ENOMEM;
3377			goto bce_dma_alloc_exit;
3378		}
3379	}
3380#endif
3381
3382bce_dma_alloc_exit:
3383	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3384	return(rc);
3385}
3386
3387
3388/****************************************************************************/
3389/* Release all resources used by the driver.                                */
3390/*                                                                          */
3391/* Releases all resources acquired by the driver including interrupts,      */
3392/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
3393/*                                                                          */
3394/* Returns:                                                                 */
3395/*   Nothing.                                                               */
3396/****************************************************************************/
3397static void
3398bce_release_resources(struct bce_softc *sc)
3399{
3400	device_t dev;
3401
3402	DBENTER(BCE_VERBOSE_RESET);
3403
3404	dev = sc->bce_dev;
3405
3406	bce_dma_free(sc);
3407
3408	if (sc->bce_intrhand != NULL) {
3409		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3410		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3411	}
3412
3413	if (sc->bce_res_irq != NULL) {
3414		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3415		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
3416			sc->bce_res_irq);
3417	}
3418
3419	if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3420		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3421		pci_release_msi(dev);
3422	}
3423
3424	if (sc->bce_res_mem != NULL) {
3425		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
3426		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
3427	}
3428
3429	if (sc->bce_ifp != NULL) {
3430		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3431		if_free(sc->bce_ifp);
3432	}
3433
3434	if (mtx_initialized(&sc->bce_mtx))
3435		BCE_LOCK_DESTROY(sc);
3436
3437	DBEXIT(BCE_VERBOSE_RESET);
3438}
3439
3440
3441/****************************************************************************/
3442/* Firmware synchronization.                                                */
3443/*                                                                          */
3444/* Before performing certain events such as a chip reset, synchronize with  */
3445/* the firmware first.                                                      */
3446/*                                                                          */
3447/* Returns:                                                                 */
3448/*   0 for success, positive value for failure.                             */
3449/****************************************************************************/
3450static int
3451bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3452{
3453	int i, rc = 0;
3454	u32 val;
3455
3456	DBENTER(BCE_VERBOSE_RESET);
3457
3458	/* Don't waste any time if we've timed out before. */
3459	if (sc->bce_fw_timed_out) {
3460		rc = EBUSY;
3461		goto bce_fw_sync_exit;
3462	}
3463
3464	/* Increment the message sequence number. */
3465	sc->bce_fw_wr_seq++;
3466	msg_data |= sc->bce_fw_wr_seq;
3467
3468 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n",
3469 		msg_data);
3470
3471	/* Send the message to the bootcode driver mailbox. */
3472	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
3473
3474	/* Wait for the bootcode to acknowledge the message. */
3475	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3476		/* Check for a response in the bootcode firmware mailbox. */
3477		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
3478		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3479			break;
3480		DELAY(1000);
3481	}
3482
3483	/* If we've timed out, tell the bootcode that we've stopped waiting. */
3484	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3485		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3486
3487		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3488			"msg_data = 0x%08X\n",
3489			__FILE__, __LINE__, msg_data);
3490
3491		msg_data &= ~BCE_DRV_MSG_CODE;
3492		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3493
3494		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
3495
3496		sc->bce_fw_timed_out = 1;
3497		rc = EBUSY;
3498	}
3499
3500bce_fw_sync_exit:
3501	DBEXIT(BCE_VERBOSE_RESET);
3502	return (rc);
3503}
3504
3505
3506/****************************************************************************/
3507/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
3508/*                                                                          */
3509/* Returns:                                                                 */
3510/*   Nothing.                                                               */
3511/****************************************************************************/
3512static void
3513bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
3514	u32 rv2p_code_len, u32 rv2p_proc)
3515{
3516	int i;
3517	u32 val;
3518
3519	DBENTER(BCE_VERBOSE_RESET);
3520
3521	/* Set the page size used by RV2P. */
3522	if (rv2p_proc == RV2P_PROC2) {
3523		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3524	}
3525
3526	for (i = 0; i < rv2p_code_len; i += 8) {
3527		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3528		rv2p_code++;
3529		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3530		rv2p_code++;
3531
3532		if (rv2p_proc == RV2P_PROC1) {
3533			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3534			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3535		}
3536		else {
3537			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3538			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3539		}
3540	}
3541
3542	/* Reset the processor, un-stall is done later. */
3543	if (rv2p_proc == RV2P_PROC1) {
3544		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3545	}
3546	else {
3547		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3548	}
3549
3550	DBEXIT(BCE_VERBOSE_RESET);
3551}
3552
3553
3554/****************************************************************************/
3555/* Load RISC processor firmware.                                            */
3556/*                                                                          */
3557/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
3558/* associated with a particular processor.                                  */
3559/*                                                                          */
3560/* Returns:                                                                 */
3561/*   Nothing.                                                               */
3562/****************************************************************************/
3563static void
3564bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3565	struct fw_info *fw)
3566{
3567	u32 offset;
3568	u32 val;
3569
3570	DBENTER(BCE_VERBOSE_RESET);
3571
3572	/* Halt the CPU. */
3573	val = REG_RD_IND(sc, cpu_reg->mode);
3574	val |= cpu_reg->mode_value_halt;
3575	REG_WR_IND(sc, cpu_reg->mode, val);
3576	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3577
3578	/* Load the Text area. */
3579	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3580	if (fw->text) {
3581		int j;
3582
3583		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3584			REG_WR_IND(sc, offset, fw->text[j]);
3585	        }
3586	}
3587
3588	/* Load the Data area. */
3589	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3590	if (fw->data) {
3591		int j;
3592
3593		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3594			REG_WR_IND(sc, offset, fw->data[j]);
3595		}
3596	}
3597
3598	/* Load the SBSS area. */
3599	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3600	if (fw->sbss) {
3601		int j;
3602
3603		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3604			REG_WR_IND(sc, offset, fw->sbss[j]);
3605		}
3606	}
3607
3608	/* Load the BSS area. */
3609	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3610	if (fw->bss) {
3611		int j;
3612
3613		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3614			REG_WR_IND(sc, offset, fw->bss[j]);
3615		}
3616	}
3617
3618	/* Load the Read-Only area. */
3619	offset = cpu_reg->spad_base +
3620		(fw->rodata_addr - cpu_reg->mips_view_base);
3621	if (fw->rodata) {
3622		int j;
3623
3624		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3625			REG_WR_IND(sc, offset, fw->rodata[j]);
3626		}
3627	}
3628
3629	/* Clear the pre-fetch instruction. */
3630	REG_WR_IND(sc, cpu_reg->inst, 0);
3631	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3632
3633	/* Start the CPU. */
3634	val = REG_RD_IND(sc, cpu_reg->mode);
3635	val &= ~cpu_reg->mode_value_halt;
3636	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3637	REG_WR_IND(sc, cpu_reg->mode, val);
3638
3639	DBEXIT(BCE_VERBOSE_RESET);
3640}
3641
3642
3643/****************************************************************************/
3644/* Initialize the RX CPU.                                                   */
3645/*                                                                          */
3646/* Returns:                                                                 */
3647/*   Nothing.                                                               */
3648/****************************************************************************/
3649static void
3650bce_init_rxp_cpu(struct bce_softc *sc)
3651{
3652	struct cpu_reg cpu_reg;
3653	struct fw_info fw;
3654
3655	DBENTER(BCE_VERBOSE_RESET);
3656
3657	cpu_reg.mode = BCE_RXP_CPU_MODE;
3658	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3659	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3660	cpu_reg.state = BCE_RXP_CPU_STATE;
3661	cpu_reg.state_value_clear = 0xffffff;
3662	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3663	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3664	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3665	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3666	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3667	cpu_reg.spad_base = BCE_RXP_SCRATCH;
3668	cpu_reg.mips_view_base = 0x8000000;
3669
3670	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3671		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3672 		fw.ver_major = bce_RXP_b09FwReleaseMajor;
3673		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
3674		fw.ver_fix = bce_RXP_b09FwReleaseFix;
3675		fw.start_addr = bce_RXP_b09FwStartAddr;
3676
3677		fw.text_addr = bce_RXP_b09FwTextAddr;
3678		fw.text_len = bce_RXP_b09FwTextLen;
3679		fw.text_index = 0;
3680		fw.text = bce_RXP_b09FwText;
3681
3682		fw.data_addr = bce_RXP_b09FwDataAddr;
3683		fw.data_len = bce_RXP_b09FwDataLen;
3684		fw.data_index = 0;
3685		fw.data = bce_RXP_b09FwData;
3686
3687		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
3688		fw.sbss_len = bce_RXP_b09FwSbssLen;
3689		fw.sbss_index = 0;
3690		fw.sbss = bce_RXP_b09FwSbss;
3691
3692		fw.bss_addr = bce_RXP_b09FwBssAddr;
3693		fw.bss_len = bce_RXP_b09FwBssLen;
3694		fw.bss_index = 0;
3695		fw.bss = bce_RXP_b09FwBss;
3696
3697		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
3698		fw.rodata_len = bce_RXP_b09FwRodataLen;
3699		fw.rodata_index = 0;
3700		fw.rodata = bce_RXP_b09FwRodata;
3701	} else {
3702		fw.ver_major = bce_RXP_b06FwReleaseMajor;
3703		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3704		fw.ver_fix = bce_RXP_b06FwReleaseFix;
3705		fw.start_addr = bce_RXP_b06FwStartAddr;
3706
3707		fw.text_addr = bce_RXP_b06FwTextAddr;
3708		fw.text_len = bce_RXP_b06FwTextLen;
3709		fw.text_index = 0;
3710		fw.text = bce_RXP_b06FwText;
3711
3712		fw.data_addr = bce_RXP_b06FwDataAddr;
3713		fw.data_len = bce_RXP_b06FwDataLen;
3714		fw.data_index = 0;
3715		fw.data = bce_RXP_b06FwData;
3716
3717		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3718		fw.sbss_len = bce_RXP_b06FwSbssLen;
3719		fw.sbss_index = 0;
3720		fw.sbss = bce_RXP_b06FwSbss;
3721
3722		fw.bss_addr = bce_RXP_b06FwBssAddr;
3723		fw.bss_len = bce_RXP_b06FwBssLen;
3724		fw.bss_index = 0;
3725		fw.bss = bce_RXP_b06FwBss;
3726
3727		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3728		fw.rodata_len = bce_RXP_b06FwRodataLen;
3729		fw.rodata_index = 0;
3730		fw.rodata = bce_RXP_b06FwRodata;
3731	}
3732
3733	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3734	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3735
3736	DBEXIT(BCE_VERBOSE_RESET);
3737}
3738
3739
3740/****************************************************************************/
3741/* Initialize the TX CPU.                                                   */
3742/*                                                                          */
3743/* Returns:                                                                 */
3744/*   Nothing.                                                               */
3745/****************************************************************************/
3746static void
3747bce_init_txp_cpu(struct bce_softc *sc)
3748{
3749	struct cpu_reg cpu_reg;
3750	struct fw_info fw;
3751
3752	DBENTER(BCE_VERBOSE_RESET);
3753
3754	cpu_reg.mode = BCE_TXP_CPU_MODE;
3755	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3756	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3757	cpu_reg.state = BCE_TXP_CPU_STATE;
3758	cpu_reg.state_value_clear = 0xffffff;
3759	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3760	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3761	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3762	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3763	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3764	cpu_reg.spad_base = BCE_TXP_SCRATCH;
3765	cpu_reg.mips_view_base = 0x8000000;
3766
3767	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3768		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3769		fw.ver_major = bce_TXP_b09FwReleaseMajor;
3770		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
3771		fw.ver_fix = bce_TXP_b09FwReleaseFix;
3772		fw.start_addr = bce_TXP_b09FwStartAddr;
3773
3774		fw.text_addr = bce_TXP_b09FwTextAddr;
3775		fw.text_len = bce_TXP_b09FwTextLen;
3776		fw.text_index = 0;
3777		fw.text = bce_TXP_b09FwText;
3778
3779		fw.data_addr = bce_TXP_b09FwDataAddr;
3780		fw.data_len = bce_TXP_b09FwDataLen;
3781		fw.data_index = 0;
3782		fw.data = bce_TXP_b09FwData;
3783
3784		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
3785		fw.sbss_len = bce_TXP_b09FwSbssLen;
3786		fw.sbss_index = 0;
3787		fw.sbss = bce_TXP_b09FwSbss;
3788
3789		fw.bss_addr = bce_TXP_b09FwBssAddr;
3790		fw.bss_len = bce_TXP_b09FwBssLen;
3791		fw.bss_index = 0;
3792		fw.bss = bce_TXP_b09FwBss;
3793
3794		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
3795		fw.rodata_len = bce_TXP_b09FwRodataLen;
3796		fw.rodata_index = 0;
3797		fw.rodata = bce_TXP_b09FwRodata;
3798	} else {
3799		fw.ver_major = bce_TXP_b06FwReleaseMajor;
3800		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3801		fw.ver_fix = bce_TXP_b06FwReleaseFix;
3802		fw.start_addr = bce_TXP_b06FwStartAddr;
3803
3804		fw.text_addr = bce_TXP_b06FwTextAddr;
3805		fw.text_len = bce_TXP_b06FwTextLen;
3806		fw.text_index = 0;
3807		fw.text = bce_TXP_b06FwText;
3808
3809		fw.data_addr = bce_TXP_b06FwDataAddr;
3810		fw.data_len = bce_TXP_b06FwDataLen;
3811		fw.data_index = 0;
3812		fw.data = bce_TXP_b06FwData;
3813
3814		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3815		fw.sbss_len = bce_TXP_b06FwSbssLen;
3816		fw.sbss_index = 0;
3817		fw.sbss = bce_TXP_b06FwSbss;
3818
3819		fw.bss_addr = bce_TXP_b06FwBssAddr;
3820		fw.bss_len = bce_TXP_b06FwBssLen;
3821		fw.bss_index = 0;
3822		fw.bss = bce_TXP_b06FwBss;
3823
3824		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
3825		fw.rodata_len = bce_TXP_b06FwRodataLen;
3826		fw.rodata_index = 0;
3827		fw.rodata = bce_TXP_b06FwRodata;
3828	}
3829
3830	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
3831	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3832
3833	DBEXIT(BCE_VERBOSE_RESET);
3834}
3835
3836
3837/****************************************************************************/
3838/* Initialize the TPAT CPU.                                                 */
3839/*                                                                          */
3840/* Returns:                                                                 */
3841/*   Nothing.                                                               */
3842/****************************************************************************/
3843static void
3844bce_init_tpat_cpu(struct bce_softc *sc)
3845{
3846	struct cpu_reg cpu_reg;
3847	struct fw_info fw;
3848
3849	DBENTER(BCE_VERBOSE_RESET);
3850
3851	cpu_reg.mode = BCE_TPAT_CPU_MODE;
3852	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3853	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3854	cpu_reg.state = BCE_TPAT_CPU_STATE;
3855	cpu_reg.state_value_clear = 0xffffff;
3856	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3857	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3858	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3859	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3860	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3861	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3862	cpu_reg.mips_view_base = 0x8000000;
3863
3864	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3865		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3866		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3867		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3868		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3869		fw.start_addr = bce_TPAT_b09FwStartAddr;
3870
3871		fw.text_addr = bce_TPAT_b09FwTextAddr;
3872		fw.text_len = bce_TPAT_b09FwTextLen;
3873		fw.text_index = 0;
3874		fw.text = bce_TPAT_b09FwText;
3875
3876		fw.data_addr = bce_TPAT_b09FwDataAddr;
3877		fw.data_len = bce_TPAT_b09FwDataLen;
3878		fw.data_index = 0;
3879		fw.data = bce_TPAT_b09FwData;
3880
3881		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3882		fw.sbss_len = bce_TPAT_b09FwSbssLen;
3883		fw.sbss_index = 0;
3884		fw.sbss = bce_TPAT_b09FwSbss;
3885
3886		fw.bss_addr = bce_TPAT_b09FwBssAddr;
3887		fw.bss_len = bce_TPAT_b09FwBssLen;
3888		fw.bss_index = 0;
3889		fw.bss = bce_TPAT_b09FwBss;
3890
3891		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3892		fw.rodata_len = bce_TPAT_b09FwRodataLen;
3893		fw.rodata_index = 0;
3894		fw.rodata = bce_TPAT_b09FwRodata;
3895	} else {
3896		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3897		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3898		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3899		fw.start_addr = bce_TPAT_b06FwStartAddr;
3900
3901		fw.text_addr = bce_TPAT_b06FwTextAddr;
3902		fw.text_len = bce_TPAT_b06FwTextLen;
3903		fw.text_index = 0;
3904		fw.text = bce_TPAT_b06FwText;
3905
3906		fw.data_addr = bce_TPAT_b06FwDataAddr;
3907		fw.data_len = bce_TPAT_b06FwDataLen;
3908		fw.data_index = 0;
3909		fw.data = bce_TPAT_b06FwData;
3910
3911		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3912		fw.sbss_len = bce_TPAT_b06FwSbssLen;
3913		fw.sbss_index = 0;
3914		fw.sbss = bce_TPAT_b06FwSbss;
3915
3916		fw.bss_addr = bce_TPAT_b06FwBssAddr;
3917		fw.bss_len = bce_TPAT_b06FwBssLen;
3918		fw.bss_index = 0;
3919		fw.bss = bce_TPAT_b06FwBss;
3920
3921		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3922		fw.rodata_len = bce_TPAT_b06FwRodataLen;
3923		fw.rodata_index = 0;
3924		fw.rodata = bce_TPAT_b06FwRodata;
3925	}
3926
3927	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3928	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3929
3930	DBEXIT(BCE_VERBOSE_RESET);
3931}
3932
3933
3934/****************************************************************************/
3935/* Initialize the CP CPU.                                                   */
3936/*                                                                          */
3937/* Returns:                                                                 */
3938/*   Nothing.                                                               */
3939/****************************************************************************/
3940static void
3941bce_init_cp_cpu(struct bce_softc *sc)
3942{
3943	struct cpu_reg cpu_reg;
3944	struct fw_info fw;
3945
3946	DBENTER(BCE_VERBOSE_RESET);
3947
3948	cpu_reg.mode = BCE_CP_CPU_MODE;
3949	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3950	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3951	cpu_reg.state = BCE_CP_CPU_STATE;
3952	cpu_reg.state_value_clear = 0xffffff;
3953	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3954	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3955	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3956	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3957	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3958	cpu_reg.spad_base = BCE_CP_SCRATCH;
3959	cpu_reg.mips_view_base = 0x8000000;
3960
3961	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3962		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3963		fw.ver_major = bce_CP_b09FwReleaseMajor;
3964		fw.ver_minor = bce_CP_b09FwReleaseMinor;
3965		fw.ver_fix = bce_CP_b09FwReleaseFix;
3966		fw.start_addr = bce_CP_b09FwStartAddr;
3967
3968		fw.text_addr = bce_CP_b09FwTextAddr;
3969		fw.text_len = bce_CP_b09FwTextLen;
3970		fw.text_index = 0;
3971		fw.text = bce_CP_b09FwText;
3972
3973		fw.data_addr = bce_CP_b09FwDataAddr;
3974		fw.data_len = bce_CP_b09FwDataLen;
3975		fw.data_index = 0;
3976		fw.data = bce_CP_b09FwData;
3977
3978		fw.sbss_addr = bce_CP_b09FwSbssAddr;
3979		fw.sbss_len = bce_CP_b09FwSbssLen;
3980		fw.sbss_index = 0;
3981		fw.sbss = bce_CP_b09FwSbss;
3982
3983		fw.bss_addr = bce_CP_b09FwBssAddr;
3984		fw.bss_len = bce_CP_b09FwBssLen;
3985		fw.bss_index = 0;
3986		fw.bss = bce_CP_b09FwBss;
3987
3988		fw.rodata_addr = bce_CP_b09FwRodataAddr;
3989		fw.rodata_len = bce_CP_b09FwRodataLen;
3990		fw.rodata_index = 0;
3991		fw.rodata = bce_CP_b09FwRodata;
3992	} else {
3993		fw.ver_major = bce_CP_b06FwReleaseMajor;
3994		fw.ver_minor = bce_CP_b06FwReleaseMinor;
3995		fw.ver_fix = bce_CP_b06FwReleaseFix;
3996		fw.start_addr = bce_CP_b06FwStartAddr;
3997
3998		fw.text_addr = bce_CP_b06FwTextAddr;
3999		fw.text_len = bce_CP_b06FwTextLen;
4000		fw.text_index = 0;
4001		fw.text = bce_CP_b06FwText;
4002
4003		fw.data_addr = bce_CP_b06FwDataAddr;
4004		fw.data_len = bce_CP_b06FwDataLen;
4005		fw.data_index = 0;
4006		fw.data = bce_CP_b06FwData;
4007
4008		fw.sbss_addr = bce_CP_b06FwSbssAddr;
4009		fw.sbss_len = bce_CP_b06FwSbssLen;
4010		fw.sbss_index = 0;
4011		fw.sbss = bce_CP_b06FwSbss;
4012
4013		fw.bss_addr = bce_CP_b06FwBssAddr;
4014		fw.bss_len = bce_CP_b06FwBssLen;
4015		fw.bss_index = 0;
4016		fw.bss = bce_CP_b06FwBss;
4017
4018		fw.rodata_addr = bce_CP_b06FwRodataAddr;
4019		fw.rodata_len = bce_CP_b06FwRodataLen;
4020		fw.rodata_index = 0;
4021		fw.rodata = bce_CP_b06FwRodata;
4022	}
4023
4024	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4025	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4026
4027	DBEXIT(BCE_VERBOSE_RESET);
4028}
4029
4030
4031/****************************************************************************/
4032/* Initialize the COM CPU.                                                 */
4033/*                                                                          */
4034/* Returns:                                                                 */
4035/*   Nothing.                                                               */
4036/****************************************************************************/
4037static void
4038bce_init_com_cpu(struct bce_softc *sc)
4039{
4040	struct cpu_reg cpu_reg;
4041	struct fw_info fw;
4042
4043	DBENTER(BCE_VERBOSE_RESET);
4044
4045	cpu_reg.mode = BCE_COM_CPU_MODE;
4046	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4047	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4048	cpu_reg.state = BCE_COM_CPU_STATE;
4049	cpu_reg.state_value_clear = 0xffffff;
4050	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4051	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4052	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4053	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4054	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4055	cpu_reg.spad_base = BCE_COM_SCRATCH;
4056	cpu_reg.mips_view_base = 0x8000000;
4057
4058	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4059		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4060		fw.ver_major = bce_COM_b09FwReleaseMajor;
4061		fw.ver_minor = bce_COM_b09FwReleaseMinor;
4062		fw.ver_fix = bce_COM_b09FwReleaseFix;
4063		fw.start_addr = bce_COM_b09FwStartAddr;
4064
4065		fw.text_addr = bce_COM_b09FwTextAddr;
4066		fw.text_len = bce_COM_b09FwTextLen;
4067		fw.text_index = 0;
4068		fw.text = bce_COM_b09FwText;
4069
4070		fw.data_addr = bce_COM_b09FwDataAddr;
4071		fw.data_len = bce_COM_b09FwDataLen;
4072		fw.data_index = 0;
4073		fw.data = bce_COM_b09FwData;
4074
4075		fw.sbss_addr = bce_COM_b09FwSbssAddr;
4076		fw.sbss_len = bce_COM_b09FwSbssLen;
4077		fw.sbss_index = 0;
4078		fw.sbss = bce_COM_b09FwSbss;
4079
4080		fw.bss_addr = bce_COM_b09FwBssAddr;
4081		fw.bss_len = bce_COM_b09FwBssLen;
4082		fw.bss_index = 0;
4083		fw.bss = bce_COM_b09FwBss;
4084
4085		fw.rodata_addr = bce_COM_b09FwRodataAddr;
4086		fw.rodata_len = bce_COM_b09FwRodataLen;
4087		fw.rodata_index = 0;
4088		fw.rodata = bce_COM_b09FwRodata;
4089	} else {
4090		fw.ver_major = bce_COM_b06FwReleaseMajor;
4091		fw.ver_minor = bce_COM_b06FwReleaseMinor;
4092		fw.ver_fix = bce_COM_b06FwReleaseFix;
4093		fw.start_addr = bce_COM_b06FwStartAddr;
4094
4095		fw.text_addr = bce_COM_b06FwTextAddr;
4096		fw.text_len = bce_COM_b06FwTextLen;
4097		fw.text_index = 0;
4098		fw.text = bce_COM_b06FwText;
4099
4100		fw.data_addr = bce_COM_b06FwDataAddr;
4101		fw.data_len = bce_COM_b06FwDataLen;
4102		fw.data_index = 0;
4103		fw.data = bce_COM_b06FwData;
4104
4105		fw.sbss_addr = bce_COM_b06FwSbssAddr;
4106		fw.sbss_len = bce_COM_b06FwSbssLen;
4107		fw.sbss_index = 0;
4108		fw.sbss = bce_COM_b06FwSbss;
4109
4110		fw.bss_addr = bce_COM_b06FwBssAddr;
4111		fw.bss_len = bce_COM_b06FwBssLen;
4112		fw.bss_index = 0;
4113		fw.bss = bce_COM_b06FwBss;
4114
4115		fw.rodata_addr = bce_COM_b06FwRodataAddr;
4116		fw.rodata_len = bce_COM_b06FwRodataLen;
4117		fw.rodata_index = 0;
4118		fw.rodata = bce_COM_b06FwRodata;
4119	}
4120
4121	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4122	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4123
4124	DBEXIT(BCE_VERBOSE_RESET);
4125}
4126
4127
4128/****************************************************************************/
4129/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
4130/*                                                                          */
4131/* Loads the firmware for each CPU and starts the CPU.                      */
4132/*                                                                          */
4133/* Returns:                                                                 */
4134/*   Nothing.                                                               */
4135/****************************************************************************/
4136static void
4137bce_init_cpus(struct bce_softc *sc)
4138{
4139	DBENTER(BCE_VERBOSE_RESET);
4140
4141	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4142		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4143		bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, sizeof(bce_xi_rv2p_proc1),
4144			RV2P_PROC1);
4145		bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, sizeof(bce_xi_rv2p_proc2),
4146			RV2P_PROC2);
4147	} else {
4148		bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1),
4149			RV2P_PROC1);
4150		bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2),
4151			RV2P_PROC2);
4152	}
4153
4154	bce_init_rxp_cpu(sc);
4155	bce_init_txp_cpu(sc);
4156	bce_init_tpat_cpu(sc);
4157	bce_init_com_cpu(sc);
4158	bce_init_cp_cpu(sc);
4159
4160	DBEXIT(BCE_VERBOSE_RESET);
4161}
4162
4163
4164/****************************************************************************/
4165/* Initialize context memory.                                               */
4166/*                                                                          */
4167/* Clears the memory associated with each Context ID (CID).                 */
4168/*                                                                          */
4169/* Returns:                                                                 */
4170/*   Nothing.                                                               */
4171/****************************************************************************/
4172static void
4173bce_init_ctx(struct bce_softc *sc)
4174{
4175
4176	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4177
4178	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4179		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4180		/* DRC: Replace this constant value with a #define. */
4181		int i, retry_cnt = 10;
4182		u32 val;
4183
4184		DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4185
4186		/*
4187		 * BCM5709 context memory may be cached
4188		 * in host memory so prepare the host memory
4189		 * for access.
4190		 */
4191		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
4192		val |= (BCM_PAGE_BITS - 8) << 16;
4193		REG_WR(sc, BCE_CTX_COMMAND, val);
4194
4195		/* Wait for mem init command to complete. */
4196		for (i = 0; i < retry_cnt; i++) {
4197			val = REG_RD(sc, BCE_CTX_COMMAND);
4198			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4199				break;
4200			DELAY(2);
4201		}
4202
4203		/* ToDo: Consider returning an error here. */
4204		DBRUNIF((val & BCE_CTX_COMMAND_MEM_INIT),
4205			BCE_PRINTF("%s(): Context memory initialization failed!\n",
4206			__FUNCTION__));
4207
4208		for (i = 0; i < sc->ctx_pages; i++) {
4209			int j;
4210
4211			/* Set the physical address of the context memory cache. */
4212			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4213				BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4214				BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4215			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4216				BCE_ADDR_HI(sc->ctx_paddr[i]));
4217			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4218				BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4219
4220			/* Verify that the context memory write was successful. */
4221			for (j = 0; j < retry_cnt; j++) {
4222				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4223				if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4224					break;
4225				DELAY(5);
4226			}
4227
4228			/* ToDo: Consider returning an error here. */
4229			DBRUNIF((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ),
4230				BCE_PRINTF("%s(): Failed to initialize context page %d!\n",
4231				__FUNCTION__, i));
4232		}
4233	} else {
4234		u32 vcid_addr, offset;
4235
4236		DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4237
4238		/*
4239		 * For the 5706/5708, context memory is local to
4240		 * the controller, so initialize the controller
4241		 * context memory.
4242		 */
4243
4244		vcid_addr = GET_CID_ADDR(96);
4245		while (vcid_addr) {
4246
4247			vcid_addr -= PHY_CTX_SIZE;
4248
4249			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4250			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4251
4252            for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4253                CTX_WR(sc, 0x00, offset, 0);
4254            }
4255
4256			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4257			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4258		}
4259
4260	}
4261	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4262}
4263
4264
4265/****************************************************************************/
4266/* Fetch the permanent MAC address of the controller.                       */
4267/*                                                                          */
4268/* Returns:                                                                 */
4269/*   Nothing.                                                               */
4270/****************************************************************************/
4271static void
4272bce_get_mac_addr(struct bce_softc *sc)
4273{
4274	u32 mac_lo = 0, mac_hi = 0;
4275
4276	DBENTER(BCE_VERBOSE_RESET);
4277	/*
4278	 * The NetXtreme II bootcode populates various NIC
4279	 * power-on and runtime configuration items in a
4280	 * shared memory area.  The factory configured MAC
4281	 * address is available from both NVRAM and the
4282	 * shared memory area so we'll read the value from
4283	 * shared memory for speed.
4284	 */
4285
4286	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
4287		BCE_PORT_HW_CFG_MAC_UPPER);
4288	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
4289		BCE_PORT_HW_CFG_MAC_LOWER);
4290
4291	if ((mac_lo == 0) && (mac_hi == 0)) {
4292		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4293			__FILE__, __LINE__);
4294	} else {
4295		sc->eaddr[0] = (u_char)(mac_hi >> 8);
4296		sc->eaddr[1] = (u_char)(mac_hi >> 0);
4297		sc->eaddr[2] = (u_char)(mac_lo >> 24);
4298		sc->eaddr[3] = (u_char)(mac_lo >> 16);
4299		sc->eaddr[4] = (u_char)(mac_lo >> 8);
4300		sc->eaddr[5] = (u_char)(mac_lo >> 0);
4301	}
4302
4303	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
4304	DBEXIT(BCE_VERBOSE_RESET);
4305}
4306
4307
4308/****************************************************************************/
4309/* Program the MAC address.                                                 */
4310/*                                                                          */
4311/* Returns:                                                                 */
4312/*   Nothing.                                                               */
4313/****************************************************************************/
4314static void
4315bce_set_mac_addr(struct bce_softc *sc)
4316{
4317	u32 val;
4318	u8 *mac_addr = sc->eaddr;
4319
4320	/* ToDo: Add support for setting multiple MAC addresses. */
4321
4322	DBENTER(BCE_VERBOSE_RESET);
4323	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
4324
4325	val = (mac_addr[0] << 8) | mac_addr[1];
4326
4327	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4328
4329	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4330		(mac_addr[4] << 8) | mac_addr[5];
4331
4332	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4333
4334	DBEXIT(BCE_VERBOSE_RESET);
4335}
4336
4337
4338/****************************************************************************/
4339/* Stop the controller.                                                     */
4340/*                                                                          */
4341/* Returns:                                                                 */
4342/*   Nothing.                                                               */
4343/****************************************************************************/
4344static void
4345bce_stop(struct bce_softc *sc)
4346{
4347	struct ifnet *ifp;
4348	struct ifmedia_entry *ifm;
4349	struct mii_data *mii = NULL;
4350	int mtmp, itmp;
4351
4352	DBENTER(BCE_VERBOSE_RESET);
4353
4354	BCE_LOCK_ASSERT(sc);
4355
4356	ifp = sc->bce_ifp;
4357
4358	mii = device_get_softc(sc->bce_miibus);
4359
4360	callout_stop(&sc->bce_tick_callout);
4361
4362	/* Disable the transmit/receive blocks. */
4363	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4364	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4365	DELAY(20);
4366
4367	bce_disable_intr(sc);
4368
4369	/* Free RX buffers. */
4370#ifdef BCE_USE_SPLIT_HEADER
4371	bce_free_pg_chain(sc);
4372#endif
4373	bce_free_rx_chain(sc);
4374
4375	/* Free TX buffers. */
4376	bce_free_tx_chain(sc);
4377
4378	/*
4379	 * Isolate/power down the PHY, but leave the media selection
4380	 * unchanged so that things will be put back to normal when
4381	 * we bring the interface back up.
4382	 */
4383
4384	itmp = ifp->if_flags;
4385	ifp->if_flags |= IFF_UP;
4386
4387	/* If we are called from bce_detach(), mii is already NULL. */
4388	if (mii != NULL) {
4389		ifm = mii->mii_media.ifm_cur;
4390		mtmp = ifm->ifm_media;
4391		ifm->ifm_media = IFM_ETHER | IFM_NONE;
4392		mii_mediachg(mii);
4393		ifm->ifm_media = mtmp;
4394	}
4395
4396	ifp->if_flags = itmp;
4397	sc->watchdog_timer = 0;
4398
4399	sc->bce_link = 0;
4400
4401	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4402
4403	DBEXIT(BCE_VERBOSE_RESET);
4404}
4405
4406
4407static int
4408bce_reset(struct bce_softc *sc, u32 reset_code)
4409{
4410	u32 val;
4411	int i, rc = 0;
4412
4413	DBENTER(BCE_VERBOSE_RESET);
4414
4415	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4416		__FUNCTION__, reset_code);
4417
4418	/* Wait for pending PCI transactions to complete. */
4419	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4420	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4421	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4422	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4423	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4424	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4425	DELAY(5);
4426
4427	/* Disable DMA */
4428	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4429		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4430		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4431		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4432		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4433	}
4434
4435	/* Assume bootcode is running. */
4436	sc->bce_fw_timed_out = 0;
4437
4438	/* Give the firmware a chance to prepare for the reset. */
4439	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4440	if (rc)
4441		goto bce_reset_exit;
4442
4443	/* Set a firmware reminder that this is a soft reset. */
4444	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
4445		   BCE_DRV_RESET_SIGNATURE_MAGIC);
4446
4447	/* Dummy read to force the chip to complete all current transactions. */
4448	val = REG_RD(sc, BCE_MISC_ID);
4449
4450	/* Chip reset. */
4451	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4452		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4453		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4454		REG_RD(sc, BCE_MISC_COMMAND);
4455		DELAY(5);
4456
4457		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4458		      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4459
4460		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4461	} else {
4462		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4463			BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4464			BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4465		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4466
4467		/* Allow up to 30us for reset to complete. */
4468		for (i = 0; i < 10; i++) {
4469			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4470			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4471				BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4472				break;
4473			}
4474			DELAY(10);
4475		}
4476
4477		/* Check that reset completed successfully. */
4478		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4479			BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4480			BCE_PRINTF("%s(%d): Reset failed!\n",
4481				__FILE__, __LINE__);
4482			rc = EBUSY;
4483			goto bce_reset_exit;
4484		}
4485	}
4486
4487	/* Make sure byte swapping is properly configured. */
4488	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4489	if (val != 0x01020304) {
4490		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4491			__FILE__, __LINE__);
4492		rc = ENODEV;
4493		goto bce_reset_exit;
4494	}
4495
4496	/* Just completed a reset, assume that firmware is running again. */
4497	sc->bce_fw_timed_out = 0;
4498
4499	/* Wait for the firmware to finish its initialization. */
4500	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4501	if (rc)
4502		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
4503			__FILE__, __LINE__);
4504
4505bce_reset_exit:
4506	DBEXIT(BCE_VERBOSE_RESET);
4507	return (rc);
4508}
4509
4510
4511static int
4512bce_chipinit(struct bce_softc *sc)
4513{
4514	u32 val;
4515	int rc = 0;
4516
4517	DBENTER(BCE_VERBOSE_RESET);
4518
4519	bce_disable_intr(sc);
4520
4521	/*
4522	 * Initialize DMA byte/word swapping, configure the number of DMA
4523	 * channels and PCI clock compensation delay.
4524	 */
4525	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
4526	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
4527#if BYTE_ORDER == BIG_ENDIAN
4528	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
4529#endif
4530	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
4531	      DMA_READ_CHANS << 12 |
4532	      DMA_WRITE_CHANS << 16;
4533
4534	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
4535
4536	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
4537		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
4538
4539	/*
4540	 * This setting resolves a problem observed on certain Intel PCI
4541	 * chipsets that cannot handle multiple outstanding DMA operations.
4542	 * See errata E9_5706A1_65.
4543	 */
4544	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4545	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
4546	    !(sc->bce_flags & BCE_PCIX_FLAG))
4547		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
4548
4549	REG_WR(sc, BCE_DMA_CONFIG, val);
4550
4551	/* Enable the RX_V2P and Context state machines before access. */
4552	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4553	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4554	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4555	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4556
4557	/* Initialize context mapping and zero out the quick contexts. */
4558	bce_init_ctx(sc);
4559
4560	/* Initialize the on-boards CPUs */
4561	bce_init_cpus(sc);
4562
4563	/* Prepare NVRAM for access. */
4564	if (bce_init_nvram(sc)) {
4565		rc = ENODEV;
4566		goto bce_chipinit_exit;
4567	}
4568
4569	/* Set the kernel bypass block size */
4570	val = REG_RD(sc, BCE_MQ_CONFIG);
4571	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4572	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4573
4574	/* Enable bins used on the 5709. */
4575	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4576		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4577		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
4578		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
4579			val |= BCE_MQ_CONFIG_HALT_DIS;
4580	}
4581
4582	REG_WR(sc, BCE_MQ_CONFIG, val);
4583
4584	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4585	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
4586	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
4587
4588	/* Set the page size and clear the RV2P processor stall bits. */
4589	val = (BCM_PAGE_BITS - 8) << 24;
4590	REG_WR(sc, BCE_RV2P_CONFIG, val);
4591
4592	/* Configure page size. */
4593	val = REG_RD(sc, BCE_TBDR_CONFIG);
4594	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
4595	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4596	REG_WR(sc, BCE_TBDR_CONFIG, val);
4597
4598	/* Set the perfect match control register to default. */
4599	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
4600
4601bce_chipinit_exit:
4602	DBEXIT(BCE_VERBOSE_RESET);
4603
4604	return(rc);
4605}
4606
4607
4608/****************************************************************************/
4609/* Initialize the controller in preparation to send/receive traffic.        */
4610/*                                                                          */
4611/* Returns:                                                                 */
4612/*   0 for success, positive value for failure.                             */
4613/****************************************************************************/
4614static int
4615bce_blockinit(struct bce_softc *sc)
4616{
4617	u32 reg, val;
4618	int rc = 0;
4619
4620	DBENTER(BCE_VERBOSE_RESET);
4621
4622	/* Load the hardware default MAC address. */
4623	bce_set_mac_addr(sc);
4624
4625	/* Set the Ethernet backoff seed value */
4626	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
4627	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
4628	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
4629	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
4630
4631	sc->last_status_idx = 0;
4632	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
4633
4634	/* Set up link change interrupt generation. */
4635	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
4636
4637	/* Program the physical address of the status block. */
4638	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
4639		BCE_ADDR_LO(sc->status_block_paddr));
4640	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
4641		BCE_ADDR_HI(sc->status_block_paddr));
4642
4643	/* Program the physical address of the statistics block. */
4644	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
4645		BCE_ADDR_LO(sc->stats_block_paddr));
4646	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
4647		BCE_ADDR_HI(sc->stats_block_paddr));
4648
4649	/* Program various host coalescing parameters. */
4650	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4651		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
4652	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4653		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
4654	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
4655		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
4656	REG_WR(sc, BCE_HC_TX_TICKS,
4657		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
4658	REG_WR(sc, BCE_HC_RX_TICKS,
4659		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
4660	REG_WR(sc, BCE_HC_COM_TICKS,
4661		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
4662	REG_WR(sc, BCE_HC_CMD_TICKS,
4663		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
4664	REG_WR(sc, BCE_HC_STATS_TICKS,
4665		(sc->bce_stats_ticks & 0xffff00));
4666	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4667
4668	/* Configure the Host Coalescing block. */
4669	val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
4670		      BCE_HC_CONFIG_COLLECT_STATS;
4671
4672#if 0
4673	/* ToDo: Add MSI-X support. */
4674	if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
4675		u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
4676			   BCE_HC_SB_CONFIG_1;
4677
4678		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
4679
4680		REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
4681			BCE_HC_SB_CONFIG_1_ONE_SHOT);
4682
4683		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
4684			(sc->tx_quick_cons_trip_int << 16) |
4685			 sc->tx_quick_cons_trip);
4686
4687		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
4688			(sc->tx_ticks_int << 16) | sc->tx_ticks);
4689
4690		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4691	}
4692
4693	/*
4694	 * Tell the HC block to automatically set the
4695	 * INT_MASK bit after an MSI/MSI-X interrupt
4696	 * is generated so the driver doesn't have to.
4697	 */
4698	if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
4699		val |= BCE_HC_CONFIG_ONE_SHOT;
4700
4701	/* Set the MSI-X status blocks to 128 byte boundaries. */
4702	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
4703		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4704#endif
4705
4706	REG_WR(sc, BCE_HC_CONFIG, val);
4707
4708	/* Clear the internal statistics counters. */
4709	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
4710
4711	/* Verify that bootcode is running. */
4712	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
4713
4714	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
4715		BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
4716			__FILE__, __LINE__);
4717		reg = 0);
4718
4719	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
4720	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
4721		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
4722			"Expected: 08%08X\n", __FILE__, __LINE__,
4723			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
4724			BCE_DEV_INFO_SIGNATURE_MAGIC);
4725		rc = ENODEV;
4726		goto bce_blockinit_exit;
4727	}
4728
4729	/* Enable DMA */
4730	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4731		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4732		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4733		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4734		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4735	}
4736
4737	/* Allow bootcode to apply any additional fixes before enabling MAC. */
4738	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
4739
4740	/* Enable link state change interrupt generation. */
4741	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
4742
4743	/* Enable all remaining blocks in the MAC. */
4744	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
4745		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
4746		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT_XI);
4747	else
4748		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4749
4750	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4751	DELAY(20);
4752
4753	/* Save the current host coalescing block settings. */
4754	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
4755
4756bce_blockinit_exit:
4757	DBEXIT(BCE_VERBOSE_RESET);
4758
4759	return (rc);
4760}
4761
4762
4763/****************************************************************************/
4764/* Encapsulate an mbuf into the rx_bd chain.                                */
4765/*                                                                          */
4766/* Returns:                                                                 */
4767/*   0 for success, positive value for failure.                             */
4768/****************************************************************************/
4769static int
4770bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
4771	u16 *chain_prod, u32 *prod_bseq)
4772{
4773	bus_dmamap_t map;
4774	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4775	struct mbuf *m_new = NULL;
4776	struct rx_bd *rxbd;
4777	int nsegs, error, rc = 0;
4778#ifdef BCE_DEBUG
4779	u16 debug_chain_prod = *chain_prod;
4780#endif
4781
4782	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4783
4784	/* Make sure the inputs are valid. */
4785	DBRUNIF((*chain_prod > MAX_RX_BD),
4786		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
4787		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
4788
4789	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
4790		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4791
4792	/* Update some debug statistic counters */
4793	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4794		sc->rx_low_watermark = sc->free_rx_bd);
4795	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4796
4797	/* Check whether this is a new mbuf allocation. */
4798	if (m == NULL) {
4799
4800		/* Simulate an mbuf allocation failure. */
4801		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
4802			sc->mbuf_alloc_failed++;
4803			sc->debug_mbuf_sim_alloc_failed++;
4804			rc = ENOBUFS;
4805			goto bce_get_rx_buf_exit);
4806
4807		/* This is a new mbuf allocation. */
4808#ifdef BCE_USE_SPLIT_HEADER
4809		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
4810#else
4811		if (sc->rx_bd_mbuf_alloc_size <= MCLBYTES)
4812			m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4813		else
4814			m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size);
4815#endif
4816
4817		if (m_new == NULL) {
4818			sc->mbuf_alloc_failed++;
4819			rc = ENOBUFS;
4820			goto bce_get_rx_buf_exit;
4821		}
4822
4823		DBRUN(sc->debug_rx_mbuf_alloc++);
4824	} else {
4825		/* Reuse an existing mbuf. */
4826		m_new = m;
4827	}
4828
4829	/* Make sure we have a valid packet header. */
4830	M_ASSERTPKTHDR(m_new);
4831
4832	/* Initialize the mbuf size and pad if necessary for alignment. */
4833	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
4834	m_adj(m_new, sc->rx_bd_mbuf_align_pad);
4835
4836	/* ToDo: Consider calling m_fragment() to test error handling. */
4837
4838	/* Map the mbuf cluster into device memory. */
4839	map = sc->rx_mbuf_map[*chain_prod];
4840	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
4841	    segs, &nsegs, BUS_DMA_NOWAIT);
4842
4843	/* Handle any mapping errors. */
4844	if (error) {
4845		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain (%d)!\n",
4846			__FILE__, __LINE__, error);
4847
4848		m_freem(m_new);
4849		DBRUN(sc->debug_rx_mbuf_alloc--);
4850
4851		rc = ENOBUFS;
4852		goto bce_get_rx_buf_exit;
4853	}
4854
4855	/* All mbufs must map to a single segment. */
4856	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
4857		 __FUNCTION__, nsegs));
4858
4859	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
4860
4861	/* Setup the rx_bd for the segment. */
4862	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
4863
4864	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
4865	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
4866	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
4867	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
4868	*prod_bseq += segs[0].ds_len;
4869
4870	/* Save the mbuf and update our counter. */
4871	sc->rx_mbuf_ptr[*chain_prod] = m_new;
4872	sc->free_rx_bd -= nsegs;
4873
4874	DBRUNMSG(BCE_INSANE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
4875		nsegs));
4876
4877	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
4878		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4879
4880bce_get_rx_buf_exit:
4881	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4882
4883	return(rc);
4884}
4885
4886
4887#ifdef BCE_USE_SPLIT_HEADER
4888/****************************************************************************/
4889/* Encapsulate an mbuf cluster into the page chain.                        */
4890/*                                                                          */
4891/* Returns:                                                                 */
4892/*   0 for success, positive value for failure.                             */
4893/****************************************************************************/
4894static int
4895bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
4896	u16 *prod_idx)
4897{
4898	bus_dmamap_t map;
4899	bus_addr_t busaddr;
4900	struct mbuf *m_new = NULL;
4901	struct rx_bd *pgbd;
4902	int error, rc = 0;
4903#ifdef BCE_DEBUG
4904	u16 debug_prod_idx = *prod_idx;
4905#endif
4906
4907	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4908
4909	/* Make sure the inputs are valid. */
4910	DBRUNIF((*prod_idx > MAX_PG_BD),
4911		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
4912		__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
4913
4914	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
4915		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
4916
4917	/* Update counters if we've hit a new low or run out of pages. */
4918	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
4919		sc->pg_low_watermark = sc->free_pg_bd);
4920	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
4921
4922	/* Check whether this is a new mbuf allocation. */
4923	if (m == NULL) {
4924
4925		/* Simulate an mbuf allocation failure. */
4926		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
4927			sc->mbuf_alloc_failed++;
4928			sc->debug_mbuf_sim_alloc_failed++;
4929			rc = ENOBUFS;
4930			goto bce_get_pg_buf_exit);
4931
4932		/* This is a new mbuf allocation. */
4933		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
4934		if (m_new == NULL) {
4935			sc->mbuf_alloc_failed++;
4936			rc = ENOBUFS;
4937			goto bce_get_pg_buf_exit;
4938		}
4939
4940		DBRUN(sc->debug_pg_mbuf_alloc++);
4941	} else {
4942		/* Reuse an existing mbuf. */
4943		m_new = m;
4944		m_new->m_data = m_new->m_ext.ext_buf;
4945	}
4946
4947	m_new->m_len = sc->pg_bd_mbuf_alloc_size;
4948
4949	/* ToDo: Consider calling m_fragment() to test error handling. */
4950
4951	/* Map the mbuf cluster into device memory. */
4952	map = sc->pg_mbuf_map[*prod_idx];
4953	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
4954	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
4955
4956	/* Handle any mapping errors. */
4957	if (error) {
4958		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
4959			__FILE__, __LINE__);
4960
4961		m_freem(m_new);
4962		DBRUN(sc->debug_pg_mbuf_alloc--);
4963
4964		rc = ENOBUFS;
4965		goto bce_get_pg_buf_exit;
4966	}
4967
4968	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
4969
4970	/*
4971	 * The page chain uses the same rx_bd data structure
4972	 * as the receive chain but doesn't require a byte sequence (bseq).
4973	 */
4974	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
4975
4976	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(busaddr));
4977	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(busaddr));
4978	pgbd->rx_bd_len       = htole32(sc->pg_bd_mbuf_alloc_size);
4979	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
4980
4981	/* Save the mbuf and update our counter. */
4982	sc->pg_mbuf_ptr[*prod_idx] = m_new;
4983	sc->free_pg_bd--;
4984
4985	DBRUNMSG(BCE_INSANE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
4986		1));
4987
4988	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
4989		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
4990
4991bce_get_pg_buf_exit:
4992	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4993
4994	return(rc);
4995}
4996#endif /* BCE_USE_SPLIT_HEADER */
4997
4998/****************************************************************************/
4999/* Initialize the TX context memory.                                        */
5000/*                                                                          */
5001/* Returns:                                                                 */
5002/*   Nothing                                                                */
5003/****************************************************************************/
5004static void
5005bce_init_tx_context(struct bce_softc *sc)
5006{
5007	u32 val;
5008
5009	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5010
5011	/* Initialize the context ID for an L2 TX chain. */
5012	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5013		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5014		/* Set the CID type to support an L2 connection. */
5015		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5016		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5017		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5018		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val);
5019
5020		/* Point the hardware to the first page in the chain. */
5021		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5022		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5023		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5024		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5025	} else {
5026		/* Set the CID type to support an L2 connection. */
5027		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5028		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5029		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5030		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5031
5032		/* Point the hardware to the first page in the chain. */
5033		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5034		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5035		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5036		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5037	}
5038
5039	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5040}
5041
5042
5043/****************************************************************************/
5044/* Allocate memory and initialize the TX data structures.                   */
5045/*                                                                          */
5046/* Returns:                                                                 */
5047/*   0 for success, positive value for failure.                             */
5048/****************************************************************************/
5049static int
5050bce_init_tx_chain(struct bce_softc *sc)
5051{
5052	struct tx_bd *txbd;
5053	int i, rc = 0;
5054
5055	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5056
5057	/* Set the initial TX producer/consumer indices. */
5058	sc->tx_prod        = 0;
5059	sc->tx_cons        = 0;
5060	sc->tx_prod_bseq   = 0;
5061	sc->used_tx_bd     = 0;
5062	sc->max_tx_bd      = USABLE_TX_BD;
5063	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
5064	DBRUN(sc->tx_full_count = 0);
5065
5066	/*
5067	 * The NetXtreme II supports a linked-list structre called
5068	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
5069	 * consists of a series of 1 or more chain pages, each of which
5070	 * consists of a fixed number of BD entries.
5071	 * The last BD entry on each page is a pointer to the next page
5072	 * in the chain, and the last pointer in the BD chain
5073	 * points back to the beginning of the chain.
5074	 */
5075
5076	/* Set the TX next pointer chain entries. */
5077	for (i = 0; i < TX_PAGES; i++) {
5078		int j;
5079
5080		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5081
5082		/* Check if we've reached the last page. */
5083		if (i == (TX_PAGES - 1))
5084			j = 0;
5085		else
5086			j = i + 1;
5087
5088		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5089		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5090	}
5091
5092	bce_init_tx_context(sc);
5093
5094	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
5095	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5096
5097	return(rc);
5098}
5099
5100
5101/****************************************************************************/
5102/* Free memory and clear the TX data structures.                            */
5103/*                                                                          */
5104/* Returns:                                                                 */
5105/*   Nothing.                                                               */
5106/****************************************************************************/
5107static void
5108bce_free_tx_chain(struct bce_softc *sc)
5109{
5110	int i;
5111
5112	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5113
5114	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5115	for (i = 0; i < TOTAL_TX_BD; i++) {
5116		if (sc->tx_mbuf_ptr[i] != NULL) {
5117			if (sc->tx_mbuf_map[i] != NULL)
5118				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
5119					BUS_DMASYNC_POSTWRITE);
5120			m_freem(sc->tx_mbuf_ptr[i]);
5121			sc->tx_mbuf_ptr[i] = NULL;
5122			DBRUN(sc->debug_tx_mbuf_alloc--);
5123		}
5124	}
5125
5126	/* Clear each TX chain page. */
5127	for (i = 0; i < TX_PAGES; i++)
5128		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5129
5130	sc->used_tx_bd     = 0;
5131
5132	/* Check if we lost any mbufs in the process. */
5133	DBRUNIF((sc->debug_tx_mbuf_alloc),
5134		BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5135			"from tx chain!\n",
5136			__FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
5137
5138	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5139}
5140
5141
5142/****************************************************************************/
5143/* Initialize the RX context memory.                                        */
5144/*                                                                          */
5145/* Returns:                                                                 */
5146/*   Nothing                                                                */
5147/****************************************************************************/
5148static void
5149bce_init_rx_context(struct bce_softc *sc)
5150{
5151	u32 val;
5152
5153	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5154
5155	/* Initialize the type, size, and BD cache levels for the RX context. */
5156	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5157		BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5158		(0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5159
5160	/*
5161	 * Set the level for generating pause frames
5162	 * when the number of available rx_bd's gets
5163	 * too low (the low watermark) and the level
5164	 * when pause frames can be stopped (the high
5165	 * watermark).
5166	 */
5167	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5168		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5169		u32 lo_water, hi_water;
5170
5171		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5172		hi_water = USABLE_RX_BD / 4;
5173
5174		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5175		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5176
5177		if (hi_water > 0xf)
5178			hi_water = 0xf;
5179		else if (hi_water == 0)
5180			lo_water = 0;
5181		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5182			(hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5183	}
5184
5185 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5186
5187	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5188	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5189		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5190		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5191		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5192	}
5193
5194	/* Point the hardware to the first page in the chain. */
5195	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5196	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5197	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5198	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5199
5200	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5201}
5202
5203
5204/****************************************************************************/
5205/* Allocate memory and initialize the RX data structures.                   */
5206/*                                                                          */
5207/* Returns:                                                                 */
5208/*   0 for success, positive value for failure.                             */
5209/****************************************************************************/
5210static int
5211bce_init_rx_chain(struct bce_softc *sc)
5212{
5213	struct rx_bd *rxbd;
5214	int i, rc = 0;
5215
5216	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5217		BCE_VERBOSE_CTX);
5218
5219	/* Initialize the RX producer and consumer indices. */
5220	sc->rx_prod        = 0;
5221	sc->rx_cons        = 0;
5222	sc->rx_prod_bseq   = 0;
5223	sc->free_rx_bd     = USABLE_RX_BD;
5224	sc->max_rx_bd      = USABLE_RX_BD;
5225	DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
5226	DBRUN(sc->rx_empty_count = 0);
5227
5228	/* Initialize the RX next pointer chain entries. */
5229	for (i = 0; i < RX_PAGES; i++) {
5230		int j;
5231
5232		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5233
5234		/* Check if we've reached the last page. */
5235		if (i == (RX_PAGES - 1))
5236			j = 0;
5237		else
5238			j = i + 1;
5239
5240		/* Setup the chain page pointers. */
5241		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5242		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5243	}
5244
5245/* Fill up the RX chain. */
5246	bce_fill_rx_chain(sc);
5247
5248	for (i = 0; i < RX_PAGES; i++) {
5249		bus_dmamap_sync(
5250			sc->rx_bd_chain_tag,
5251	    	sc->rx_bd_chain_map[i],
5252		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5253	}
5254
5255	bce_init_rx_context(sc);
5256
5257	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
5258	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5259		BCE_VERBOSE_CTX);
5260	/* ToDo: Are there possible failure modes here? */
5261	return(rc);
5262}
5263
5264
5265/****************************************************************************/
5266/* Add mbufs to the RX chain until its full or an mbuf allocation error     */
5267/* occurs.                                                                  */
5268/*                                                                          */
5269/* Returns:                                                                 */
5270/*   Nothing                                                                */
5271/****************************************************************************/
5272static void
5273bce_fill_rx_chain(struct bce_softc *sc)
5274{
5275	u16 prod, prod_idx;
5276	u32 prod_bseq;
5277
5278	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5279		BCE_VERBOSE_CTX);
5280
5281	/* Get the RX chain producer indices. */
5282	prod      = sc->rx_prod;
5283	prod_bseq = sc->rx_prod_bseq;
5284
5285	/* Keep filling the RX chain until it's full. */
5286	while (sc->free_rx_bd > 0) {
5287		prod_idx = RX_CHAIN_IDX(prod);
5288		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
5289			/* Bail out if we can't add an mbuf to the chain. */
5290			break;
5291		}
5292		prod = NEXT_RX_BD(prod);
5293	}
5294
5295	/* Save the RX chain producer indices. */
5296	sc->rx_prod      = prod;
5297	sc->rx_prod_bseq = prod_bseq;
5298
5299	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5300		BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5301		__FUNCTION__, sc->rx_prod));
5302
5303	/* Write the mailbox and tell the chip about the waiting rx_bd's. */
5304	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
5305		sc->rx_prod);
5306	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
5307		sc->rx_prod_bseq);
5308
5309	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5310		BCE_VERBOSE_CTX);
5311}
5312
5313
5314/****************************************************************************/
5315/* Free memory and clear the RX data structures.                            */
5316/*                                                                          */
5317/* Returns:                                                                 */
5318/*   Nothing.                                                               */
5319/****************************************************************************/
5320static void
5321bce_free_rx_chain(struct bce_softc *sc)
5322{
5323	int i;
5324
5325	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5326
5327	/* Free any mbufs still in the RX mbuf chain. */
5328	for (i = 0; i < TOTAL_RX_BD; i++) {
5329		if (sc->rx_mbuf_ptr[i] != NULL) {
5330			if (sc->rx_mbuf_map[i] != NULL)
5331				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
5332					BUS_DMASYNC_POSTREAD);
5333			m_freem(sc->rx_mbuf_ptr[i]);
5334			sc->rx_mbuf_ptr[i] = NULL;
5335			DBRUN(sc->debug_rx_mbuf_alloc--);
5336		}
5337	}
5338
5339	/* Clear each RX chain page. */
5340	for (i = 0; i < RX_PAGES; i++)
5341		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
5342
5343	sc->free_rx_bd = sc->max_rx_bd;
5344
5345	/* Check if we lost any mbufs in the process. */
5346	DBRUNIF((sc->debug_rx_mbuf_alloc),
5347		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5348			__FUNCTION__, sc->debug_rx_mbuf_alloc));
5349
5350	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5351}
5352
5353
5354#ifdef BCE_USE_SPLIT_HEADER
5355/****************************************************************************/
5356/* Allocate memory and initialize the page data structures.                 */
5357/* Assumes that bce_init_rx_chain() has not already been called.            */
5358/*                                                                          */
5359/* Returns:                                                                 */
5360/*   0 for success, positive value for failure.                             */
5361/****************************************************************************/
5362static int
5363bce_init_pg_chain(struct bce_softc *sc)
5364{
5365	struct rx_bd *pgbd;
5366	int i, rc = 0;
5367	u32 val;
5368
5369	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5370		BCE_VERBOSE_CTX);
5371
5372	/* Initialize the page producer and consumer indices. */
5373	sc->pg_prod        = 0;
5374	sc->pg_cons        = 0;
5375	sc->free_pg_bd     = USABLE_PG_BD;
5376	sc->max_pg_bd      = USABLE_PG_BD;
5377	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5378	DBRUN(sc->pg_empty_count = 0);
5379
5380	/* Initialize the page next pointer chain entries. */
5381	for (i = 0; i < PG_PAGES; i++) {
5382		int j;
5383
5384		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5385
5386		/* Check if we've reached the last page. */
5387		if (i == (PG_PAGES - 1))
5388			j = 0;
5389		else
5390			j = i + 1;
5391
5392		/* Setup the chain page pointers. */
5393		pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5394		pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5395	}
5396
5397	/* Setup the MQ BIN mapping for host_pg_bidx. */
5398	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
5399		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
5400		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5401
5402	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5403
5404	/* Configure the rx_bd and page chain mbuf cluster size. */
5405	val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
5406	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
5407
5408	/* Configure the context reserved for jumbo support. */
5409	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5410		BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5411
5412	/* Point the hardware to the first page in the page chain. */
5413	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5414	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5415	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5416	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5417
5418	/* Fill up the page chain. */
5419	bce_fill_pg_chain(sc);
5420
5421	for (i = 0; i < PG_PAGES; i++) {
5422		bus_dmamap_sync(
5423			sc->pg_bd_chain_tag,
5424	    	sc->pg_bd_chain_map[i],
5425		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5426	}
5427
5428	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
5429	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5430		BCE_VERBOSE_CTX);
5431	return(rc);
5432}
5433
5434
5435/****************************************************************************/
5436/* Add mbufs to the page chain until its full or an mbuf allocation error   */
5437/* occurs.                                                                  */
5438/*                                                                          */
5439/* Returns:                                                                 */
5440/*   Nothing                                                                */
5441/****************************************************************************/
5442static void
5443bce_fill_pg_chain(struct bce_softc *sc)
5444{
5445	u16 prod, prod_idx;
5446
5447	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5448		BCE_VERBOSE_CTX);
5449
5450	/* Get the page chain prodcuer index. */
5451	prod = sc->pg_prod;
5452
5453	/* Keep filling the page chain until it's full. */
5454	while (sc->free_pg_bd > 0) {
5455		prod_idx = PG_CHAIN_IDX(prod);
5456		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
5457			/* Bail out if we can't add an mbuf to the chain. */
5458			break;
5459		}
5460		prod = NEXT_PG_BD(prod);
5461	}
5462
5463	/* Save the page chain producer index. */
5464	sc->pg_prod = prod;
5465
5466	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5467		BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5468		__FUNCTION__, sc->pg_prod));
5469
5470	/*
5471	 * Write the mailbox and tell the chip about
5472	 * the new rx_bd's in the page chain.
5473	 */
5474	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
5475		sc->pg_prod);
5476
5477	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5478		BCE_VERBOSE_CTX);
5479}
5480
5481
5482/****************************************************************************/
5483/* Free memory and clear the RX data structures.                            */
5484/*                                                                          */
5485/* Returns:                                                                 */
5486/*   Nothing.                                                               */
5487/****************************************************************************/
5488static void
5489bce_free_pg_chain(struct bce_softc *sc)
5490{
5491	int i;
5492
5493	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5494
5495	/* Free any mbufs still in the mbuf page chain. */
5496	for (i = 0; i < TOTAL_PG_BD; i++) {
5497		if (sc->pg_mbuf_ptr[i] != NULL) {
5498			if (sc->pg_mbuf_map[i] != NULL)
5499				bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
5500					BUS_DMASYNC_POSTREAD);
5501			m_freem(sc->pg_mbuf_ptr[i]);
5502			sc->pg_mbuf_ptr[i] = NULL;
5503			DBRUN(sc->debug_pg_mbuf_alloc--);
5504		}
5505	}
5506
5507	/* Clear each page chain pages. */
5508	for (i = 0; i < PG_PAGES; i++)
5509		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
5510
5511	sc->free_pg_bd = sc->max_pg_bd;
5512
5513	/* Check if we lost any mbufs in the process. */
5514	DBRUNIF((sc->debug_pg_mbuf_alloc),
5515		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
5516			__FUNCTION__, sc->debug_pg_mbuf_alloc));
5517
5518	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5519}
5520#endif /* BCE_USE_SPLIT_HEADER */
5521
5522
5523/****************************************************************************/
5524/* Set media options.                                                       */
5525/*                                                                          */
5526/* Returns:                                                                 */
5527/*   0 for success, positive value for failure.                             */
5528/****************************************************************************/
5529static int
5530bce_ifmedia_upd(struct ifnet *ifp)
5531{
5532	struct bce_softc *sc = ifp->if_softc;
5533
5534	DBENTER(BCE_VERBOSE);
5535
5536	BCE_LOCK(sc);
5537	bce_ifmedia_upd_locked(ifp);
5538	BCE_UNLOCK(sc);
5539
5540	DBEXIT(BCE_VERBOSE);
5541	return (0);
5542}
5543
5544
5545/****************************************************************************/
5546/* Set media options.                                                       */
5547/*                                                                          */
5548/* Returns:                                                                 */
5549/*   Nothing.                                                               */
5550/****************************************************************************/
5551static void
5552bce_ifmedia_upd_locked(struct ifnet *ifp)
5553{
5554	struct bce_softc *sc = ifp->if_softc;
5555	struct mii_data *mii;
5556
5557	DBENTER(BCE_VERBOSE);
5558
5559	BCE_LOCK_ASSERT(sc);
5560
5561	mii = device_get_softc(sc->bce_miibus);
5562
5563	/* Make sure the MII bus has been enumerated. */
5564	if (mii) {
5565		sc->bce_link = 0;
5566		if (mii->mii_instance) {
5567			struct mii_softc *miisc;
5568
5569			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5570				mii_phy_reset(miisc);
5571		}
5572		mii_mediachg(mii);
5573	}
5574
5575	DBEXIT(BCE_VERBOSE);
5576}
5577
5578
5579/****************************************************************************/
5580/* Reports current media status.                                            */
5581/*                                                                          */
5582/* Returns:                                                                 */
5583/*   Nothing.                                                               */
5584/****************************************************************************/
5585static void
5586bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5587{
5588	struct bce_softc *sc = ifp->if_softc;
5589	struct mii_data *mii;
5590
5591	DBENTER(BCE_VERBOSE);
5592
5593	BCE_LOCK(sc);
5594
5595	mii = device_get_softc(sc->bce_miibus);
5596
5597	mii_pollstat(mii);
5598	ifmr->ifm_active = mii->mii_media_active;
5599	ifmr->ifm_status = mii->mii_media_status;
5600
5601	BCE_UNLOCK(sc);
5602
5603	DBEXIT(BCE_VERBOSE);
5604}
5605
5606
5607/****************************************************************************/
5608/* Handles PHY generated interrupt events.                                  */
5609/*                                                                          */
5610/* Returns:                                                                 */
5611/*   Nothing.                                                               */
5612/****************************************************************************/
5613static void
5614bce_phy_intr(struct bce_softc *sc)
5615{
5616	u32 new_link_state, old_link_state;
5617
5618	DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5619
5620	new_link_state = sc->status_block->status_attn_bits &
5621		STATUS_ATTN_BITS_LINK_STATE;
5622	old_link_state = sc->status_block->status_attn_bits_ack &
5623		STATUS_ATTN_BITS_LINK_STATE;
5624
5625	/* Handle any changes if the link state has changed. */
5626	if (new_link_state != old_link_state) {
5627
5628		/* Update the status_attn_bits_ack field in the status block. */
5629		if (new_link_state) {
5630			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
5631				STATUS_ATTN_BITS_LINK_STATE);
5632			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
5633				__FUNCTION__);
5634		}
5635		else {
5636			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
5637				STATUS_ATTN_BITS_LINK_STATE);
5638			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
5639				__FUNCTION__);
5640		}
5641
5642		/*
5643		 * Assume link is down and allow
5644		 * tick routine to update the state
5645		 * based on the actual media state.
5646		 */
5647		sc->bce_link = 0;
5648		callout_stop(&sc->bce_tick_callout);
5649		bce_tick(sc);
5650	}
5651
5652	/* Acknowledge the link change interrupt. */
5653	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
5654
5655	DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5656}
5657
5658
5659/****************************************************************************/
5660/* Reads the receive consumer value from the status block (skipping over    */
5661/* chain page pointer if necessary).                                        */
5662/*                                                                          */
5663/* Returns:                                                                 */
5664/*   hw_cons                                                                */
5665/****************************************************************************/
5666static inline u16
5667bce_get_hw_rx_cons(struct bce_softc *sc)
5668{
5669	u16 hw_cons;
5670
5671	rmb();
5672	hw_cons = sc->status_block->status_rx_quick_consumer_index0;
5673	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5674		hw_cons++;
5675
5676	return hw_cons;
5677}
5678
5679/****************************************************************************/
5680/* Handles received frame interrupt events.                                 */
5681/*                                                                          */
5682/* Returns:                                                                 */
5683/*   Nothing.                                                               */
5684/****************************************************************************/
5685static void
5686bce_rx_intr(struct bce_softc *sc)
5687{
5688	struct ifnet *ifp = sc->bce_ifp;
5689	struct l2_fhdr *l2fhdr;
5690	unsigned int pkt_len;
5691	u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
5692	u32 status;
5693#ifdef BCE_USE_SPLIT_HEADER
5694	unsigned int rem_len;
5695	u16 sw_pg_cons, sw_pg_cons_idx;
5696#endif
5697
5698	DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5699	DBRUN(sc->rx_interrupts++);
5700	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
5701		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5702		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5703
5704	/* Prepare the RX chain pages to be accessed by the host CPU. */
5705	for (int i = 0; i < RX_PAGES; i++)
5706		bus_dmamap_sync(sc->rx_bd_chain_tag,
5707		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
5708
5709#ifdef BCE_USE_SPLIT_HEADER
5710	/* Prepare the page chain pages to be accessed by the host CPU. */
5711	for (int i = 0; i < PG_PAGES; i++)
5712		bus_dmamap_sync(sc->pg_bd_chain_tag,
5713		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
5714#endif
5715
5716	/* Get the hardware's view of the RX consumer index. */
5717	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5718
5719	/* Get working copies of the driver's view of the consumer indices. */
5720	sw_rx_cons = sc->rx_cons;
5721#ifdef BCE_USE_SPLIT_HEADER
5722	sw_pg_cons = sc->pg_cons;
5723#endif
5724
5725	/* Update some debug statistics counters */
5726	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5727		sc->rx_low_watermark = sc->free_rx_bd);
5728	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
5729
5730	/* Scan through the receive chain as long as there is work to do */
5731	/* ToDo: Consider setting a limit on the number of packets processed. */
5732	rmb();
5733	while (sw_rx_cons != hw_rx_cons) {
5734		struct mbuf *m0;
5735
5736		/* Convert the producer/consumer indices to an actual rx_bd index. */
5737		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
5738
5739		/* Unmap the mbuf from DMA space. */
5740		bus_dmamap_sync(sc->rx_mbuf_tag,
5741		    sc->rx_mbuf_map[sw_rx_cons_idx],
5742	    	BUS_DMASYNC_POSTREAD);
5743		bus_dmamap_unload(sc->rx_mbuf_tag,
5744		    sc->rx_mbuf_map[sw_rx_cons_idx]);
5745
5746		/* Remove the mbuf from the RX chain. */
5747		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
5748		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
5749		DBRUN(sc->debug_rx_mbuf_alloc--);
5750		sc->free_rx_bd++;
5751
5752		/*
5753		 * Frames received on the NetXteme II are prepended
5754		 * with an l2_fhdr structure which provides status
5755		 * information about the received frame (including
5756		 * VLAN tags and checksum info).  The frames are also
5757		 * automatically adjusted to align the IP header
5758		 * (i.e. two null bytes are inserted before the
5759		 * Ethernet header).  As a result the data DMA'd by
5760		 * the controller into the mbuf is as follows:
5761		 * +---------+-----+---------------------+-----+
5762		 * | l2_fhdr | pad | packet data         | FCS |
5763		 * +---------+-----+---------------------+-----+
5764		 * The l2_fhdr needs to be checked and skipped and
5765		 * the FCS needs to be stripped before sending the
5766		 * packet up the stack.
5767		 */
5768		l2fhdr  = mtod(m0, struct l2_fhdr *);
5769
5770		/* Get the packet data + FCS length and the status. */
5771		pkt_len = l2fhdr->l2_fhdr_pkt_len;
5772		status  = l2fhdr->l2_fhdr_status;
5773
5774		/*
5775		 * Skip over the l2_fhdr and pad, resulting in the
5776		 * following data in the mbuf:
5777		 * +---------------------+-----+
5778		 * | packet data         | FCS |
5779		 * +---------------------+-----+
5780		 */
5781		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
5782
5783#ifdef BCE_USE_SPLIT_HEADER
5784		/*
5785		 * Check whether the received frame fits in a single
5786		 * mbuf or not (i.e. packet data + FCS <=
5787		 * sc->rx_bd_mbuf_data_len bytes).
5788		 */
5789		if (pkt_len > m0->m_len) {
5790			/*
5791			 * The received frame is larger than a single mbuf.
5792			 * If the frame was a TCP frame then only the TCP
5793			 * header is placed in the mbuf, the remaining
5794			 * payload (including FCS) is placed in the page
5795			 * chain, the SPLIT flag is set, and the header
5796			 * length is placed in the IP checksum field.
5797			 * If the frame is not a TCP frame then the mbuf
5798			 * is filled and the remaining bytes are placed
5799			 * in the page chain.
5800			 */
5801
5802			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n",
5803				__FUNCTION__);
5804
5805			/*
5806			 * When the page chain is enabled and the TCP
5807			 * header has been split from the TCP payload,
5808			 * the ip_xsum structure will reflect the length
5809			 * of the TCP header, not the IP checksum.  Set
5810			 * the packet length of the mbuf accordingly.
5811			 */
5812		 	if (status & L2_FHDR_STATUS_SPLIT)
5813				m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
5814
5815			rem_len = pkt_len - m0->m_len;
5816
5817			/* Pull mbufs off the page chain for the remaining data. */
5818			while (rem_len > 0) {
5819				struct mbuf *m_pg;
5820
5821				sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
5822
5823				/* Remove the mbuf from the page chain. */
5824				m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
5825				sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
5826				DBRUN(sc->debug_pg_mbuf_alloc--);
5827				sc->free_pg_bd++;
5828
5829				/* Unmap the page chain mbuf from DMA space. */
5830				bus_dmamap_sync(sc->pg_mbuf_tag,
5831					sc->pg_mbuf_map[sw_pg_cons_idx],
5832					BUS_DMASYNC_POSTREAD);
5833				bus_dmamap_unload(sc->pg_mbuf_tag,
5834					sc->pg_mbuf_map[sw_pg_cons_idx]);
5835
5836				/* Adjust the mbuf length. */
5837				if (rem_len < m_pg->m_len) {
5838					/* The mbuf chain is complete. */
5839					m_pg->m_len = rem_len;
5840					rem_len = 0;
5841				} else {
5842					/* More packet data is waiting. */
5843					rem_len -= m_pg->m_len;
5844				}
5845
5846				/* Concatenate the mbuf cluster to the mbuf. */
5847				m_cat(m0, m_pg);
5848
5849				sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
5850			}
5851
5852			/* Set the total packet length. */
5853			m0->m_pkthdr.len = pkt_len;
5854
5855		} else {
5856			/*
5857			 * The received packet is small and fits in a
5858			 * single mbuf (i.e. the l2_fhdr + pad + packet +
5859			 * FCS <= MHLEN).  In other words, the packet is
5860			 * 154 bytes or less in size.
5861			 */
5862
5863			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n",
5864				__FUNCTION__);
5865
5866			/* Set the total packet length. */
5867			m0->m_pkthdr.len = m0->m_len = pkt_len;
5868		}
5869#endif
5870
5871		/* Remove the trailing Ethernet FCS. */
5872		m_adj(m0, -ETHER_CRC_LEN);
5873
5874		/* Check that the resulting mbuf chain is valid. */
5875		DBRUN(m_sanity(m0, FALSE));
5876		DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
5877			(m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
5878			BCE_PRINTF("Invalid Ethernet frame size!\n");
5879			m_print(m0, 128));
5880
5881		DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
5882			BCE_PRINTF("Simulating l2_fhdr status error.\n");
5883			status = status | L2_FHDR_ERRORS_PHY_DECODE);
5884
5885		/* Check the received frame for errors. */
5886		if (status & (L2_FHDR_ERRORS_BAD_CRC |
5887			L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
5888			L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
5889
5890			/* Log the error and release the mbuf. */
5891			ifp->if_ierrors++;
5892			DBRUN(sc->l2fhdr_status_errors++);
5893
5894			m_freem(m0);
5895			m0 = NULL;
5896			goto bce_rx_int_next_rx;
5897		}
5898
5899		/* Send the packet to the appropriate interface. */
5900		m0->m_pkthdr.rcvif = ifp;
5901
5902		/* Assume no hardware checksum. */
5903		m0->m_pkthdr.csum_flags = 0;
5904
5905		/* Validate the checksum if offload enabled. */
5906		if (ifp->if_capenable & IFCAP_RXCSUM) {
5907
5908			/* Check for an IP datagram. */
5909		 	if (!(status & L2_FHDR_STATUS_SPLIT) &&
5910				(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
5911				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
5912
5913				/* Check if the IP checksum is valid. */
5914				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
5915					m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
5916			}
5917
5918			/* Check for a valid TCP/UDP frame. */
5919			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
5920				L2_FHDR_STATUS_UDP_DATAGRAM)) {
5921
5922				/* Check for a good TCP/UDP checksum. */
5923				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
5924					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
5925					m0->m_pkthdr.csum_data =
5926					    l2fhdr->l2_fhdr_tcp_udp_xsum;
5927					m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
5928						| CSUM_PSEUDO_HDR);
5929				}
5930			}
5931		}
5932
5933		/*
5934		 * If we received a packet with a vlan tag,
5935		 * attach that information to the packet.
5936		 */
5937		if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
5938#if __FreeBSD_version < 700000
5939			VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
5940#else
5941			m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
5942			m0->m_flags |= M_VLANTAG;
5943#endif
5944		}
5945
5946		/* Pass the mbuf off to the upper layers. */
5947		ifp->if_ipackets++;
5948
5949bce_rx_int_next_rx:
5950		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
5951
5952		/* If we have a packet, pass it up the stack */
5953		if (m0) {
5954			/* Make sure we don't lose our place when we release the lock. */
5955			sc->rx_cons = sw_rx_cons;
5956#ifdef BCE_USE_SPLIT_HEADER
5957			sc->pg_cons = sw_pg_cons;
5958#endif
5959
5960			BCE_UNLOCK(sc);
5961			(*ifp->if_input)(ifp, m0);
5962			BCE_LOCK(sc);
5963
5964			/* Recover our place. */
5965			sw_rx_cons = sc->rx_cons;
5966#ifdef BCE_USE_SPLIT_HEADER
5967			sw_pg_cons = sc->pg_cons;
5968#endif
5969		}
5970
5971		/* Refresh hw_cons to see if there's new work */
5972		if (sw_rx_cons == hw_rx_cons)
5973			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5974	}
5975
5976	/* No new packets to process.  Refill the RX and page chains and exit. */
5977#ifdef BCE_USE_SPLIT_HEADER
5978	sc->pg_cons = sw_pg_cons;
5979	bce_fill_pg_chain(sc);
5980#endif
5981
5982	sc->rx_cons = sw_rx_cons;
5983	bce_fill_rx_chain(sc);
5984
5985	for (int i = 0; i < RX_PAGES; i++)
5986		bus_dmamap_sync(sc->rx_bd_chain_tag,
5987		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
5988
5989#ifdef BCE_USE_SPLIT_HEADER
5990	for (int i = 0; i < PG_PAGES; i++)
5991		bus_dmamap_sync(sc->pg_bd_chain_tag,
5992		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
5993#endif
5994
5995	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
5996		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5997		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5998	DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5999}
6000
6001
6002/****************************************************************************/
6003/* Reads the transmit consumer value from the status block (skipping over   */
6004/* chain page pointer if necessary).                                        */
6005/*                                                                          */
6006/* Returns:                                                                 */
6007/*   hw_cons                                                                */
6008/****************************************************************************/
6009static inline u16
6010bce_get_hw_tx_cons(struct bce_softc *sc)
6011{
6012	u16 hw_cons;
6013
6014	mb();
6015	hw_cons = sc->status_block->status_tx_quick_consumer_index0;
6016	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6017		hw_cons++;
6018
6019	return hw_cons;
6020}
6021
6022
6023/****************************************************************************/
6024/* Handles transmit completion interrupt events.                            */
6025/*                                                                          */
6026/* Returns:                                                                 */
6027/*   Nothing.                                                               */
6028/****************************************************************************/
6029static void
6030bce_tx_intr(struct bce_softc *sc)
6031{
6032	struct ifnet *ifp = sc->bce_ifp;
6033	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
6034
6035	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6036	DBRUN(sc->tx_interrupts++);
6037	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
6038		"tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6039		__FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6040
6041	BCE_LOCK_ASSERT(sc);
6042
6043	/* Get the hardware's view of the TX consumer index. */
6044	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6045	sw_tx_cons = sc->tx_cons;
6046
6047	/* Prevent speculative reads from getting ahead of the status block. */
6048	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6049		BUS_SPACE_BARRIER_READ);
6050
6051	/* Cycle through any completed TX chain page entries. */
6052	while (sw_tx_cons != hw_tx_cons) {
6053#ifdef BCE_DEBUG
6054		struct tx_bd *txbd = NULL;
6055#endif
6056		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
6057
6058		DBPRINT(sc, BCE_INFO_SEND,
6059			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
6060			"sw_tx_chain_cons = 0x%04X\n",
6061			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
6062
6063		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
6064			BCE_PRINTF("%s(%d): TX chain consumer out of range! "
6065				" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
6066				(int) MAX_TX_BD);
6067			bce_breakpoint(sc));
6068
6069		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
6070				[TX_IDX(sw_tx_chain_cons)]);
6071
6072		DBRUNIF((txbd == NULL),
6073			BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
6074				__FILE__, __LINE__, sw_tx_chain_cons);
6075			bce_breakpoint(sc));
6076
6077		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
6078			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
6079
6080		/*
6081		 * Free the associated mbuf. Remember
6082		 * that only the last tx_bd of a packet
6083		 * has an mbuf pointer and DMA map.
6084		 */
6085		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
6086
6087			/* Validate that this is the last tx_bd. */
6088			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
6089				BCE_PRINTF("%s(%d): tx_bd END flag not set but "
6090				"txmbuf == NULL!\n", __FILE__, __LINE__);
6091				bce_breakpoint(sc));
6092
6093			DBRUNMSG(BCE_INFO_SEND,
6094				BCE_PRINTF("%s(): Unloading map/freeing mbuf "
6095					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
6096
6097			/* Unmap the mbuf. */
6098			bus_dmamap_unload(sc->tx_mbuf_tag,
6099			    sc->tx_mbuf_map[sw_tx_chain_cons]);
6100
6101			/* Free the mbuf. */
6102			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
6103			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
6104			DBRUN(sc->debug_tx_mbuf_alloc--);
6105
6106			ifp->if_opackets++;
6107		}
6108
6109		sc->used_tx_bd--;
6110		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
6111
6112		/* Refresh hw_cons to see if there's new work. */
6113		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6114
6115		/* Prevent speculative reads from getting ahead of the status block. */
6116		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6117			BUS_SPACE_BARRIER_READ);
6118	}
6119
6120	/* Clear the TX timeout timer. */
6121	sc->watchdog_timer = 0;
6122
6123	/* Clear the tx hardware queue full flag. */
6124	if (sc->used_tx_bd < sc->max_tx_bd) {
6125		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
6126			DBPRINT(sc, BCE_INFO_SEND,
6127				"%s(): Open TX chain! %d/%d (used/total)\n",
6128				__FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
6129		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6130	}
6131
6132	sc->tx_cons = sw_tx_cons;
6133
6134	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
6135		"tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6136		__FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6137	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6138}
6139
6140
6141/****************************************************************************/
6142/* Disables interrupt generation.                                           */
6143/*                                                                          */
6144/* Returns:                                                                 */
6145/*   Nothing.                                                               */
6146/****************************************************************************/
6147static void
6148bce_disable_intr(struct bce_softc *sc)
6149{
6150	DBENTER(BCE_VERBOSE_INTR);
6151
6152	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
6153	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
6154
6155	DBEXIT(BCE_VERBOSE_INTR);
6156}
6157
6158
6159/****************************************************************************/
6160/* Enables interrupt generation.                                            */
6161/*                                                                          */
6162/* Returns:                                                                 */
6163/*   Nothing.                                                               */
6164/****************************************************************************/
6165static void
6166bce_enable_intr(struct bce_softc *sc, int coal_now)
6167{
6168	DBENTER(BCE_VERBOSE_INTR);
6169
6170	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6171	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
6172	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
6173
6174	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6175	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
6176
6177	/* Force an immediate interrupt (whether there is new data or not). */
6178	if (coal_now)
6179		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
6180
6181	DBEXIT(BCE_VERBOSE_INTR);
6182}
6183
6184
6185/****************************************************************************/
6186/* Handles controller initialization.                                       */
6187/*                                                                          */
6188/* Returns:                                                                 */
6189/*   Nothing.                                                               */
6190/****************************************************************************/
6191static void
6192bce_init_locked(struct bce_softc *sc)
6193{
6194	struct ifnet *ifp;
6195	u32 ether_mtu = 0;
6196
6197	DBENTER(BCE_VERBOSE_RESET);
6198
6199	BCE_LOCK_ASSERT(sc);
6200
6201	ifp = sc->bce_ifp;
6202
6203	/* Check if the driver is still running and bail out if it is. */
6204	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6205		goto bce_init_locked_exit;
6206
6207	bce_stop(sc);
6208
6209	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
6210		BCE_PRINTF("%s(%d): Controller reset failed!\n",
6211			__FILE__, __LINE__);
6212		goto bce_init_locked_exit;
6213	}
6214
6215	if (bce_chipinit(sc)) {
6216		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
6217			__FILE__, __LINE__);
6218		goto bce_init_locked_exit;
6219	}
6220
6221	if (bce_blockinit(sc)) {
6222		BCE_PRINTF("%s(%d): Block initialization failed!\n",
6223			__FILE__, __LINE__);
6224		goto bce_init_locked_exit;
6225	}
6226
6227	/* Load our MAC address. */
6228	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
6229	bce_set_mac_addr(sc);
6230
6231	/*
6232	 * Calculate and program the hardware Ethernet MTU
6233	 * size. Be generous on the receive if we have room.
6234	 */
6235#ifdef BCE_USE_SPLIT_HEADER
6236	if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size))
6237		ether_mtu = sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size;
6238#else
6239	if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
6240		ether_mtu = sc->rx_bd_mbuf_data_len;
6241#endif
6242	else
6243		ether_mtu = ifp->if_mtu;
6244
6245	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
6246
6247	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
6248		ether_mtu);
6249
6250	/* Program the mtu, enabling jumbo frame support if necessary. */
6251	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
6252		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
6253			min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
6254			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
6255	else
6256		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
6257
6258	DBPRINT(sc, BCE_INFO_LOAD,
6259		"%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, "
6260		"rx_bd_mbuf_align_pad = %d, pg_bd_mbuf_alloc_size = %d\n",
6261		__FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
6262		sc->rx_bd_mbuf_align_pad, sc->pg_bd_mbuf_alloc_size);
6263
6264	/* Program appropriate promiscuous/multicast filtering. */
6265	bce_set_rx_mode(sc);
6266
6267#ifdef BCE_USE_SPLIT_HEADER
6268	/* Init page buffer descriptor chain. */
6269	bce_init_pg_chain(sc);
6270#endif
6271
6272	/* Init RX buffer descriptor chain. */
6273	bce_init_rx_chain(sc);
6274
6275	/* Init TX buffer descriptor chain. */
6276	bce_init_tx_chain(sc);
6277
6278	/* Enable host interrupts. */
6279	bce_enable_intr(sc, 1);
6280
6281	bce_ifmedia_upd_locked(ifp);
6282
6283	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6284	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6285
6286	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6287
6288bce_init_locked_exit:
6289	DBEXIT(BCE_VERBOSE_RESET);
6290}
6291
6292
6293/****************************************************************************/
6294/* Initialize the controller just enough so that any management firmware    */
6295/* running on the device will continue to operate correctly.                */
6296/*                                                                          */
6297/* Returns:                                                                 */
6298/*   Nothing.                                                               */
6299/****************************************************************************/
6300static void
6301bce_mgmt_init_locked(struct bce_softc *sc)
6302{
6303	struct ifnet *ifp;
6304
6305	DBENTER(BCE_VERBOSE_RESET);
6306
6307	BCE_LOCK_ASSERT(sc);
6308
6309	/* Bail out if management firmware is not running. */
6310	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
6311		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
6312			"No management firmware running...\n");
6313		goto bce_mgmt_init_locked_exit;
6314	}
6315
6316	ifp = sc->bce_ifp;
6317
6318	/* Enable all critical blocks in the MAC. */
6319	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
6320	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
6321	DELAY(20);
6322
6323	bce_ifmedia_upd_locked(ifp);
6324
6325bce_mgmt_init_locked_exit:
6326	DBEXIT(BCE_VERBOSE_RESET);
6327}
6328
6329
6330/****************************************************************************/
6331/* Handles controller initialization when called from an unlocked routine.  */
6332/*                                                                          */
6333/* Returns:                                                                 */
6334/*   Nothing.                                                               */
6335/****************************************************************************/
6336static void
6337bce_init(void *xsc)
6338{
6339	struct bce_softc *sc = xsc;
6340
6341	DBENTER(BCE_VERBOSE_RESET);
6342
6343	BCE_LOCK(sc);
6344	bce_init_locked(sc);
6345	BCE_UNLOCK(sc);
6346
6347	DBEXIT(BCE_VERBOSE_RESET);
6348}
6349
6350
6351/****************************************************************************/
6352/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
6353/* memory visible to the controller.                                        */
6354/*                                                                          */
6355/* Returns:                                                                 */
6356/*   0 for success, positive value for failure.                             */
6357/* Modified:                                                                */
6358/*   m_head: May be set to NULL if MBUF is excessively fragmented.          */
6359/****************************************************************************/
6360static int
6361bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
6362{
6363	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
6364	bus_dmamap_t map;
6365	struct tx_bd *txbd = NULL;
6366	struct mbuf *m0;
6367	struct ether_vlan_header *eh;
6368	struct ip *ip;
6369	struct tcphdr *th;
6370	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
6371	u32 prod_bseq;
6372	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
6373
6374#ifdef BCE_DEBUG
6375	u16 debug_prod;
6376#endif
6377	int i, error, nsegs, rc = 0;
6378
6379	DBENTER(BCE_VERBOSE_SEND);
6380	DBPRINT(sc, BCE_INFO_SEND,
6381		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = %04X, "
6382		"tx_prod_bseq = 0x%08X\n",
6383		__FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6384		sc->tx_prod_bseq);
6385
6386	/* Transfer any checksum offload flags to the bd. */
6387	m0 = *m_head;
6388	if (m0->m_pkthdr.csum_flags) {
6389		if (m0->m_pkthdr.csum_flags & CSUM_IP)
6390			flags |= TX_BD_FLAGS_IP_CKSUM;
6391		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
6392			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6393		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
6394			/* For TSO the controller needs two pieces of info, */
6395			/* the MSS and the IP+TCP options length.           */
6396			mss = htole16(m0->m_pkthdr.tso_segsz);
6397
6398			/* Map the header and find the Ethernet type & header length */
6399			eh = mtod(m0, struct ether_vlan_header *);
6400			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
6401				etype = ntohs(eh->evl_proto);
6402				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6403			} else {
6404				etype = ntohs(eh->evl_encap_proto);
6405				e_hlen = ETHER_HDR_LEN;
6406			}
6407
6408			/* Check for supported TSO Ethernet types (only IPv4 for now) */
6409			switch (etype) {
6410				case ETHERTYPE_IP:
6411					ip = (struct ip *)(m0->m_data + e_hlen);
6412
6413					/* TSO only supported for TCP protocol */
6414					if (ip->ip_p != IPPROTO_TCP) {
6415						BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
6416							__FILE__, __LINE__);
6417						goto bce_tx_encap_skip_tso;
6418					}
6419
6420					/* Get IP header length in bytes (min 20) */
6421					ip_hlen = ip->ip_hl << 2;
6422
6423					/* Get the TCP header length in bytes (min 20) */
6424					th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
6425					tcp_hlen = (th->th_off << 2);
6426
6427					/* IP header length and checksum will be calc'd by hardware */
6428					ip_len = ip->ip_len;
6429					ip->ip_len = 0;
6430					ip->ip_sum = 0;
6431					break;
6432				case ETHERTYPE_IPV6:
6433					BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
6434						__FILE__, __LINE__);
6435					goto bce_tx_encap_skip_tso;
6436				default:
6437					BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
6438						__FILE__, __LINE__);
6439					goto bce_tx_encap_skip_tso;
6440			}
6441
6442			hdr_len = e_hlen + ip_hlen + tcp_hlen;
6443
6444			DBPRINT(sc, BCE_EXTREME_SEND,
6445				"%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
6446				 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
6447
6448			/* Set the LSO flag in the TX BD */
6449			flags |= TX_BD_FLAGS_SW_LSO;
6450			/* Set the length of IP + TCP options (in 32 bit words) */
6451			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
6452
6453bce_tx_encap_skip_tso:
6454			DBRUN(sc->requested_tso_frames++);
6455		}
6456	}
6457
6458	/* Transfer any VLAN tags to the bd. */
6459	if (m0->m_flags & M_VLANTAG) {
6460		flags |= TX_BD_FLAGS_VLAN_TAG;
6461		vlan_tag = m0->m_pkthdr.ether_vtag;
6462	}
6463
6464	/* Map the mbuf into DMAable memory. */
6465	prod = sc->tx_prod;
6466	chain_prod = TX_CHAIN_IDX(prod);
6467	map = sc->tx_mbuf_map[chain_prod];
6468
6469	/* Map the mbuf into our DMA address space. */
6470	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6471	    segs, &nsegs, BUS_DMA_NOWAIT);
6472
6473	/* Check if the DMA mapping was successful */
6474	if (error == EFBIG) {
6475
6476		/* The mbuf is too fragmented for our DMA mapping. */
6477   		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
6478			__FUNCTION__, nsegs);
6479		DBRUN(bce_dump_mbuf(sc, m0););
6480
6481		/* Try to defrag the mbuf. */
6482		m0 = m_defrag(*m_head, M_DONTWAIT);
6483		if (m0 == NULL) {
6484			/* Defrag was unsuccessful */
6485			m_freem(*m_head);
6486			*m_head = NULL;
6487			sc->mbuf_alloc_failed++;
6488			rc = ENOBUFS;
6489			goto bce_tx_encap_exit;
6490		}
6491
6492		/* Defrag was successful, try mapping again */
6493		*m_head = m0;
6494		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6495		    segs, &nsegs, BUS_DMA_NOWAIT);
6496
6497		/* Still getting an error after a defrag. */
6498		if (error == ENOMEM) {
6499			/* Insufficient DMA buffers available. */
6500			sc->tx_dma_map_failures++;
6501			rc = error;
6502			goto bce_tx_encap_exit;
6503		} else if (error != 0) {
6504			/* Still can't map the mbuf, release it and return an error. */
6505			BCE_PRINTF(
6506			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
6507			    __FILE__, __LINE__);
6508			m_freem(m0);
6509			*m_head = NULL;
6510			sc->tx_dma_map_failures++;
6511			rc = ENOBUFS;
6512			goto bce_tx_encap_exit;
6513		}
6514	} else if (error == ENOMEM) {
6515		/* Insufficient DMA buffers available. */
6516		sc->tx_dma_map_failures++;
6517		rc = error;
6518		goto bce_tx_encap_exit;
6519	} else if (error != 0) {
6520		m_freem(m0);
6521		*m_head = NULL;
6522		sc->tx_dma_map_failures++;
6523		rc = error;
6524		goto bce_tx_encap_exit;
6525	}
6526
6527	/* Make sure there's room in the chain */
6528	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
6529		bus_dmamap_unload(sc->tx_mbuf_tag, map);
6530		rc = ENOBUFS;
6531		goto bce_tx_encap_exit;
6532	}
6533
6534	/* prod points to an empty tx_bd at this point. */
6535	prod_bseq  = sc->tx_prod_bseq;
6536
6537#ifdef BCE_DEBUG
6538	debug_prod = chain_prod;
6539#endif
6540
6541	DBPRINT(sc, BCE_INFO_SEND,
6542		"%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
6543		"prod_bseq = 0x%08X\n",
6544		__FUNCTION__, prod, chain_prod, prod_bseq);
6545
6546	/*
6547	 * Cycle through each mbuf segment that makes up
6548	 * the outgoing frame, gathering the mapping info
6549	 * for that segment and creating a tx_bd for
6550	 * the mbuf.
6551	 */
6552	for (i = 0; i < nsegs ; i++) {
6553
6554		chain_prod = TX_CHAIN_IDX(prod);
6555		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
6556
6557		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
6558		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
6559		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
6560		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
6561		txbd->tx_bd_flags = htole16(flags);
6562		prod_bseq += segs[i].ds_len;
6563		if (i == 0)
6564			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
6565		prod = NEXT_TX_BD(prod);
6566	}
6567
6568	/* Set the END flag on the last TX buffer descriptor. */
6569	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
6570
6571	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
6572
6573	DBPRINT(sc, BCE_INFO_SEND,
6574		"%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
6575		"prod_bseq = 0x%08X\n",
6576		__FUNCTION__, prod, chain_prod, prod_bseq);
6577
6578	/*
6579	 * Ensure that the mbuf pointer for this transmission
6580	 * is placed at the array index of the last
6581	 * descriptor in this chain.  This is done
6582	 * because a single map is used for all
6583	 * segments of the mbuf and we don't want to
6584	 * unload the map before all of the segments
6585	 * have been freed.
6586	 */
6587	sc->tx_mbuf_ptr[chain_prod] = m0;
6588	sc->used_tx_bd += nsegs;
6589
6590	/* Update some debug statistic counters */
6591	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
6592		sc->tx_hi_watermark = sc->used_tx_bd);
6593	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
6594	DBRUNIF(sc->debug_tx_mbuf_alloc++);
6595
6596	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
6597
6598	/* prod points to the next free tx_bd at this point. */
6599	sc->tx_prod = prod;
6600	sc->tx_prod_bseq = prod_bseq;
6601
6602	DBPRINT(sc, BCE_INFO_SEND,
6603		"%s(exit): prod = 0x%04X, chain_prod = %04X, "
6604		"prod_bseq = 0x%08X\n",
6605		__FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6606		sc->tx_prod_bseq);
6607
6608bce_tx_encap_exit:
6609	DBEXIT(BCE_VERBOSE_SEND);
6610	return(rc);
6611}
6612
6613
6614/****************************************************************************/
6615/* Main transmit routine when called from another routine with a lock.      */
6616/*                                                                          */
6617/* Returns:                                                                 */
6618/*   Nothing.                                                               */
6619/****************************************************************************/
6620static void
6621bce_start_locked(struct ifnet *ifp)
6622{
6623	struct bce_softc *sc = ifp->if_softc;
6624	struct mbuf *m_head = NULL;
6625	int count = 0;
6626	u16 tx_prod, tx_chain_prod;
6627
6628	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6629
6630	BCE_LOCK_ASSERT(sc);
6631
6632	/* prod points to the next free tx_bd. */
6633	tx_prod = sc->tx_prod;
6634	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
6635
6636	DBPRINT(sc, BCE_INFO_SEND,
6637		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
6638		"tx_prod_bseq = 0x%08X\n",
6639		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
6640
6641	/* If there's no link or the transmit queue is empty then just exit. */
6642	if (!sc->bce_link) {
6643		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
6644			__FUNCTION__);
6645		goto bce_start_locked_exit;
6646	}
6647
6648	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
6649		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
6650			__FUNCTION__);
6651		goto bce_start_locked_exit;
6652	}
6653
6654	/*
6655	 * Keep adding entries while there is space in the ring.
6656	 */
6657	while (sc->used_tx_bd < sc->max_tx_bd) {
6658
6659		/* Check for any frames to send. */
6660		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
6661
6662		/* Stop when the transmit queue is empty. */
6663		if (m_head == NULL)
6664			break;
6665
6666		/*
6667		 * Pack the data into the transmit ring. If we
6668		 * don't have room, place the mbuf back at the
6669		 * head of the queue and set the OACTIVE flag
6670		 * to wait for the NIC to drain the chain.
6671		 */
6672		if (bce_tx_encap(sc, &m_head)) {
6673			/* No room, put the frame back on the transmit queue. */
6674			if (m_head != NULL)
6675				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
6676			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
6677			DBPRINT(sc, BCE_INFO_SEND,
6678				"TX chain is closed for business! Total tx_bd used = %d\n",
6679				sc->used_tx_bd);
6680			break;
6681		}
6682
6683		count++;
6684
6685		/* Send a copy of the frame to any BPF listeners. */
6686		ETHER_BPF_MTAP(ifp, m_head);
6687	}
6688
6689	/* Exit if no packets were dequeued. */
6690	if (count == 0) {
6691		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
6692			__FUNCTION__);
6693		goto bce_start_locked_exit;
6694	}
6695
6696	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into send queue.\n",
6697		__FUNCTION__, count);
6698
6699	REG_WR(sc, BCE_MQ_COMMAND, REG_RD(sc, BCE_MQ_COMMAND) | BCE_MQ_COMMAND_NO_MAP_ERROR);
6700
6701	/* Write the mailbox and tell the chip about the waiting tx_bd's. */
6702	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6703		"BCE_L2MQ_TX_HOST_BIDX = 0x%08X, sc->tx_prod = 0x%04X\n",
6704		__FUNCTION__,
6705		MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6706	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6707	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6708		"BCE_L2MQ_TX_HOST_BSEQ = 0x%08X, sc->tx_prod_bseq = 0x%04X\n",
6709		__FUNCTION__,
6710		MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6711	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6712
6713	/* Set the tx timeout. */
6714	sc->watchdog_timer = BCE_TX_TIMEOUT;
6715
6716	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
6717	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));
6718
6719bce_start_locked_exit:
6720	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6721	return;
6722}
6723
6724
6725/****************************************************************************/
6726/* Main transmit routine when called from another routine without a lock.   */
6727/*                                                                          */
6728/* Returns:                                                                 */
6729/*   Nothing.                                                               */
6730/****************************************************************************/
6731static void
6732bce_start(struct ifnet *ifp)
6733{
6734	struct bce_softc *sc = ifp->if_softc;
6735
6736	DBENTER(BCE_VERBOSE_SEND);
6737
6738	BCE_LOCK(sc);
6739	bce_start_locked(ifp);
6740	BCE_UNLOCK(sc);
6741
6742	DBEXIT(BCE_VERBOSE_SEND);
6743}
6744
6745
6746/****************************************************************************/
6747/* Handles any IOCTL calls from the operating system.                       */
6748/*                                                                          */
6749/* Returns:                                                                 */
6750/*   0 for success, positive value for failure.                             */
6751/****************************************************************************/
6752static int
6753bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
6754{
6755	struct bce_softc *sc = ifp->if_softc;
6756	struct ifreq *ifr = (struct ifreq *) data;
6757	struct mii_data *mii;
6758	int mask, error = 0;
6759
6760	DBENTER(BCE_VERBOSE_MISC);
6761
6762	switch(command) {
6763
6764		/* Set the interface MTU. */
6765		case SIOCSIFMTU:
6766			/* Check that the MTU setting is supported. */
6767			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
6768				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
6769				error = EINVAL;
6770				break;
6771			}
6772
6773			DBPRINT(sc, BCE_INFO_MISC,
6774				"SIOCSIFMTU: Changing MTU from %d to %d\n",
6775				(int) ifp->if_mtu, (int) ifr->ifr_mtu);
6776
6777			BCE_LOCK(sc);
6778			ifp->if_mtu = ifr->ifr_mtu;
6779			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6780#ifdef BCE_USE_SPLIT_HEADER
6781			/* No buffer allocation size changes are necessary. */
6782#else
6783			/* Recalculate our buffer allocation sizes. */
6784			if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > MCLBYTES) {
6785				sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
6786				sc->rx_bd_mbuf_align_pad  = roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
6787				sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
6788					sc->rx_bd_mbuf_align_pad;
6789			} else {
6790				sc->rx_bd_mbuf_alloc_size = MCLBYTES;
6791				sc->rx_bd_mbuf_align_pad  = roundup2(MCLBYTES, 16) - MCLBYTES;
6792				sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
6793					sc->rx_bd_mbuf_align_pad;
6794			}
6795#endif
6796
6797			bce_init_locked(sc);
6798			BCE_UNLOCK(sc);
6799			break;
6800
6801		/* Set interface flags. */
6802		case SIOCSIFFLAGS:
6803			DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
6804
6805			BCE_LOCK(sc);
6806
6807			/* Check if the interface is up. */
6808			if (ifp->if_flags & IFF_UP) {
6809				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6810					/* Change promiscuous/multicast flags as necessary. */
6811					bce_set_rx_mode(sc);
6812				} else {
6813					/* Start the HW */
6814					bce_init_locked(sc);
6815				}
6816			} else {
6817				/* The interface is down, check if driver is running. */
6818				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6819					bce_stop(sc);
6820
6821					/* If MFW is running, restart the controller a bit. */
6822					if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
6823						bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
6824						bce_chipinit(sc);
6825						bce_mgmt_init_locked(sc);
6826					}
6827				}
6828			}
6829
6830			BCE_UNLOCK(sc);
6831			error = 0;
6832
6833			break;
6834
6835		/* Add/Delete multicast address */
6836		case SIOCADDMULTI:
6837		case SIOCDELMULTI:
6838			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
6839
6840			BCE_LOCK(sc);
6841			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6842				bce_set_rx_mode(sc);
6843				error = 0;
6844			}
6845			BCE_UNLOCK(sc);
6846
6847			break;
6848
6849		/* Set/Get Interface media */
6850		case SIOCSIFMEDIA:
6851		case SIOCGIFMEDIA:
6852			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
6853
6854			mii = device_get_softc(sc->bce_miibus);
6855			error = ifmedia_ioctl(ifp, ifr,
6856			    &mii->mii_media, command);
6857			break;
6858
6859		/* Set interface capability */
6860		case SIOCSIFCAP:
6861			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
6862			DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
6863
6864			/* Toggle the TX checksum capabilites enable flag. */
6865			if (mask & IFCAP_TXCSUM) {
6866				ifp->if_capenable ^= IFCAP_TXCSUM;
6867				if (IFCAP_TXCSUM & ifp->if_capenable)
6868					ifp->if_hwassist = BCE_IF_HWASSIST;
6869				else
6870					ifp->if_hwassist = 0;
6871			}
6872
6873			/* Toggle the RX checksum capabilities enable flag. */
6874			if (mask & IFCAP_RXCSUM) {
6875				ifp->if_capenable ^= IFCAP_RXCSUM;
6876				if (IFCAP_RXCSUM & ifp->if_capenable)
6877					ifp->if_hwassist = BCE_IF_HWASSIST;
6878				else
6879					ifp->if_hwassist = 0;
6880			}
6881
6882			/* Toggle the TSO capabilities enable flag. */
6883			if (bce_tso_enable && (mask & IFCAP_TSO4)) {
6884				ifp->if_capenable ^= IFCAP_TSO4;
6885				if (IFCAP_RXCSUM & ifp->if_capenable)
6886					ifp->if_hwassist = BCE_IF_HWASSIST;
6887				else
6888					ifp->if_hwassist = 0;
6889			}
6890
6891			/* Toggle VLAN_MTU capabilities enable flag. */
6892			if (mask & IFCAP_VLAN_MTU) {
6893				BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
6894					__FILE__, __LINE__);
6895			}
6896
6897			/* Toggle VLANHWTAG capabilities enabled flag. */
6898			if (mask & IFCAP_VLAN_HWTAGGING) {
6899				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
6900					BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
6901						"management firmware (ASF/IPMI/UMP) is running!\n",
6902						__FILE__, __LINE__);
6903				else
6904					BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
6905						__FILE__, __LINE__);
6906			}
6907
6908			break;
6909		default:
6910			/* We don't know how to handle the IOCTL, pass it on. */
6911			error = ether_ioctl(ifp, command, data);
6912			break;
6913	}
6914
6915	DBEXIT(BCE_VERBOSE_MISC);
6916	return(error);
6917}
6918
6919
6920/****************************************************************************/
6921/* Transmit timeout handler.                                                */
6922/*                                                                          */
6923/* Returns:                                                                 */
6924/*   Nothing.                                                               */
6925/****************************************************************************/
6926static void
6927bce_watchdog(struct bce_softc *sc)
6928{
6929	DBENTER(BCE_EXTREME_SEND);
6930
6931	BCE_LOCK_ASSERT(sc);
6932
6933	/* If the watchdog timer hasn't expired then just exit. */
6934	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
6935		goto bce_watchdog_exit;
6936
6937	/* If pause frames are active then don't reset the hardware. */
6938	/* ToDo: Should we reset the timer here? */
6939	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
6940		goto bce_watchdog_exit;
6941
6942	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
6943		__FILE__, __LINE__);
6944
6945	DBRUNMSG(BCE_INFO,
6946		bce_dump_driver_state(sc);
6947		bce_dump_status_block(sc);
6948		bce_dump_stats_block(sc);
6949		bce_dump_ftqs(sc);
6950		bce_dump_txp_state(sc, 0);
6951		bce_dump_rxp_state(sc, 0);
6952		bce_dump_tpat_state(sc, 0);
6953		bce_dump_cp_state(sc, 0);
6954		bce_dump_com_state(sc, 0));
6955
6956	DBRUN(bce_breakpoint(sc));
6957
6958	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6959
6960	bce_init_locked(sc);
6961	sc->bce_ifp->if_oerrors++;
6962
6963bce_watchdog_exit:
6964	DBEXIT(BCE_EXTREME_SEND);
6965}
6966
6967
6968/*
6969 * Interrupt handler.
6970 */
6971/****************************************************************************/
6972/* Main interrupt entry point.  Verifies that the controller generated the  */
6973/* interrupt and then calls a separate routine for handle the various       */
6974/* interrupt causes (PHY, TX, RX).                                          */
6975/*                                                                          */
6976/* Returns:                                                                 */
6977/*   0 for success, positive value for failure.                             */
6978/****************************************************************************/
6979static void
6980bce_intr(void *xsc)
6981{
6982	struct bce_softc *sc;
6983	struct ifnet *ifp;
6984	u32 status_attn_bits;
6985	u16 hw_rx_cons, hw_tx_cons;
6986
6987	sc = xsc;
6988	ifp = sc->bce_ifp;
6989
6990	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
6991	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
6992
6993	BCE_LOCK(sc);
6994
6995	DBRUN(sc->interrupts_generated++);
6996
6997	bus_dmamap_sync(sc->status_tag, sc->status_map,
6998	    BUS_DMASYNC_POSTWRITE);
6999
7000	/*
7001	 * If the hardware status block index
7002	 * matches the last value read by the
7003	 * driver and we haven't asserted our
7004	 * interrupt then there's nothing to do.
7005	 */
7006	if ((sc->status_block->status_idx == sc->last_status_idx) &&
7007		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
7008			DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
7009				__FUNCTION__);
7010			goto bce_intr_exit;
7011	}
7012
7013	/* Ack the interrupt and stop others from occuring. */
7014	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
7015		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
7016		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
7017
7018	/* Check if the hardware has finished any work. */
7019	hw_rx_cons = bce_get_hw_rx_cons(sc);
7020	hw_tx_cons = bce_get_hw_tx_cons(sc);
7021
7022	/* Keep processing data as long as there is work to do. */
7023	for (;;) {
7024
7025		status_attn_bits = sc->status_block->status_attn_bits;
7026
7027		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
7028			BCE_PRINTF("Simulating unexpected status attention bit set.");
7029			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
7030
7031		/* Was it a link change interrupt? */
7032		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
7033			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
7034			bce_phy_intr(sc);
7035
7036			/* Clear any transient status updates during link state change. */
7037			REG_WR(sc, BCE_HC_COMMAND,
7038				sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
7039			REG_RD(sc, BCE_HC_COMMAND);
7040		}
7041
7042		/* If any other attention is asserted then the chip is toast. */
7043		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
7044			(sc->status_block->status_attn_bits_ack &
7045			~STATUS_ATTN_BITS_LINK_STATE))) {
7046
7047			DBRUN(sc->unexpected_attentions++);
7048
7049			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
7050				__FILE__, __LINE__, sc->status_block->status_attn_bits);
7051
7052			DBRUNMSG(BCE_FATAL,
7053				if (bce_debug_unexpected_attention == 0)
7054					bce_breakpoint(sc));
7055
7056			bce_init_locked(sc);
7057			goto bce_intr_exit;
7058		}
7059
7060		/* Check for any completed RX frames. */
7061		if (hw_rx_cons != sc->hw_rx_cons)
7062			bce_rx_intr(sc);
7063
7064		/* Check for any completed TX frames. */
7065		if (hw_tx_cons != sc->hw_tx_cons)
7066			bce_tx_intr(sc);
7067
7068		/* Save the status block index value for use during the next interrupt. */
7069		sc->last_status_idx = sc->status_block->status_idx;
7070
7071		/* Prevent speculative reads from getting ahead of the status block. */
7072		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
7073			BUS_SPACE_BARRIER_READ);
7074
7075		/* If there's no work left then exit the interrupt service routine. */
7076		hw_rx_cons = bce_get_hw_rx_cons(sc);
7077		hw_tx_cons = bce_get_hw_tx_cons(sc);
7078
7079		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
7080			break;
7081
7082	}
7083
7084	bus_dmamap_sync(sc->status_tag,	sc->status_map,
7085	    BUS_DMASYNC_PREWRITE);
7086
7087	/* Re-enable interrupts. */
7088	bce_enable_intr(sc, 0);
7089
7090	/* Handle any frames that arrived while handling the interrupt. */
7091	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
7092		bce_start_locked(ifp);
7093
7094bce_intr_exit:
7095	BCE_UNLOCK(sc);
7096
7097	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7098}
7099
7100
7101/****************************************************************************/
7102/* Programs the various packet receive modes (broadcast and multicast).     */
7103/*                                                                          */
7104/* Returns:                                                                 */
7105/*   Nothing.                                                               */
7106/****************************************************************************/
7107static void
7108bce_set_rx_mode(struct bce_softc *sc)
7109{
7110	struct ifnet *ifp;
7111	struct ifmultiaddr *ifma;
7112	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
7113	u32 rx_mode, sort_mode;
7114	int h, i;
7115
7116	DBENTER(BCE_VERBOSE_MISC);
7117
7118	BCE_LOCK_ASSERT(sc);
7119
7120	ifp = sc->bce_ifp;
7121
7122	/* Initialize receive mode default settings. */
7123	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
7124			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
7125	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
7126
7127	/*
7128	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
7129	 * be enbled.
7130	 */
7131	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
7132		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
7133		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
7134
7135	/*
7136	 * Check for promiscuous, all multicast, or selected
7137	 * multicast address filtering.
7138	 */
7139	if (ifp->if_flags & IFF_PROMISC) {
7140		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
7141
7142		/* Enable promiscuous mode. */
7143		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
7144		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
7145	} else if (ifp->if_flags & IFF_ALLMULTI) {
7146		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
7147
7148		/* Enable all multicast addresses. */
7149		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
7150			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
7151       	}
7152		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
7153	} else {
7154		/* Accept one or more multicast(s). */
7155		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
7156
7157		IF_ADDR_LOCK(ifp);
7158		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
7159			if (ifma->ifma_addr->sa_family != AF_LINK)
7160				continue;
7161			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
7162			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
7163			    hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
7164		}
7165		IF_ADDR_UNLOCK(ifp);
7166
7167		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
7168			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
7169
7170		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
7171	}
7172
7173	/* Only make changes if the recive mode has actually changed. */
7174	if (rx_mode != sc->rx_mode) {
7175		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
7176			rx_mode);
7177
7178		sc->rx_mode = rx_mode;
7179		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
7180	}
7181
7182	/* Disable and clear the exisitng sort before enabling a new sort. */
7183	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
7184	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
7185	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
7186
7187	DBEXIT(BCE_VERBOSE_MISC);
7188}
7189
7190
7191/****************************************************************************/
7192/* Called periodically to updates statistics from the controllers           */
7193/* statistics block.                                                        */
7194/*                                                                          */
7195/* Returns:                                                                 */
7196/*   Nothing.                                                               */
7197/****************************************************************************/
7198static void
7199bce_stats_update(struct bce_softc *sc)
7200{
7201	struct ifnet *ifp;
7202	struct statistics_block *stats;
7203
7204	DBENTER(BCE_EXTREME_MISC);
7205
7206	ifp = sc->bce_ifp;
7207
7208	stats = (struct statistics_block *) sc->stats_block;
7209
7210	/*
7211	 * Certain controllers don't report
7212	 * carrier sense errors correctly.
7213	 * See errata E11_5708CA0_1165.
7214	 */
7215	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
7216	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
7217		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
7218
7219	/*
7220	 * Update the sysctl statistics from the
7221	 * hardware statistics.
7222	 */
7223	sc->stat_IfHCInOctets =
7224		((u64) stats->stat_IfHCInOctets_hi << 32) +
7225		 (u64) stats->stat_IfHCInOctets_lo;
7226
7227	sc->stat_IfHCInBadOctets =
7228		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
7229		 (u64) stats->stat_IfHCInBadOctets_lo;
7230
7231	sc->stat_IfHCOutOctets =
7232		((u64) stats->stat_IfHCOutOctets_hi << 32) +
7233		 (u64) stats->stat_IfHCOutOctets_lo;
7234
7235	sc->stat_IfHCOutBadOctets =
7236		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
7237		 (u64) stats->stat_IfHCOutBadOctets_lo;
7238
7239	sc->stat_IfHCInUcastPkts =
7240		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
7241		 (u64) stats->stat_IfHCInUcastPkts_lo;
7242
7243	sc->stat_IfHCInMulticastPkts =
7244		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
7245		 (u64) stats->stat_IfHCInMulticastPkts_lo;
7246
7247	sc->stat_IfHCInBroadcastPkts =
7248		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
7249		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
7250
7251	sc->stat_IfHCOutUcastPkts =
7252		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
7253		 (u64) stats->stat_IfHCOutUcastPkts_lo;
7254
7255	sc->stat_IfHCOutMulticastPkts =
7256		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
7257		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
7258
7259	sc->stat_IfHCOutBroadcastPkts =
7260		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
7261		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
7262
7263	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
7264		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
7265
7266	sc->stat_Dot3StatsCarrierSenseErrors =
7267		stats->stat_Dot3StatsCarrierSenseErrors;
7268
7269	sc->stat_Dot3StatsFCSErrors =
7270		stats->stat_Dot3StatsFCSErrors;
7271
7272	sc->stat_Dot3StatsAlignmentErrors =
7273		stats->stat_Dot3StatsAlignmentErrors;
7274
7275	sc->stat_Dot3StatsSingleCollisionFrames =
7276		stats->stat_Dot3StatsSingleCollisionFrames;
7277
7278	sc->stat_Dot3StatsMultipleCollisionFrames =
7279		stats->stat_Dot3StatsMultipleCollisionFrames;
7280
7281	sc->stat_Dot3StatsDeferredTransmissions =
7282		stats->stat_Dot3StatsDeferredTransmissions;
7283
7284	sc->stat_Dot3StatsExcessiveCollisions =
7285		stats->stat_Dot3StatsExcessiveCollisions;
7286
7287	sc->stat_Dot3StatsLateCollisions =
7288		stats->stat_Dot3StatsLateCollisions;
7289
7290	sc->stat_EtherStatsCollisions =
7291		stats->stat_EtherStatsCollisions;
7292
7293	sc->stat_EtherStatsFragments =
7294		stats->stat_EtherStatsFragments;
7295
7296	sc->stat_EtherStatsJabbers =
7297		stats->stat_EtherStatsJabbers;
7298
7299	sc->stat_EtherStatsUndersizePkts =
7300		stats->stat_EtherStatsUndersizePkts;
7301
7302	sc->stat_EtherStatsOverrsizePkts =
7303		stats->stat_EtherStatsOverrsizePkts;
7304
7305	sc->stat_EtherStatsPktsRx64Octets =
7306		stats->stat_EtherStatsPktsRx64Octets;
7307
7308	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
7309		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
7310
7311	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
7312		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
7313
7314	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
7315		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
7316
7317	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
7318		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
7319
7320	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
7321		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
7322
7323	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
7324		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
7325
7326	sc->stat_EtherStatsPktsTx64Octets =
7327		stats->stat_EtherStatsPktsTx64Octets;
7328
7329	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
7330		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
7331
7332	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
7333		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
7334
7335	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
7336		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
7337
7338	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
7339		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
7340
7341	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
7342		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
7343
7344	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
7345		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
7346
7347	sc->stat_XonPauseFramesReceived =
7348		stats->stat_XonPauseFramesReceived;
7349
7350	sc->stat_XoffPauseFramesReceived =
7351		stats->stat_XoffPauseFramesReceived;
7352
7353	sc->stat_OutXonSent =
7354		stats->stat_OutXonSent;
7355
7356	sc->stat_OutXoffSent =
7357		stats->stat_OutXoffSent;
7358
7359	sc->stat_FlowControlDone =
7360		stats->stat_FlowControlDone;
7361
7362	sc->stat_MacControlFramesReceived =
7363		stats->stat_MacControlFramesReceived;
7364
7365	sc->stat_XoffStateEntered =
7366		stats->stat_XoffStateEntered;
7367
7368	sc->stat_IfInFramesL2FilterDiscards =
7369		stats->stat_IfInFramesL2FilterDiscards;
7370
7371	sc->stat_IfInRuleCheckerDiscards =
7372		stats->stat_IfInRuleCheckerDiscards;
7373
7374	sc->stat_IfInFTQDiscards =
7375		stats->stat_IfInFTQDiscards;
7376
7377	sc->stat_IfInMBUFDiscards =
7378		stats->stat_IfInMBUFDiscards;
7379
7380	sc->stat_IfInRuleCheckerP4Hit =
7381		stats->stat_IfInRuleCheckerP4Hit;
7382
7383	sc->stat_CatchupInRuleCheckerDiscards =
7384		stats->stat_CatchupInRuleCheckerDiscards;
7385
7386	sc->stat_CatchupInFTQDiscards =
7387		stats->stat_CatchupInFTQDiscards;
7388
7389	sc->stat_CatchupInMBUFDiscards =
7390		stats->stat_CatchupInMBUFDiscards;
7391
7392	sc->stat_CatchupInRuleCheckerP4Hit =
7393		stats->stat_CatchupInRuleCheckerP4Hit;
7394
7395	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
7396
7397	/*
7398	 * Update the interface statistics from the
7399	 * hardware statistics.
7400	 */
7401	ifp->if_collisions =
7402		(u_long) sc->stat_EtherStatsCollisions;
7403
7404	/* ToDo: This method loses soft errors. */
7405	ifp->if_ierrors =
7406		(u_long) sc->stat_EtherStatsUndersizePkts +
7407		(u_long) sc->stat_EtherStatsOverrsizePkts +
7408		(u_long) sc->stat_IfInMBUFDiscards +
7409		(u_long) sc->stat_Dot3StatsAlignmentErrors +
7410		(u_long) sc->stat_Dot3StatsFCSErrors +
7411		(u_long) sc->stat_IfInRuleCheckerDiscards +
7412		(u_long) sc->stat_IfInFTQDiscards +
7413		(u_long) sc->com_no_buffers;
7414
7415	/* ToDo: This method loses soft errors. */
7416	ifp->if_oerrors =
7417		(u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
7418		(u_long) sc->stat_Dot3StatsExcessiveCollisions +
7419		(u_long) sc->stat_Dot3StatsLateCollisions;
7420
7421	/* ToDo: Add additional statistics. */
7422
7423	DBEXIT(BCE_EXTREME_MISC);
7424}
7425
7426
7427/****************************************************************************/
7428/* Periodic function to notify the bootcode that the driver is still        */
7429/* present.                                                                 */
7430/*                                                                          */
7431/* Returns:                                                                 */
7432/*   Nothing.                                                               */
7433/****************************************************************************/
7434static void
7435bce_pulse(void *xsc)
7436{
7437	struct bce_softc *sc = xsc;
7438	u32 msg;
7439
7440	DBENTER(BCE_EXTREME_MISC);
7441
7442	BCE_LOCK_ASSERT(sc);
7443
7444	/* Tell the firmware that the driver is still running. */
7445	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
7446	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
7447
7448	/* Schedule the next pulse. */
7449	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
7450
7451	DBEXIT(BCE_EXTREME_MISC);
7452}
7453
7454
7455/****************************************************************************/
7456/* Periodic function to perform maintenance tasks.                          */
7457/*                                                                          */
7458/* Returns:                                                                 */
7459/*   Nothing.                                                               */
7460/****************************************************************************/
7461static void
7462bce_tick(void *xsc)
7463{
7464	struct bce_softc *sc = xsc;
7465	struct mii_data *mii;
7466	struct ifnet *ifp;
7467
7468	ifp = sc->bce_ifp;
7469
7470	DBENTER(BCE_EXTREME_MISC);
7471
7472	BCE_LOCK_ASSERT(sc);
7473
7474	/* Schedule the next tick. */
7475	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
7476
7477	/* Update the statistics from the hardware statistics block. */
7478	bce_stats_update(sc);
7479
7480	/* Top off the receive and page chains. */
7481#ifdef BCE_USE_SPLIT_HEADER
7482	bce_fill_pg_chain(sc);
7483#endif
7484	bce_fill_rx_chain(sc);
7485
7486	/* Check that chip hasn't hung. */
7487	bce_watchdog(sc);
7488
7489	/* If link is up already up then we're done. */
7490	if (sc->bce_link)
7491		goto bce_tick_exit;
7492
7493	/* Link is down.  Check what the PHY's doing. */
7494	mii = device_get_softc(sc->bce_miibus);
7495	mii_tick(mii);
7496
7497	/* Check if the link has come up. */
7498	if ((mii->mii_media_status & IFM_ACTIVE) &&
7499	    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
7500		DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n", __FUNCTION__);
7501		sc->bce_link++;
7502		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
7503		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
7504		    bootverbose)
7505			BCE_PRINTF("Gigabit link up!\n");
7506		/* Now that link is up, handle any outstanding TX traffic. */
7507		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7508			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found pending TX traffic.\n",
7509				 __FUNCTION__);
7510			bce_start_locked(ifp);
7511		}
7512	}
7513
7514bce_tick_exit:
7515	DBEXIT(BCE_EXTREME_MISC);
7516	return;
7517}
7518
7519
7520#ifdef BCE_DEBUG
7521/****************************************************************************/
7522/* Allows the driver state to be dumped through the sysctl interface.       */
7523/*                                                                          */
7524/* Returns:                                                                 */
7525/*   0 for success, positive value for failure.                             */
7526/****************************************************************************/
7527static int
7528bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
7529{
7530        int error;
7531        int result;
7532        struct bce_softc *sc;
7533
7534        result = -1;
7535        error = sysctl_handle_int(oidp, &result, 0, req);
7536
7537        if (error || !req->newptr)
7538                return (error);
7539
7540        if (result == 1) {
7541                sc = (struct bce_softc *)arg1;
7542                bce_dump_driver_state(sc);
7543        }
7544
7545        return error;
7546}
7547
7548
7549/****************************************************************************/
7550/* Allows the hardware state to be dumped through the sysctl interface.     */
7551/*                                                                          */
7552/* Returns:                                                                 */
7553/*   0 for success, positive value for failure.                             */
7554/****************************************************************************/
7555static int
7556bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
7557{
7558        int error;
7559        int result;
7560        struct bce_softc *sc;
7561
7562        result = -1;
7563        error = sysctl_handle_int(oidp, &result, 0, req);
7564
7565        if (error || !req->newptr)
7566                return (error);
7567
7568        if (result == 1) {
7569                sc = (struct bce_softc *)arg1;
7570                bce_dump_hw_state(sc);
7571        }
7572
7573        return error;
7574}
7575
7576
7577/****************************************************************************/
7578/* Allows the bootcode state to be dumped through the sysctl interface.     */
7579/*                                                                          */
7580/* Returns:                                                                 */
7581/*   0 for success, positive value for failure.                             */
7582/****************************************************************************/
7583static int
7584bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
7585{
7586        int error;
7587        int result;
7588        struct bce_softc *sc;
7589
7590        result = -1;
7591        error = sysctl_handle_int(oidp, &result, 0, req);
7592
7593        if (error || !req->newptr)
7594                return (error);
7595
7596        if (result == 1) {
7597                sc = (struct bce_softc *)arg1;
7598                bce_dump_bc_state(sc);
7599        }
7600
7601        return error;
7602}
7603
7604
7605/****************************************************************************/
7606/* Provides a sysctl interface to allow dumping the RX chain.               */
7607/*                                                                          */
7608/* Returns:                                                                 */
7609/*   0 for success, positive value for failure.                             */
7610/****************************************************************************/
7611static int
7612bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
7613{
7614        int error;
7615        int result;
7616        struct bce_softc *sc;
7617
7618        result = -1;
7619        error = sysctl_handle_int(oidp, &result, 0, req);
7620
7621        if (error || !req->newptr)
7622                return (error);
7623
7624        if (result == 1) {
7625                sc = (struct bce_softc *)arg1;
7626                bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
7627        }
7628
7629        return error;
7630}
7631
7632
7633/****************************************************************************/
7634/* Provides a sysctl interface to allow dumping the TX chain.               */
7635/*                                                                          */
7636/* Returns:                                                                 */
7637/*   0 for success, positive value for failure.                             */
7638/****************************************************************************/
7639static int
7640bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
7641{
7642        int error;
7643        int result;
7644        struct bce_softc *sc;
7645
7646        result = -1;
7647        error = sysctl_handle_int(oidp, &result, 0, req);
7648
7649        if (error || !req->newptr)
7650                return (error);
7651
7652        if (result == 1) {
7653                sc = (struct bce_softc *)arg1;
7654                bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
7655        }
7656
7657        return error;
7658}
7659
7660
7661#ifdef BCE_USE_SPLIT_HEADER
7662/****************************************************************************/
7663/* Provides a sysctl interface to allow dumping the page chain.             */
7664/*                                                                          */
7665/* Returns:                                                                 */
7666/*   0 for success, positive value for failure.                             */
7667/****************************************************************************/
7668static int
7669bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
7670{
7671        int error;
7672        int result;
7673        struct bce_softc *sc;
7674
7675        result = -1;
7676        error = sysctl_handle_int(oidp, &result, 0, req);
7677
7678        if (error || !req->newptr)
7679                return (error);
7680
7681        if (result == 1) {
7682                sc = (struct bce_softc *)arg1;
7683                bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
7684        }
7685
7686        return error;
7687}
7688#endif
7689
7690/****************************************************************************/
7691/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in  */
7692/* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
7693/*                                                                          */
7694/* Returns:                                                                 */
7695/*   0 for success, positive value for failure.                             */
7696/****************************************************************************/
7697static int
7698bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
7699{
7700	struct bce_softc *sc = (struct bce_softc *)arg1;
7701	int error;
7702	u32 result;
7703	u32 val[1];
7704	u8 *data = (u8 *) val;
7705
7706	result = -1;
7707	error = sysctl_handle_int(oidp, &result, 0, req);
7708	if (error || (req->newptr == NULL))
7709		return (error);
7710
7711	bce_nvram_read(sc, result, data, 4);
7712	BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
7713
7714	return (error);
7715}
7716
7717
7718/****************************************************************************/
7719/* Provides a sysctl interface to allow reading arbitrary registers in the  */
7720/* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
7721/*                                                                          */
7722/* Returns:                                                                 */
7723/*   0 for success, positive value for failure.                             */
7724/****************************************************************************/
7725static int
7726bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
7727{
7728	struct bce_softc *sc = (struct bce_softc *)arg1;
7729	int error;
7730	u32 val, result;
7731
7732	result = -1;
7733	error = sysctl_handle_int(oidp, &result, 0, req);
7734	if (error || (req->newptr == NULL))
7735		return (error);
7736
7737	/* Make sure the register is accessible. */
7738	if (result < 0x8000) {
7739		val = REG_RD(sc, result);
7740		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7741	} else if (result < 0x0280000) {
7742		val = REG_RD_IND(sc, result);
7743		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7744	}
7745
7746	return (error);
7747}
7748
7749
7750/****************************************************************************/
7751/* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
7752/* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
7753/*                                                                          */
7754/* Returns:                                                                 */
7755/*   0 for success, positive value for failure.                             */
7756/****************************************************************************/
7757static int
7758bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
7759{
7760	struct bce_softc *sc;
7761	device_t dev;
7762	int error, result;
7763	u16 val;
7764
7765	result = -1;
7766	error = sysctl_handle_int(oidp, &result, 0, req);
7767	if (error || (req->newptr == NULL))
7768		return (error);
7769
7770	/* Make sure the register is accessible. */
7771	if (result < 0x20) {
7772		sc = (struct bce_softc *)arg1;
7773		dev = sc->bce_dev;
7774		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
7775		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
7776	}
7777	return (error);
7778}
7779
7780
7781/****************************************************************************/
7782/* Provides a sysctl interface to allow reading a CID.                      */
7783/*                                                                          */
7784/* Returns:                                                                 */
7785/*   0 for success, positive value for failure.                             */
7786/****************************************************************************/
7787static int
7788bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
7789{
7790	struct bce_softc *sc;
7791	int error;
7792	u16 result;
7793
7794	result = -1;
7795	error = sysctl_handle_int(oidp, &result, 0, req);
7796	if (error || (req->newptr == NULL))
7797		return (error);
7798
7799	/* Make sure the register is accessible. */
7800	if (result <= TX_CID) {
7801		sc = (struct bce_softc *)arg1;
7802		bce_dump_ctx(sc, result);
7803	}
7804
7805	return (error);
7806}
7807
7808
7809 /****************************************************************************/
7810/* Provides a sysctl interface to forcing the driver to dump state and      */
7811/* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
7812/*                                                                          */
7813/* Returns:                                                                 */
7814/*   0 for success, positive value for failure.                             */
7815/****************************************************************************/
7816static int
7817bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
7818{
7819        int error;
7820        int result;
7821        struct bce_softc *sc;
7822
7823        result = -1;
7824        error = sysctl_handle_int(oidp, &result, 0, req);
7825
7826        if (error || !req->newptr)
7827                return (error);
7828
7829        if (result == 1) {
7830                sc = (struct bce_softc *)arg1;
7831                bce_breakpoint(sc);
7832        }
7833
7834        return error;
7835}
7836#endif
7837
7838
7839/****************************************************************************/
7840/* Adds any sysctl parameters for tuning or debugging purposes.             */
7841/*                                                                          */
7842/* Returns:                                                                 */
7843/*   0 for success, positive value for failure.                             */
7844/****************************************************************************/
7845static void
7846bce_add_sysctls(struct bce_softc *sc)
7847{
7848	struct sysctl_ctx_list *ctx;
7849	struct sysctl_oid_list *children;
7850
7851	DBENTER(BCE_VERBOSE_MISC);
7852
7853	ctx = device_get_sysctl_ctx(sc->bce_dev);
7854	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
7855
7856#ifdef BCE_DEBUG
7857	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7858		"rx_low_watermark",
7859		CTLFLAG_RD, &sc->rx_low_watermark,
7860		0, "Lowest level of free rx_bd's");
7861
7862	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7863		"rx_empty_count",
7864		CTLFLAG_RD, &sc->rx_empty_count,
7865		0, "Number of times the RX chain was empty");
7866
7867	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7868		"tx_hi_watermark",
7869		CTLFLAG_RD, &sc->tx_hi_watermark,
7870		0, "Highest level of used tx_bd's");
7871
7872	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7873		"tx_full_count",
7874		CTLFLAG_RD, &sc->tx_full_count,
7875		0, "Number of times the TX chain was full");
7876
7877	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7878		"l2fhdr_status_errors",
7879		CTLFLAG_RD, &sc->l2fhdr_status_errors,
7880		0, "l2_fhdr status errors");
7881
7882	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7883		"unexpected_attentions",
7884		CTLFLAG_RD, &sc->unexpected_attentions,
7885		0, "Unexpected attentions");
7886
7887	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7888		"lost_status_block_updates",
7889		CTLFLAG_RD, &sc->lost_status_block_updates,
7890		0, "Lost status block updates");
7891
7892	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7893		"debug_mbuf_sim_alloc_failed",
7894		CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed,
7895		0, "Simulated mbuf cluster allocation failures");
7896
7897	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7898		"requested_tso_frames",
7899		CTLFLAG_RD, &sc->requested_tso_frames,
7900		0, "Number of TSO frames received");
7901
7902	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7903		"rx_interrupts",
7904		CTLFLAG_RD, &sc->rx_interrupts,
7905		0, "Number of RX interrupts");
7906
7907	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7908		"tx_interrupts",
7909		CTLFLAG_RD, &sc->tx_interrupts,
7910		0, "Number of TX interrupts");
7911
7912	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7913		"rx_intr_time",
7914		CTLFLAG_RD, &sc->rx_intr_time,
7915		"RX interrupt time");
7916
7917	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7918		"tx_intr_time",
7919		CTLFLAG_RD, &sc->tx_intr_time,
7920		"TX interrupt time");
7921#endif
7922
7923	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7924		"mbuf_alloc_failed",
7925		CTLFLAG_RD, &sc->mbuf_alloc_failed,
7926		0, "mbuf cluster allocation failures");
7927
7928	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7929		"tx_dma_map_failures",
7930		CTLFLAG_RD, &sc->tx_dma_map_failures,
7931		0, "tx dma mapping failures");
7932
7933	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7934		"stat_IfHcInOctets",
7935		CTLFLAG_RD, &sc->stat_IfHCInOctets,
7936		"Bytes received");
7937
7938	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7939		"stat_IfHCInBadOctets",
7940		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
7941		"Bad bytes received");
7942
7943	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7944		"stat_IfHCOutOctets",
7945		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
7946		"Bytes sent");
7947
7948	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7949		"stat_IfHCOutBadOctets",
7950		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
7951		"Bad bytes sent");
7952
7953	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7954		"stat_IfHCInUcastPkts",
7955		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
7956		"Unicast packets received");
7957
7958	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7959		"stat_IfHCInMulticastPkts",
7960		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
7961		"Multicast packets received");
7962
7963	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7964		"stat_IfHCInBroadcastPkts",
7965		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
7966		"Broadcast packets received");
7967
7968	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7969		"stat_IfHCOutUcastPkts",
7970		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
7971		"Unicast packets sent");
7972
7973	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7974		"stat_IfHCOutMulticastPkts",
7975		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
7976		"Multicast packets sent");
7977
7978	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7979		"stat_IfHCOutBroadcastPkts",
7980		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
7981		"Broadcast packets sent");
7982
7983	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7984		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
7985		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
7986		0, "Internal MAC transmit errors");
7987
7988	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7989		"stat_Dot3StatsCarrierSenseErrors",
7990		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
7991		0, "Carrier sense errors");
7992
7993	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7994		"stat_Dot3StatsFCSErrors",
7995		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
7996		0, "Frame check sequence errors");
7997
7998	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7999		"stat_Dot3StatsAlignmentErrors",
8000		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
8001		0, "Alignment errors");
8002
8003	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8004		"stat_Dot3StatsSingleCollisionFrames",
8005		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
8006		0, "Single Collision Frames");
8007
8008	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8009		"stat_Dot3StatsMultipleCollisionFrames",
8010		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
8011		0, "Multiple Collision Frames");
8012
8013	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8014		"stat_Dot3StatsDeferredTransmissions",
8015		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
8016		0, "Deferred Transmissions");
8017
8018	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8019		"stat_Dot3StatsExcessiveCollisions",
8020		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
8021		0, "Excessive Collisions");
8022
8023	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8024		"stat_Dot3StatsLateCollisions",
8025		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
8026		0, "Late Collisions");
8027
8028	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8029		"stat_EtherStatsCollisions",
8030		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
8031		0, "Collisions");
8032
8033	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8034		"stat_EtherStatsFragments",
8035		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
8036		0, "Fragments");
8037
8038	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8039		"stat_EtherStatsJabbers",
8040		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
8041		0, "Jabbers");
8042
8043	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8044		"stat_EtherStatsUndersizePkts",
8045		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
8046		0, "Undersize packets");
8047
8048	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8049		"stat_EtherStatsOverrsizePkts",
8050		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
8051		0, "stat_EtherStatsOverrsizePkts");
8052
8053	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8054		"stat_EtherStatsPktsRx64Octets",
8055		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
8056		0, "Bytes received in 64 byte packets");
8057
8058	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8059		"stat_EtherStatsPktsRx65Octetsto127Octets",
8060		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
8061		0, "Bytes received in 65 to 127 byte packets");
8062
8063	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8064		"stat_EtherStatsPktsRx128Octetsto255Octets",
8065		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
8066		0, "Bytes received in 128 to 255 byte packets");
8067
8068	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8069		"stat_EtherStatsPktsRx256Octetsto511Octets",
8070		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
8071		0, "Bytes received in 256 to 511 byte packets");
8072
8073	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8074		"stat_EtherStatsPktsRx512Octetsto1023Octets",
8075		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
8076		0, "Bytes received in 512 to 1023 byte packets");
8077
8078	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8079		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
8080		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
8081		0, "Bytes received in 1024 t0 1522 byte packets");
8082
8083	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8084		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
8085		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
8086		0, "Bytes received in 1523 to 9022 byte packets");
8087
8088	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8089		"stat_EtherStatsPktsTx64Octets",
8090		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
8091		0, "Bytes sent in 64 byte packets");
8092
8093	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8094		"stat_EtherStatsPktsTx65Octetsto127Octets",
8095		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
8096		0, "Bytes sent in 65 to 127 byte packets");
8097
8098	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8099		"stat_EtherStatsPktsTx128Octetsto255Octets",
8100		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
8101		0, "Bytes sent in 128 to 255 byte packets");
8102
8103	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8104		"stat_EtherStatsPktsTx256Octetsto511Octets",
8105		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
8106		0, "Bytes sent in 256 to 511 byte packets");
8107
8108	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8109		"stat_EtherStatsPktsTx512Octetsto1023Octets",
8110		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
8111		0, "Bytes sent in 512 to 1023 byte packets");
8112
8113	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8114		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
8115		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
8116		0, "Bytes sent in 1024 to 1522 byte packets");
8117
8118	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8119		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
8120		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
8121		0, "Bytes sent in 1523 to 9022 byte packets");
8122
8123	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8124		"stat_XonPauseFramesReceived",
8125		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
8126		0, "XON pause frames receved");
8127
8128	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8129		"stat_XoffPauseFramesReceived",
8130		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
8131		0, "XOFF pause frames received");
8132
8133	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8134		"stat_OutXonSent",
8135		CTLFLAG_RD, &sc->stat_OutXonSent,
8136		0, "XON pause frames sent");
8137
8138	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8139		"stat_OutXoffSent",
8140		CTLFLAG_RD, &sc->stat_OutXoffSent,
8141		0, "XOFF pause frames sent");
8142
8143	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8144		"stat_FlowControlDone",
8145		CTLFLAG_RD, &sc->stat_FlowControlDone,
8146		0, "Flow control done");
8147
8148	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8149		"stat_MacControlFramesReceived",
8150		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
8151		0, "MAC control frames received");
8152
8153	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8154		"stat_XoffStateEntered",
8155		CTLFLAG_RD, &sc->stat_XoffStateEntered,
8156		0, "XOFF state entered");
8157
8158	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8159		"stat_IfInFramesL2FilterDiscards",
8160		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
8161		0, "Received L2 packets discarded");
8162
8163	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8164		"stat_IfInRuleCheckerDiscards",
8165		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
8166		0, "Received packets discarded by rule");
8167
8168	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8169		"stat_IfInFTQDiscards",
8170		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
8171		0, "Received packet FTQ discards");
8172
8173	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8174		"stat_IfInMBUFDiscards",
8175		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
8176		0, "Received packets discarded due to lack of controller buffer memory");
8177
8178	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8179		"stat_IfInRuleCheckerP4Hit",
8180		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
8181		0, "Received packets rule checker hits");
8182
8183	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8184		"stat_CatchupInRuleCheckerDiscards",
8185		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
8186		0, "Received packets discarded in Catchup path");
8187
8188	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8189		"stat_CatchupInFTQDiscards",
8190		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
8191		0, "Received packets discarded in FTQ in Catchup path");
8192
8193	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8194		"stat_CatchupInMBUFDiscards",
8195		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
8196		0, "Received packets discarded in controller buffer memory in Catchup path");
8197
8198	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8199		"stat_CatchupInRuleCheckerP4Hit",
8200		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
8201		0, "Received packets rule checker hits in Catchup path");
8202
8203	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8204		"com_no_buffers",
8205		CTLFLAG_RD, &sc->com_no_buffers,
8206		0, "Valid packets received but no RX buffers available");
8207
8208#ifdef BCE_DEBUG
8209	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8210		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
8211		(void *)sc, 0,
8212		bce_sysctl_driver_state, "I", "Drive state information");
8213
8214	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8215		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
8216		(void *)sc, 0,
8217		bce_sysctl_hw_state, "I", "Hardware state information");
8218
8219	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8220		"bc_state", CTLTYPE_INT | CTLFLAG_RW,
8221		(void *)sc, 0,
8222		bce_sysctl_bc_state, "I", "Bootcode state information");
8223
8224	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8225		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
8226		(void *)sc, 0,
8227		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
8228
8229	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8230		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
8231		(void *)sc, 0,
8232		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
8233
8234#ifdef BCE_USE_SPLIT_HEADER
8235	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8236		"dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
8237		(void *)sc, 0,
8238		bce_sysctl_dump_pg_chain, "I", "Dump page chain");
8239#endif
8240	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8241		"dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
8242		(void *)sc, 0,
8243		bce_sysctl_dump_ctx, "I", "Dump context memory");
8244
8245	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8246		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
8247		(void *)sc, 0,
8248		bce_sysctl_breakpoint, "I", "Driver breakpoint");
8249
8250	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8251		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
8252		(void *)sc, 0,
8253		bce_sysctl_reg_read, "I", "Register read");
8254
8255	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8256		"nvram_read", CTLTYPE_INT | CTLFLAG_RW,
8257		(void *)sc, 0,
8258		bce_sysctl_nvram_read, "I", "NVRAM read");
8259
8260	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8261		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
8262		(void *)sc, 0,
8263		bce_sysctl_phy_read, "I", "PHY register read");
8264
8265#endif
8266
8267	DBEXIT(BCE_VERBOSE_MISC);
8268}
8269
8270
8271/****************************************************************************/
8272/* BCE Debug Routines                                                       */
8273/****************************************************************************/
8274#ifdef BCE_DEBUG
8275
8276/****************************************************************************/
8277/* Freezes the controller to allow for a cohesive state dump.               */
8278/*                                                                          */
8279/* Returns:                                                                 */
8280/*   Nothing.                                                               */
8281/****************************************************************************/
8282static void
8283bce_freeze_controller(struct bce_softc *sc)
8284{
8285	u32 val;
8286	val = REG_RD(sc, BCE_MISC_COMMAND);
8287	val |= BCE_MISC_COMMAND_DISABLE_ALL;
8288	REG_WR(sc, BCE_MISC_COMMAND, val);
8289}
8290
8291
8292/****************************************************************************/
8293/* Unfreezes the controller after a freeze operation.  This may not always  */
8294/* work and the controller will require a reset!                            */
8295/*                                                                          */
8296/* Returns:                                                                 */
8297/*   Nothing.                                                               */
8298/****************************************************************************/
8299static void
8300bce_unfreeze_controller(struct bce_softc *sc)
8301{
8302	u32 val;
8303	val = REG_RD(sc, BCE_MISC_COMMAND);
8304	val |= BCE_MISC_COMMAND_ENABLE_ALL;
8305	REG_WR(sc, BCE_MISC_COMMAND, val);
8306}
8307
8308
8309/****************************************************************************/
8310/* Prints out Ethernet frame information from an mbuf.                      */
8311/*                                                                          */
8312/* Partially decode an Ethernet frame to look at some important headers.    */
8313/*                                                                          */
8314/* Returns:                                                                 */
8315/*   Nothing.                                                               */
8316/****************************************************************************/
8317static void
8318bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
8319{
8320	struct ether_vlan_header *eh;
8321	u16 etype;
8322	int ehlen;
8323	struct ip *ip;
8324	struct tcphdr *th;
8325	struct udphdr *uh;
8326	struct arphdr *ah;
8327
8328		BCE_PRINTF(
8329			"-----------------------------"
8330			" Frame Decode "
8331			"-----------------------------\n");
8332
8333	eh = mtod(m, struct ether_vlan_header *);
8334
8335	/* Handle VLAN encapsulation if present. */
8336	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
8337		etype = ntohs(eh->evl_proto);
8338		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8339	} else {
8340		etype = ntohs(eh->evl_encap_proto);
8341		ehlen = ETHER_HDR_LEN;
8342	}
8343
8344	/* ToDo: Add VLAN output. */
8345	BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
8346		eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);
8347
8348	switch (etype) {
8349		case ETHERTYPE_IP:
8350			ip = (struct ip *)(m->m_data + ehlen);
8351			BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, len = %d bytes, "
8352				"protocol = 0x%02X, xsum = 0x%04X\n",
8353				ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
8354				ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));
8355
8356			switch (ip->ip_p) {
8357				case IPPROTO_TCP:
8358					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8359					BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = %d bytes, "
8360						"flags = 0x%b, csum = 0x%04X\n",
8361						ntohs(th->th_dport), ntohs(th->th_sport), (th->th_off << 2),
8362						th->th_flags, "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST\02SYN\01FIN",
8363						ntohs(th->th_sum));
8364					break;
8365				case IPPROTO_UDP:
8366        		    uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8367					BCE_PRINTF("-udp: dest = %d, src = %d, len = %d bytes, "
8368						"csum = 0x%04X\n", ntohs(uh->uh_dport), ntohs(uh->uh_sport),
8369						ntohs(uh->uh_ulen), ntohs(uh->uh_sum));
8370					break;
8371				case IPPROTO_ICMP:
8372					BCE_PRINTF("icmp:\n");
8373					break;
8374				default:
8375					BCE_PRINTF("----: Other IP protocol.\n");
8376			}
8377			break;
8378		case ETHERTYPE_IPV6:
8379			BCE_PRINTF("ipv6: No decode supported.\n");
8380			break;
8381		case ETHERTYPE_ARP:
8382			BCE_PRINTF("-arp: ");
8383			ah = (struct arphdr *) (m->m_data + ehlen);
8384			switch (ntohs(ah->ar_op)) {
8385				case ARPOP_REVREQUEST:
8386					printf("reverse ARP request\n");
8387					break;
8388				case ARPOP_REVREPLY:
8389					printf("reverse ARP reply\n");
8390					break;
8391				case ARPOP_REQUEST:
8392					printf("ARP request\n");
8393					break;
8394				case ARPOP_REPLY:
8395					printf("ARP reply\n");
8396					break;
8397				default:
8398					printf("other ARP operation\n");
8399			}
8400			break;
8401		default:
8402			BCE_PRINTF("----: Other protocol.\n");
8403	}
8404
8405	BCE_PRINTF(
8406		"-----------------------------"
8407		"--------------"
8408		"-----------------------------\n");
8409}
8410
8411
8412/****************************************************************************/
8413/* Prints out information about an mbuf.                                    */
8414/*                                                                          */
8415/* Returns:                                                                 */
8416/*   Nothing.                                                               */
8417/****************************************************************************/
8418static __attribute__ ((noinline)) void
8419bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
8420{
8421	struct mbuf *mp = m;
8422
8423	if (m == NULL) {
8424		BCE_PRINTF("mbuf: null pointer\n");
8425		return;
8426	}
8427
8428	while (mp) {
8429		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
8430			mp, mp->m_len, mp->m_flags,
8431			"\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
8432			mp->m_data);
8433
8434		if (mp->m_flags & M_PKTHDR) {
8435			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
8436				mp->m_pkthdr.len, mp->m_flags,
8437				"\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
8438				"\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
8439				mp->m_pkthdr.csum_flags,
8440				"\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
8441				"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
8442				"\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
8443		}
8444
8445		if (mp->m_flags & M_EXT) {
8446			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
8447				mp->m_ext.ext_buf, mp->m_ext.ext_size);
8448			switch (mp->m_ext.ext_type) {
8449				case EXT_CLUSTER:    printf("EXT_CLUSTER\n"); break;
8450				case EXT_SFBUF:      printf("EXT_SFBUF\n"); break;
8451				case EXT_JUMBO9:     printf("EXT_JUMBO9\n"); break;
8452				case EXT_JUMBO16:    printf("EXT_JUMBO16\n"); break;
8453				case EXT_PACKET:     printf("EXT_PACKET\n"); break;
8454				case EXT_MBUF:       printf("EXT_MBUF\n"); break;
8455				case EXT_NET_DRV:    printf("EXT_NET_DRV\n"); break;
8456				case EXT_MOD_TYPE:   printf("EXT_MDD_TYPE\n"); break;
8457				case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
8458				case EXT_EXTREF:     printf("EXT_EXTREF\n"); break;
8459				default:             printf("UNKNOWN\n");
8460			}
8461		}
8462
8463		mp = mp->m_next;
8464	}
8465}
8466
8467
8468/****************************************************************************/
8469/* Prints out the mbufs in the TX mbuf chain.                               */
8470/*                                                                          */
8471/* Returns:                                                                 */
8472/*   Nothing.                                                               */
8473/****************************************************************************/
8474static __attribute__ ((noinline)) void
8475bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8476{
8477	struct mbuf *m;
8478
8479	BCE_PRINTF(
8480		"----------------------------"
8481		"  tx mbuf data  "
8482		"----------------------------\n");
8483
8484	for (int i = 0; i < count; i++) {
8485	 	m = sc->tx_mbuf_ptr[chain_prod];
8486		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
8487		bce_dump_mbuf(sc, m);
8488		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
8489	}
8490
8491	BCE_PRINTF(
8492		"----------------------------"
8493		"----------------"
8494		"----------------------------\n");
8495}
8496
8497
8498/****************************************************************************/
8499/* Prints out the mbufs in the RX mbuf chain.                               */
8500/*                                                                          */
8501/* Returns:                                                                 */
8502/*   Nothing.                                                               */
8503/****************************************************************************/
8504static __attribute__ ((noinline)) void
8505bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8506{
8507	struct mbuf *m;
8508
8509	BCE_PRINTF(
8510		"----------------------------"
8511		"  rx mbuf data  "
8512		"----------------------------\n");
8513
8514	for (int i = 0; i < count; i++) {
8515	 	m = sc->rx_mbuf_ptr[chain_prod];
8516		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
8517		bce_dump_mbuf(sc, m);
8518		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
8519	}
8520
8521
8522	BCE_PRINTF(
8523		"----------------------------"
8524		"----------------"
8525		"----------------------------\n");
8526}
8527
8528
8529#ifdef BCE_USE_SPLIT_HEADER
8530/****************************************************************************/
8531/* Prints out the mbufs in the mbuf page chain.                             */
8532/*                                                                          */
8533/* Returns:                                                                 */
8534/*   Nothing.                                                               */
8535/****************************************************************************/
8536static __attribute__ ((noinline)) void
8537bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8538{
8539	struct mbuf *m;
8540
8541	BCE_PRINTF(
8542		"----------------------------"
8543		"  pg mbuf data  "
8544		"----------------------------\n");
8545
8546	for (int i = 0; i < count; i++) {
8547	 	m = sc->pg_mbuf_ptr[chain_prod];
8548		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
8549		bce_dump_mbuf(sc, m);
8550		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
8551	}
8552
8553
8554	BCE_PRINTF(
8555		"----------------------------"
8556		"----------------"
8557		"----------------------------\n");
8558}
8559#endif
8560
8561
8562/****************************************************************************/
8563/* Prints out a tx_bd structure.                                            */
8564/*                                                                          */
8565/* Returns:                                                                 */
8566/*   Nothing.                                                               */
8567/****************************************************************************/
8568static __attribute__ ((noinline)) void
8569bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
8570{
8571	if (idx > MAX_TX_BD)
8572		/* Index out of range. */
8573		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
8574	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
8575		/* TX Chain page pointer. */
8576		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8577			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
8578	else {
8579			/* Normal tx_bd entry. */
8580			BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8581				"vlan tag= 0x%04X, flags = 0x%04X (", idx,
8582				txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
8583				txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
8584				txbd->tx_bd_flags);
8585
8586			if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
8587				printf(" CONN_FAULT");
8588
8589			if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
8590				printf(" TCP_UDP_CKSUM");
8591
8592			if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
8593				printf(" IP_CKSUM");
8594
8595			if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
8596				printf("  VLAN");
8597
8598			if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
8599				printf(" COAL_NOW");
8600
8601			if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
8602				printf(" DONT_GEN_CRC");
8603
8604			if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
8605				printf(" START");
8606
8607			if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
8608				printf(" END");
8609
8610			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
8611				printf(" LSO");
8612
8613			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
8614				printf(" OPTION_WORD");
8615
8616			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
8617				printf(" FLAGS");
8618
8619			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
8620				printf(" SNAP");
8621
8622			printf(" )\n");
8623		}
8624
8625}
8626
8627
8628/****************************************************************************/
8629/* Prints out a rx_bd structure.                                            */
8630/*                                                                          */
8631/* Returns:                                                                 */
8632/*   Nothing.                                                               */
8633/****************************************************************************/
8634static __attribute__ ((noinline)) void
8635bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
8636{
8637	if (idx > MAX_RX_BD)
8638		/* Index out of range. */
8639		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
8640	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
8641		/* RX Chain page pointer. */
8642		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8643			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
8644	else
8645		/* Normal rx_bd entry. */
8646		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8647			"flags = 0x%08X\n", idx,
8648			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
8649			rxbd->rx_bd_len, rxbd->rx_bd_flags);
8650}
8651
8652
8653#ifdef BCE_USE_SPLIT_HEADER
8654/****************************************************************************/
8655/* Prints out a rx_bd structure in the page chain.                          */
8656/*                                                                          */
8657/* Returns:                                                                 */
8658/*   Nothing.                                                               */
8659/****************************************************************************/
8660static __attribute__ ((noinline)) void
8661bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
8662{
8663	if (idx > MAX_PG_BD)
8664		/* Index out of range. */
8665		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
8666	else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
8667		/* Page Chain page pointer. */
8668		BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8669			idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
8670	else
8671		/* Normal rx_bd entry. */
8672		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8673			"flags = 0x%08X\n", idx,
8674			pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
8675			pgbd->rx_bd_len, pgbd->rx_bd_flags);
8676}
8677#endif
8678
8679
8680/****************************************************************************/
8681/* Prints out a l2_fhdr structure.                                          */
8682/*                                                                          */
8683/* Returns:                                                                 */
8684/*   Nothing.                                                               */
8685/****************************************************************************/
8686static __attribute__ ((noinline)) void
8687bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
8688{
8689	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
8690		"pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
8691		"tcp_udp_xsum = 0x%04X\n", idx,
8692		l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
8693		l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
8694		l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
8695}
8696
8697
8698/****************************************************************************/
8699/* Prints out context memory info.  (Only useful for CID 0 to 16.)          */
8700/*                                                                          */
8701/* Returns:                                                                 */
8702/*   Nothing.                                                               */
8703/****************************************************************************/
8704static __attribute__ ((noinline)) void
8705bce_dump_ctx(struct bce_softc *sc, u16 cid)
8706{
8707	if (cid <= TX_CID) {
8708		BCE_PRINTF(
8709			"----------------------------"
8710			"    CTX Data    "
8711			"----------------------------\n");
8712
8713		BCE_PRINTF("     0x%04X - (CID) Context ID\n", cid);
8714
8715		if (cid == RX_CID) {
8716			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
8717				"producer index\n",
8718				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
8719			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host byte sequence\n",
8720				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BSEQ));
8721			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
8722				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
8723			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
8724				"descriptor address\n",
8725 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
8726			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
8727				"descriptor address\n",
8728				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
8729			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer index\n",
8730				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDIDX));
8731			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
8732				"producer index\n",
8733				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_PG_BDIDX));
8734			BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
8735				"buffer size\n",
8736				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_PG_BUF_SIZE));
8737			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
8738				"chain address\n",
8739				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
8740			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
8741				"chain address\n",
8742				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
8743			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
8744				"consumer index\n",
8745				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDIDX));
8746		} else if (cid == TX_CID) {
8747			if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
8748				(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
8749				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
8750					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE_XI));
8751				BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx cmd\n",
8752					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE_XI));
8753				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) h/w buffer "
8754					"descriptor address\n",	CTX_RD(sc,
8755					GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
8756				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) h/w buffer "
8757					"descriptor address\n", CTX_RD(sc,
8758					GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
8759				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) host producer "
8760					"index\n", CTX_RD(sc, GET_CID_ADDR(cid),
8761					BCE_L2CTX_TX_HOST_BIDX_XI));
8762				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) host byte "
8763					"sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
8764					BCE_L2CTX_TX_HOST_BSEQ_XI));
8765			} else {
8766				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
8767					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
8768				BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
8769					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE));
8770				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) h/w buffer "
8771					"descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
8772					BCE_L2CTX_TX_TBDR_BHADDR_HI));
8773				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) h/w buffer "
8774					"descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
8775					BCE_L2CTX_TX_TBDR_BHADDR_LO));
8776				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host producer "
8777					"index\n", CTX_RD(sc, GET_CID_ADDR(cid),
8778					BCE_L2CTX_TX_HOST_BIDX));
8779				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
8780					"sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
8781					BCE_L2CTX_TX_HOST_BSEQ));
8782			}
8783		} else
8784			BCE_PRINTF(" Unknown CID\n");
8785
8786		BCE_PRINTF(
8787			"----------------------------"
8788			"    Raw CTX     "
8789			"----------------------------\n");
8790
8791		for (int i = 0x0; i < 0x300; i += 0x10) {
8792			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
8793				CTX_RD(sc, GET_CID_ADDR(cid), i),
8794				CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
8795				CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
8796				CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
8797		}
8798
8799
8800		BCE_PRINTF(
8801			"----------------------------"
8802			"----------------"
8803			"----------------------------\n");
8804	}
8805}
8806
8807
8808/****************************************************************************/
8809/* Prints out the FTQ data.                                                 */
8810/*                                                                          */
8811/* Returns:                                                                */
8812/*   Nothing.                                                               */
8813/****************************************************************************/
8814static __attribute__ ((noinline)) void
8815bce_dump_ftqs(struct bce_softc *sc)
8816{
8817	u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;
8818
8819	BCE_PRINTF(
8820		"----------------------------"
8821		"    FTQ Data    "
8822		"----------------------------\n");
8823
8824	BCE_PRINTF("   FTQ    Command    Control   Depth_Now  Max_Depth  Valid_Cnt \n");
8825	BCE_PRINTF(" ------- ---------- ---------- ---------- ---------- ----------\n");
8826
8827	/* Setup the generic statistic counters for the FTQ valid count. */
8828	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
8829		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT  << 16) |
8830		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT   <<  8) |
8831		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
8832	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
8833
8834	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT  << 24) |
8835		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT  << 16) |
8836		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT <<  8) |
8837		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
8838	REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);
8839
8840	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT  << 24) |
8841		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT  << 16) |
8842		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT   <<  8) |
8843		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
8844	REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);
8845
8846	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT   << 24) |
8847		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT  << 16) |
8848		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT  <<  8) |
8849		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
8850	REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);
8851
8852	/* Input queue to the Receive Lookup state machine */
8853	cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
8854	ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
8855	cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
8856	max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
8857	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
8858	BCE_PRINTF(" RLUP    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8859		cmd, ctl, cur_depth, max_depth, valid_cnt);
8860
8861	/* Input queue to the Receive Processor */
8862	cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
8863	ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
8864	cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
8865	max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
8866	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
8867	BCE_PRINTF(" RXP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8868		cmd, ctl, cur_depth, max_depth, valid_cnt);
8869
8870	/* Input queue to the Recevie Processor */
8871	cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
8872	ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
8873	cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
8874	max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
8875	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
8876	BCE_PRINTF(" RXPC    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8877		cmd, ctl, cur_depth, max_depth, valid_cnt);
8878
8879	/* Input queue to the Receive Virtual to Physical state machine */
8880	cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
8881	ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
8882	cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
8883	max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
8884	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
8885	BCE_PRINTF(" RV2PP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8886		cmd, ctl, cur_depth, max_depth, valid_cnt);
8887
8888	/* Input queue to the Recevie Virtual to Physical state machine */
8889	cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
8890	ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
8891	cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
8892	max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
8893	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
8894	BCE_PRINTF(" RV2PM   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8895		cmd, ctl, cur_depth, max_depth, valid_cnt);
8896
8897	/* Input queue to the Receive Virtual to Physical state machine */
8898	cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
8899	ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
8900	cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
8901	max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
8902	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
8903	BCE_PRINTF(" RV2PT   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8904		cmd, ctl, cur_depth, max_depth, valid_cnt);
8905
8906	/* Input queue to the Receive DMA state machine */
8907	cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
8908	ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
8909	cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
8910	max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
8911	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
8912	BCE_PRINTF(" RDMA    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8913		cmd, ctl, cur_depth, max_depth, valid_cnt);
8914
8915	/* Input queue to the Transmit Scheduler state machine */
8916	cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
8917	ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
8918	cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
8919	max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
8920	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
8921	BCE_PRINTF(" TSCH    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8922		cmd, ctl, cur_depth, max_depth, valid_cnt);
8923
8924	/* Input queue to the Transmit Buffer Descriptor state machine */
8925	cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
8926	ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
8927	cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
8928	max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
8929	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
8930	BCE_PRINTF(" TBDR    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8931		cmd, ctl, cur_depth, max_depth, valid_cnt);
8932
8933	/* Input queue to the Transmit Processor */
8934	cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
8935	ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
8936	cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
8937	max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
8938	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
8939	BCE_PRINTF(" TXP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8940		cmd, ctl, cur_depth, max_depth, valid_cnt);
8941
8942	/* Input queue to the Transmit DMA state machine */
8943	cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
8944	ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
8945	cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
8946	max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
8947	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
8948	BCE_PRINTF(" TDMA    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8949		cmd, ctl, cur_depth, max_depth, valid_cnt);
8950
8951	/* Input queue to the Transmit Patch-Up Processor */
8952	cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
8953	ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
8954	cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
8955	max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
8956	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
8957	BCE_PRINTF(" TPAT    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8958		cmd, ctl, cur_depth, max_depth, valid_cnt);
8959
8960	/* Input queue to the Transmit Assembler state machine */
8961	cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
8962	ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
8963	cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
8964	max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
8965	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
8966	BCE_PRINTF(" TAS     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8967		cmd, ctl, cur_depth, max_depth, valid_cnt);
8968
8969	/* Input queue to the Completion Processor */
8970	cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
8971	ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
8972	cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
8973	max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
8974	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
8975	BCE_PRINTF(" COMX    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8976		cmd, ctl, cur_depth, max_depth, valid_cnt);
8977
8978	/* Input queue to the Completion Processor */
8979	cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
8980	ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
8981	cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
8982	max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
8983	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
8984	BCE_PRINTF(" COMT    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8985		cmd, ctl, cur_depth, max_depth, valid_cnt);
8986
8987	/* Input queue to the Completion Processor */
8988	cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
8989	ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
8990	cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
8991	max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
8992	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
8993	BCE_PRINTF(" COMX    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8994		cmd, ctl, cur_depth, max_depth, valid_cnt);
8995
8996	/* Setup the generic statistic counters for the FTQ valid count. */
8997	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT  << 16) |
8998		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT  <<  8) |
8999		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
9000
9001	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
9002		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
9003		val = val | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 24);
9004		REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9005
9006	/* Input queue to the Management Control Processor */
9007	cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
9008	ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
9009	cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9010	max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9011	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9012	BCE_PRINTF(" MCP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9013		cmd, ctl, cur_depth, max_depth, valid_cnt);
9014
9015	/* Input queue to the Command Processor */
9016	cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
9017	ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
9018	cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9019	max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9020	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9021	BCE_PRINTF(" CP      0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9022		cmd, ctl, cur_depth, max_depth, valid_cnt);
9023
9024	/* Input queue to the Completion Scheduler state machine */
9025	cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
9026	ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
9027	cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
9028	max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
9029	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9030	BCE_PRINTF(" CS      0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9031		cmd, ctl, cur_depth, max_depth, valid_cnt);
9032
9033	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9034		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9035		/* Input queue to the Receive Virtual to Physical Command Scheduler */
9036		cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
9037		ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
9038		cur_depth = (ctl & 0xFFC00000) >> 22;
9039		max_depth = (ctl & 0x003FF000) >> 12;
9040		valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9041		BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9042			cmd, ctl, cur_depth, max_depth, valid_cnt);
9043	}
9044
9045	BCE_PRINTF(
9046		"----------------------------"
9047		"----------------"
9048		"----------------------------\n");
9049}
9050
9051
9052/****************************************************************************/
9053/* Prints out the TX chain.                                                 */
9054/*                                                                          */
9055/* Returns:                                                                 */
9056/*   Nothing.                                                               */
9057/****************************************************************************/
9058static __attribute__ ((noinline)) void
9059bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
9060{
9061	struct tx_bd *txbd;
9062
9063	/* First some info about the tx_bd chain structure. */
9064	BCE_PRINTF(
9065		"----------------------------"
9066		"  tx_bd  chain  "
9067		"----------------------------\n");
9068
9069	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
9070		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
9071
9072	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
9073		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
9074
9075	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
9076
9077	BCE_PRINTF(
9078		"----------------------------"
9079		"   tx_bd data   "
9080		"----------------------------\n");
9081
9082	/* Now print out the tx_bd's themselves. */
9083	for (int i = 0; i < count; i++) {
9084	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
9085		bce_dump_txbd(sc, tx_prod, txbd);
9086		tx_prod = NEXT_TX_BD(tx_prod);
9087	}
9088
9089	BCE_PRINTF(
9090		"----------------------------"
9091		"----------------"
9092		"----------------------------\n");
9093}
9094
9095
9096/****************************************************************************/
9097/* Prints out the RX chain.                                                 */
9098/*                                                                          */
9099/* Returns:                                                                 */
9100/*   Nothing.                                                               */
9101/****************************************************************************/
9102static __attribute__ ((noinline)) void
9103bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
9104{
9105	struct rx_bd *rxbd;
9106
9107	/* First some info about the rx_bd chain structure. */
9108	BCE_PRINTF(
9109		"----------------------------"
9110		"  rx_bd  chain  "
9111		"----------------------------\n");
9112
9113	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
9114		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
9115
9116	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9117		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
9118
9119	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
9120
9121	BCE_PRINTF(
9122		"----------------------------"
9123		"   rx_bd data   "
9124		"----------------------------\n");
9125
9126	/* Now print out the rx_bd's themselves. */
9127	for (int i = 0; i < count; i++) {
9128		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
9129		bce_dump_rxbd(sc, rx_prod, rxbd);
9130		rx_prod = RX_CHAIN_IDX(rx_prod + 1);
9131	}
9132
9133	BCE_PRINTF(
9134		"----------------------------"
9135		"----------------"
9136		"----------------------------\n");
9137}
9138
9139
9140#ifdef BCE_USE_SPLIT_HEADER
9141/****************************************************************************/
9142/* Prints out the page chain.                                               */
9143/*                                                                          */
9144/* Returns:                                                                 */
9145/*   Nothing.                                                               */
9146/****************************************************************************/
9147static __attribute__ ((noinline)) void
9148bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
9149{
9150	struct rx_bd *pgbd;
9151
9152	/* First some info about the page chain structure. */
9153	BCE_PRINTF(
9154		"----------------------------"
9155		"   page chain   "
9156		"----------------------------\n");
9157
9158	BCE_PRINTF("page size      = 0x%08X, pg chain pages        = 0x%08X\n",
9159		(u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
9160
9161	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9162		(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
9163
9164	BCE_PRINTF("total rx_bd    = 0x%08X, max_pg_bd             = 0x%08X\n",
9165		(u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
9166
9167	BCE_PRINTF(
9168		"----------------------------"
9169		"   page data    "
9170		"----------------------------\n");
9171
9172	/* Now print out the rx_bd's themselves. */
9173	for (int i = 0; i < count; i++) {
9174		pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
9175		bce_dump_pgbd(sc, pg_prod, pgbd);
9176		pg_prod = PG_CHAIN_IDX(pg_prod + 1);
9177	}
9178
9179	BCE_PRINTF(
9180		"----------------------------"
9181		"----------------"
9182		"----------------------------\n");
9183}
9184#endif
9185
9186
9187/****************************************************************************/
9188/* Prints out the status block from host memory.                            */
9189/*                                                                          */
9190/* Returns:                                                                 */
9191/*   Nothing.                                                               */
9192/****************************************************************************/
9193static __attribute__ ((noinline)) void
9194bce_dump_status_block(struct bce_softc *sc)
9195{
9196	struct status_block *sblk;
9197
9198	sblk = sc->status_block;
9199
9200   	BCE_PRINTF(
9201		"----------------------------"
9202		"  Status Block  "
9203		"----------------------------\n");
9204
9205	BCE_PRINTF("    0x%08X - attn_bits\n",
9206		sblk->status_attn_bits);
9207
9208	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
9209		sblk->status_attn_bits_ack);
9210
9211	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
9212		sblk->status_rx_quick_consumer_index0,
9213		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
9214
9215	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
9216		sblk->status_tx_quick_consumer_index0,
9217		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
9218
9219	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
9220
9221	/* Theses indices are not used for normal L2 drivers. */
9222	if (sblk->status_rx_quick_consumer_index1)
9223		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
9224			sblk->status_rx_quick_consumer_index1,
9225			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
9226
9227	if (sblk->status_tx_quick_consumer_index1)
9228		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
9229			sblk->status_tx_quick_consumer_index1,
9230			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
9231
9232	if (sblk->status_rx_quick_consumer_index2)
9233		BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n",
9234			sblk->status_rx_quick_consumer_index2,
9235			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
9236
9237	if (sblk->status_tx_quick_consumer_index2)
9238		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
9239			sblk->status_tx_quick_consumer_index2,
9240			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
9241
9242	if (sblk->status_rx_quick_consumer_index3)
9243		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
9244			sblk->status_rx_quick_consumer_index3,
9245			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
9246
9247	if (sblk->status_tx_quick_consumer_index3)
9248		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
9249			sblk->status_tx_quick_consumer_index3,
9250			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
9251
9252	if (sblk->status_rx_quick_consumer_index4 ||
9253		sblk->status_rx_quick_consumer_index5)
9254		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
9255			sblk->status_rx_quick_consumer_index4,
9256			sblk->status_rx_quick_consumer_index5);
9257
9258	if (sblk->status_rx_quick_consumer_index6 ||
9259		sblk->status_rx_quick_consumer_index7)
9260		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
9261			sblk->status_rx_quick_consumer_index6,
9262			sblk->status_rx_quick_consumer_index7);
9263
9264	if (sblk->status_rx_quick_consumer_index8 ||
9265		sblk->status_rx_quick_consumer_index9)
9266		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
9267			sblk->status_rx_quick_consumer_index8,
9268			sblk->status_rx_quick_consumer_index9);
9269
9270	if (sblk->status_rx_quick_consumer_index10 ||
9271		sblk->status_rx_quick_consumer_index11)
9272		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
9273			sblk->status_rx_quick_consumer_index10,
9274			sblk->status_rx_quick_consumer_index11);
9275
9276	if (sblk->status_rx_quick_consumer_index12 ||
9277		sblk->status_rx_quick_consumer_index13)
9278		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
9279			sblk->status_rx_quick_consumer_index12,
9280			sblk->status_rx_quick_consumer_index13);
9281
9282	if (sblk->status_rx_quick_consumer_index14 ||
9283		sblk->status_rx_quick_consumer_index15)
9284		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
9285			sblk->status_rx_quick_consumer_index14,
9286			sblk->status_rx_quick_consumer_index15);
9287
9288	if (sblk->status_completion_producer_index ||
9289		sblk->status_cmd_consumer_index)
9290		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
9291			sblk->status_completion_producer_index,
9292			sblk->status_cmd_consumer_index);
9293
9294	BCE_PRINTF(
9295		"----------------------------"
9296		"----------------"
9297		"----------------------------\n");
9298}
9299
9300
9301/****************************************************************************/
9302/* Prints out the statistics block from host memory.                        */
9303/*                                                                          */
9304/* Returns:                                                                 */
9305/*   Nothing.                                                               */
9306/****************************************************************************/
9307static __attribute__ ((noinline)) void
9308bce_dump_stats_block(struct bce_softc *sc)
9309{
9310	struct statistics_block *sblk;
9311
9312	sblk = sc->stats_block;
9313
9314	BCE_PRINTF(
9315		"---------------"
9316		" Stats Block  (All Stats Not Shown Are 0) "
9317		"---------------\n");
9318
9319	if (sblk->stat_IfHCInOctets_hi
9320		|| sblk->stat_IfHCInOctets_lo)
9321		BCE_PRINTF("0x%08X:%08X : "
9322			"IfHcInOctets\n",
9323			sblk->stat_IfHCInOctets_hi,
9324			sblk->stat_IfHCInOctets_lo);
9325
9326	if (sblk->stat_IfHCInBadOctets_hi
9327		|| sblk->stat_IfHCInBadOctets_lo)
9328		BCE_PRINTF("0x%08X:%08X : "
9329			"IfHcInBadOctets\n",
9330			sblk->stat_IfHCInBadOctets_hi,
9331			sblk->stat_IfHCInBadOctets_lo);
9332
9333	if (sblk->stat_IfHCOutOctets_hi
9334		|| sblk->stat_IfHCOutOctets_lo)
9335		BCE_PRINTF("0x%08X:%08X : "
9336			"IfHcOutOctets\n",
9337			sblk->stat_IfHCOutOctets_hi,
9338			sblk->stat_IfHCOutOctets_lo);
9339
9340	if (sblk->stat_IfHCOutBadOctets_hi
9341		|| sblk->stat_IfHCOutBadOctets_lo)
9342		BCE_PRINTF("0x%08X:%08X : "
9343			"IfHcOutBadOctets\n",
9344			sblk->stat_IfHCOutBadOctets_hi,
9345			sblk->stat_IfHCOutBadOctets_lo);
9346
9347	if (sblk->stat_IfHCInUcastPkts_hi
9348		|| sblk->stat_IfHCInUcastPkts_lo)
9349		BCE_PRINTF("0x%08X:%08X : "
9350			"IfHcInUcastPkts\n",
9351			sblk->stat_IfHCInUcastPkts_hi,
9352			sblk->stat_IfHCInUcastPkts_lo);
9353
9354	if (sblk->stat_IfHCInBroadcastPkts_hi
9355		|| sblk->stat_IfHCInBroadcastPkts_lo)
9356		BCE_PRINTF("0x%08X:%08X : "
9357			"IfHcInBroadcastPkts\n",
9358			sblk->stat_IfHCInBroadcastPkts_hi,
9359			sblk->stat_IfHCInBroadcastPkts_lo);
9360
9361	if (sblk->stat_IfHCInMulticastPkts_hi
9362		|| sblk->stat_IfHCInMulticastPkts_lo)
9363		BCE_PRINTF("0x%08X:%08X : "
9364			"IfHcInMulticastPkts\n",
9365			sblk->stat_IfHCInMulticastPkts_hi,
9366			sblk->stat_IfHCInMulticastPkts_lo);
9367
9368	if (sblk->stat_IfHCOutUcastPkts_hi
9369		|| sblk->stat_IfHCOutUcastPkts_lo)
9370		BCE_PRINTF("0x%08X:%08X : "
9371			"IfHcOutUcastPkts\n",
9372			sblk->stat_IfHCOutUcastPkts_hi,
9373			sblk->stat_IfHCOutUcastPkts_lo);
9374
9375	if (sblk->stat_IfHCOutBroadcastPkts_hi
9376		|| sblk->stat_IfHCOutBroadcastPkts_lo)
9377		BCE_PRINTF("0x%08X:%08X : "
9378			"IfHcOutBroadcastPkts\n",
9379			sblk->stat_IfHCOutBroadcastPkts_hi,
9380			sblk->stat_IfHCOutBroadcastPkts_lo);
9381
9382	if (sblk->stat_IfHCOutMulticastPkts_hi
9383		|| sblk->stat_IfHCOutMulticastPkts_lo)
9384		BCE_PRINTF("0x%08X:%08X : "
9385			"IfHcOutMulticastPkts\n",
9386			sblk->stat_IfHCOutMulticastPkts_hi,
9387			sblk->stat_IfHCOutMulticastPkts_lo);
9388
9389	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
9390		BCE_PRINTF("         0x%08X : "
9391			"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
9392			sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
9393
9394	if (sblk->stat_Dot3StatsCarrierSenseErrors)
9395		BCE_PRINTF("         0x%08X : Dot3StatsCarrierSenseErrors\n",
9396			sblk->stat_Dot3StatsCarrierSenseErrors);
9397
9398	if (sblk->stat_Dot3StatsFCSErrors)
9399		BCE_PRINTF("         0x%08X : Dot3StatsFCSErrors\n",
9400			sblk->stat_Dot3StatsFCSErrors);
9401
9402	if (sblk->stat_Dot3StatsAlignmentErrors)
9403		BCE_PRINTF("         0x%08X : Dot3StatsAlignmentErrors\n",
9404			sblk->stat_Dot3StatsAlignmentErrors);
9405
9406	if (sblk->stat_Dot3StatsSingleCollisionFrames)
9407		BCE_PRINTF("         0x%08X : Dot3StatsSingleCollisionFrames\n",
9408			sblk->stat_Dot3StatsSingleCollisionFrames);
9409
9410	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
9411		BCE_PRINTF("         0x%08X : Dot3StatsMultipleCollisionFrames\n",
9412			sblk->stat_Dot3StatsMultipleCollisionFrames);
9413
9414	if (sblk->stat_Dot3StatsDeferredTransmissions)
9415		BCE_PRINTF("         0x%08X : Dot3StatsDeferredTransmissions\n",
9416			sblk->stat_Dot3StatsDeferredTransmissions);
9417
9418	if (sblk->stat_Dot3StatsExcessiveCollisions)
9419		BCE_PRINTF("         0x%08X : Dot3StatsExcessiveCollisions\n",
9420			sblk->stat_Dot3StatsExcessiveCollisions);
9421
9422	if (sblk->stat_Dot3StatsLateCollisions)
9423		BCE_PRINTF("         0x%08X : Dot3StatsLateCollisions\n",
9424			sblk->stat_Dot3StatsLateCollisions);
9425
9426	if (sblk->stat_EtherStatsCollisions)
9427		BCE_PRINTF("         0x%08X : EtherStatsCollisions\n",
9428			sblk->stat_EtherStatsCollisions);
9429
9430	if (sblk->stat_EtherStatsFragments)
9431		BCE_PRINTF("         0x%08X : EtherStatsFragments\n",
9432			sblk->stat_EtherStatsFragments);
9433
9434	if (sblk->stat_EtherStatsJabbers)
9435		BCE_PRINTF("         0x%08X : EtherStatsJabbers\n",
9436			sblk->stat_EtherStatsJabbers);
9437
9438	if (sblk->stat_EtherStatsUndersizePkts)
9439		BCE_PRINTF("         0x%08X : EtherStatsUndersizePkts\n",
9440			sblk->stat_EtherStatsUndersizePkts);
9441
9442	if (sblk->stat_EtherStatsOverrsizePkts)
9443		BCE_PRINTF("         0x%08X : EtherStatsOverrsizePkts\n",
9444			sblk->stat_EtherStatsOverrsizePkts);
9445
9446	if (sblk->stat_EtherStatsPktsRx64Octets)
9447		BCE_PRINTF("         0x%08X : EtherStatsPktsRx64Octets\n",
9448			sblk->stat_EtherStatsPktsRx64Octets);
9449
9450	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
9451		BCE_PRINTF("         0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
9452			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
9453
9454	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
9455		BCE_PRINTF("         0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
9456			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
9457
9458	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
9459		BCE_PRINTF("         0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
9460			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
9461
9462	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
9463		BCE_PRINTF("         0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
9464			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
9465
9466	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
9467		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
9468			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
9469
9470	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
9471		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
9472			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
9473
9474	if (sblk->stat_EtherStatsPktsTx64Octets)
9475		BCE_PRINTF("         0x%08X : EtherStatsPktsTx64Octets\n",
9476			sblk->stat_EtherStatsPktsTx64Octets);
9477
9478	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
9479		BCE_PRINTF("         0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
9480			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
9481
9482	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
9483		BCE_PRINTF("         0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
9484			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
9485
9486	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
9487		BCE_PRINTF("         0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
9488			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
9489
9490	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
9491		BCE_PRINTF("         0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
9492			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
9493
9494	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
9495		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
9496			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
9497
9498	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
9499		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
9500			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
9501
9502	if (sblk->stat_XonPauseFramesReceived)
9503		BCE_PRINTF("         0x%08X : XonPauseFramesReceived\n",
9504			sblk->stat_XonPauseFramesReceived);
9505
9506	if (sblk->stat_XoffPauseFramesReceived)
9507	   BCE_PRINTF("          0x%08X : XoffPauseFramesReceived\n",
9508			sblk->stat_XoffPauseFramesReceived);
9509
9510	if (sblk->stat_OutXonSent)
9511		BCE_PRINTF("         0x%08X : OutXonSent\n",
9512			sblk->stat_OutXonSent);
9513
9514	if (sblk->stat_OutXoffSent)
9515		BCE_PRINTF("         0x%08X : OutXoffSent\n",
9516			sblk->stat_OutXoffSent);
9517
9518	if (sblk->stat_FlowControlDone)
9519		BCE_PRINTF("         0x%08X : FlowControlDone\n",
9520			sblk->stat_FlowControlDone);
9521
9522	if (sblk->stat_MacControlFramesReceived)
9523		BCE_PRINTF("         0x%08X : MacControlFramesReceived\n",
9524			sblk->stat_MacControlFramesReceived);
9525
9526	if (sblk->stat_XoffStateEntered)
9527		BCE_PRINTF("         0x%08X : XoffStateEntered\n",
9528			sblk->stat_XoffStateEntered);
9529
9530	if (sblk->stat_IfInFramesL2FilterDiscards)
9531		BCE_PRINTF("         0x%08X : IfInFramesL2FilterDiscards\n",
9532			sblk->stat_IfInFramesL2FilterDiscards);
9533
9534	if (sblk->stat_IfInRuleCheckerDiscards)
9535		BCE_PRINTF("         0x%08X : IfInRuleCheckerDiscards\n",
9536			sblk->stat_IfInRuleCheckerDiscards);
9537
9538	if (sblk->stat_IfInFTQDiscards)
9539		BCE_PRINTF("         0x%08X : IfInFTQDiscards\n",
9540			sblk->stat_IfInFTQDiscards);
9541
9542	if (sblk->stat_IfInMBUFDiscards)
9543		BCE_PRINTF("         0x%08X : IfInMBUFDiscards\n",
9544			sblk->stat_IfInMBUFDiscards);
9545
9546	if (sblk->stat_IfInRuleCheckerP4Hit)
9547		BCE_PRINTF("         0x%08X : IfInRuleCheckerP4Hit\n",
9548			sblk->stat_IfInRuleCheckerP4Hit);
9549
9550	if (sblk->stat_CatchupInRuleCheckerDiscards)
9551		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerDiscards\n",
9552			sblk->stat_CatchupInRuleCheckerDiscards);
9553
9554	if (sblk->stat_CatchupInFTQDiscards)
9555		BCE_PRINTF("         0x%08X : CatchupInFTQDiscards\n",
9556			sblk->stat_CatchupInFTQDiscards);
9557
9558	if (sblk->stat_CatchupInMBUFDiscards)
9559		BCE_PRINTF("         0x%08X : CatchupInMBUFDiscards\n",
9560			sblk->stat_CatchupInMBUFDiscards);
9561
9562	if (sblk->stat_CatchupInRuleCheckerP4Hit)
9563		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerP4Hit\n",
9564			sblk->stat_CatchupInRuleCheckerP4Hit);
9565
9566	BCE_PRINTF(
9567		"----------------------------"
9568		"----------------"
9569		"----------------------------\n");
9570}
9571
9572
9573/****************************************************************************/
9574/* Prints out a summary of the driver state.                                */
9575/*                                                                          */
9576/* Returns:                                                                 */
9577/*   Nothing.                                                               */
9578/****************************************************************************/
9579static __attribute__ ((noinline)) void
9580bce_dump_driver_state(struct bce_softc *sc)
9581{
9582	u32 val_hi, val_lo;
9583
9584	BCE_PRINTF(
9585		"-----------------------------"
9586		" Driver State "
9587		"-----------------------------\n");
9588
9589	val_hi = BCE_ADDR_HI(sc);
9590	val_lo = BCE_ADDR_LO(sc);
9591	BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
9592		val_hi, val_lo);
9593
9594	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
9595	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
9596	BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
9597		val_hi, val_lo);
9598
9599	val_hi = BCE_ADDR_HI(sc->status_block);
9600	val_lo = BCE_ADDR_LO(sc->status_block);
9601	BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
9602		val_hi, val_lo);
9603
9604	val_hi = BCE_ADDR_HI(sc->stats_block);
9605	val_lo = BCE_ADDR_LO(sc->stats_block);
9606	BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
9607		val_hi, val_lo);
9608
9609	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
9610	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
9611	BCE_PRINTF(
9612		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
9613		val_hi, val_lo);
9614
9615	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
9616	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
9617	BCE_PRINTF(
9618		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
9619		val_hi, val_lo);
9620
9621#ifdef BCE_USE_SPLIT_HEADER
9622	val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
9623	val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
9624	BCE_PRINTF(
9625		"0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
9626		val_hi, val_lo);
9627#endif
9628
9629	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
9630	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
9631	BCE_PRINTF(
9632		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
9633		val_hi, val_lo);
9634
9635	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
9636	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
9637	BCE_PRINTF(
9638		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
9639		val_hi, val_lo);
9640
9641#ifdef BCE_USE_SPLIT_HEADER
9642	val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
9643	val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
9644	BCE_PRINTF(
9645		"0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
9646		val_hi, val_lo);
9647#endif
9648
9649	BCE_PRINTF("         0x%08X - (sc->interrupts_generated) h/w intrs\n",
9650		sc->interrupts_generated);
9651
9652	BCE_PRINTF("         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
9653		sc->rx_interrupts);
9654
9655	BCE_PRINTF("         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
9656		sc->tx_interrupts);
9657
9658	BCE_PRINTF("         0x%08X - (sc->last_status_idx) status block index\n",
9659		sc->last_status_idx);
9660
9661	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
9662		sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
9663
9664	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
9665		sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
9666
9667	BCE_PRINTF("         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
9668		sc->tx_prod_bseq);
9669
9670	BCE_PRINTF("         0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
9671		sc->debug_tx_mbuf_alloc);
9672
9673	BCE_PRINTF("         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
9674		sc->used_tx_bd);
9675
9676	BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
9677		sc->tx_hi_watermark, sc->max_tx_bd);
9678
9679	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
9680		sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
9681
9682	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
9683		sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
9684
9685	BCE_PRINTF("         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
9686		sc->rx_prod_bseq);
9687
9688	BCE_PRINTF("         0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
9689		sc->debug_rx_mbuf_alloc);
9690
9691	BCE_PRINTF("         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
9692		sc->free_rx_bd);
9693
9694#ifdef BCE_USE_SPLIT_HEADER
9695	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
9696		sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
9697
9698	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
9699		sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
9700
9701	BCE_PRINTF("         0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
9702		sc->debug_pg_mbuf_alloc);
9703
9704	BCE_PRINTF("         0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
9705		sc->free_pg_bd);
9706
9707	BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
9708		sc->pg_low_watermark, sc->max_pg_bd);
9709#endif
9710
9711	BCE_PRINTF("         0x%08X - (sc->mbuf_alloc_failed) "
9712		"mbuf alloc failures\n",
9713		sc->mbuf_alloc_failed);
9714
9715	BCE_PRINTF("         0x%08X - (sc->debug_mbuf_sim_alloc_failed) "
9716		"simulated mbuf alloc failures\n",
9717		sc->debug_mbuf_sim_alloc_failed);
9718
9719	BCE_PRINTF("         0x%08X - (sc->bce_flags) bce mac flags\n",
9720		sc->bce_flags);
9721
9722	BCE_PRINTF("         0x%08X - (sc->bce_phy_flags) bce phy flags\n",
9723		sc->bce_phy_flags);
9724
9725	BCE_PRINTF(
9726		"----------------------------"
9727		"----------------"
9728		"----------------------------\n");
9729}
9730
9731
9732/****************************************************************************/
9733/* Prints out the hardware state through a summary of important register,   */
9734/* followed by a complete register dump.                                    */
9735/*                                                                          */
9736/* Returns:                                                                 */
9737/*   Nothing.                                                               */
9738/****************************************************************************/
9739static __attribute__ ((noinline)) void
9740bce_dump_hw_state(struct bce_softc *sc)
9741{
9742	u32 val;
9743
9744	BCE_PRINTF(
9745		"----------------------------"
9746		" Hardware State "
9747		"----------------------------\n");
9748
9749	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
9750
9751	val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
9752	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
9753		val, BCE_MISC_ENABLE_STATUS_BITS);
9754
9755	val = REG_RD(sc, BCE_DMA_STATUS);
9756	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
9757
9758	val = REG_RD(sc, BCE_CTX_STATUS);
9759	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
9760
9761	val = REG_RD(sc, BCE_EMAC_STATUS);
9762	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
9763
9764	val = REG_RD(sc, BCE_RPM_STATUS);
9765	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
9766
9767	val = REG_RD(sc, 0x2004);
9768	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
9769
9770	val = REG_RD(sc, BCE_RV2P_STATUS);
9771	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
9772
9773	val = REG_RD(sc, 0x2c04);
9774	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
9775
9776	val = REG_RD(sc, BCE_TBDR_STATUS);
9777	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
9778
9779	val = REG_RD(sc, BCE_TDMA_STATUS);
9780	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
9781
9782	val = REG_RD(sc, BCE_HC_STATUS);
9783	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
9784
9785	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
9786	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
9787
9788	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
9789	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
9790
9791	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
9792	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
9793
9794	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
9795	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
9796
9797	val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
9798	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
9799
9800	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
9801	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
9802
9803	BCE_PRINTF(
9804		"----------------------------"
9805		"----------------"
9806		"----------------------------\n");
9807
9808	BCE_PRINTF(
9809		"----------------------------"
9810		" Register  Dump "
9811		"----------------------------\n");
9812
9813	for (int i = 0x400; i < 0x8000; i += 0x10) {
9814		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9815			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
9816			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
9817	}
9818
9819	BCE_PRINTF(
9820		"----------------------------"
9821		"----------------"
9822		"----------------------------\n");
9823}
9824
9825
9826/****************************************************************************/
9827/* Prints out the mailbox queue registers.                                  */
9828/*                                                                          */
9829/* Returns:                                                                 */
9830/*   Nothing.                                                               */
9831/****************************************************************************/
9832static __attribute__ ((noinline)) void
9833bce_dump_mq_regs(struct bce_softc *sc)
9834{
9835	BCE_PRINTF(
9836		"----------------------------"
9837		"    MQ Regs     "
9838		"----------------------------\n");
9839
9840	BCE_PRINTF(
9841		"----------------------------"
9842		"----------------"
9843		"----------------------------\n");
9844
9845	for (int i = 0x3c00; i < 0x4000; i += 0x10) {
9846		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9847			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
9848			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
9849	}
9850
9851	BCE_PRINTF(
9852		"----------------------------"
9853		"----------------"
9854		"----------------------------\n");
9855}
9856
9857
9858/****************************************************************************/
9859/* Prints out the bootcode state.                                           */
9860/*                                                                          */
9861/* Returns:                                                                 */
9862/*   Nothing.                                                               */
9863/****************************************************************************/
9864static __attribute__ ((noinline)) void
9865bce_dump_bc_state(struct bce_softc *sc)
9866{
9867	u32 val;
9868
9869	BCE_PRINTF(
9870		"----------------------------"
9871		" Bootcode State "
9872		"----------------------------\n");
9873
9874	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
9875
9876	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
9877	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
9878		val, BCE_BC_RESET_TYPE);
9879
9880	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
9881	BCE_PRINTF("0x%08X - (0x%06X) state\n",
9882		val, BCE_BC_STATE);
9883
9884	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
9885	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
9886		val, BCE_BC_CONDITION);
9887
9888	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
9889	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
9890		val, BCE_BC_STATE_DEBUG_CMD);
9891
9892	BCE_PRINTF(
9893		"----------------------------"
9894		"----------------"
9895		"----------------------------\n");
9896}
9897
9898
9899/****************************************************************************/
9900/* Prints out the TXP processor state.                                      */
9901/*                                                                          */
9902/* Returns:                                                                 */
9903/*   Nothing.                                                               */
9904/****************************************************************************/
9905static __attribute__ ((noinline)) void
9906bce_dump_txp_state(struct bce_softc *sc, int regs)
9907{
9908	u32 val;
9909	u32 fw_version[3];
9910
9911	BCE_PRINTF(
9912		"----------------------------"
9913		"   TXP  State   "
9914		"----------------------------\n");
9915
9916	for (int i = 0; i < 3; i++)
9917		fw_version[i] = htonl(REG_RD_IND(sc,
9918			(BCE_TXP_SCRATCH + 0x10 + i * 4)));
9919	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
9920
9921	val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
9922	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val, BCE_TXP_CPU_MODE);
9923
9924	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
9925	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
9926
9927	val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
9928	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val,
9929		BCE_TXP_CPU_EVENT_MASK);
9930
9931	if (regs) {
9932		BCE_PRINTF(
9933			"----------------------------"
9934			" Register  Dump "
9935			"----------------------------\n");
9936
9937		for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
9938			/* Skip the big blank spaces */
9939			if (i < 0x454000 && i > 0x5ffff)
9940				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9941					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
9942					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
9943		}
9944	}
9945
9946	BCE_PRINTF(
9947		"----------------------------"
9948		"----------------"
9949		"----------------------------\n");
9950}
9951
9952
9953/****************************************************************************/
9954/* Prints out the RXP processor state.                                      */
9955/*                                                                          */
9956/* Returns:                                                                 */
9957/*   Nothing.                                                               */
9958/****************************************************************************/
9959static __attribute__ ((noinline)) void
9960bce_dump_rxp_state(struct bce_softc *sc, int regs)
9961{
9962	u32 val;
9963	u32 fw_version[3];
9964
9965	BCE_PRINTF(
9966		"----------------------------"
9967		"   RXP  State   "
9968		"----------------------------\n");
9969
9970	for (int i = 0; i < 3; i++)
9971		fw_version[i] = htonl(REG_RD_IND(sc,
9972			(BCE_RXP_SCRATCH + 0x10 + i * 4)));
9973	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
9974
9975	val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
9976	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val, BCE_RXP_CPU_MODE);
9977
9978	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
9979	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
9980
9981	val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
9982	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val,
9983		BCE_RXP_CPU_EVENT_MASK);
9984
9985	if (regs) {
9986		BCE_PRINTF(
9987			"----------------------------"
9988			" Register  Dump "
9989			"----------------------------\n");
9990
9991		for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
9992			/* Skip the big blank sapces */
9993			if (i < 0xc5400 && i > 0xdffff)
9994				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9995	 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
9996					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
9997		}
9998	}
9999
10000	BCE_PRINTF(
10001		"----------------------------"
10002		"----------------"
10003		"----------------------------\n");
10004}
10005
10006
10007/****************************************************************************/
10008/* Prints out the TPAT processor state.                                     */
10009/*                                                                          */
10010/* Returns:                                                                 */
10011/*   Nothing.                                                               */
10012/****************************************************************************/
10013static __attribute__ ((noinline)) void
10014bce_dump_tpat_state(struct bce_softc *sc, int regs)
10015{
10016	u32 val;
10017	u32 fw_version[3];
10018
10019	BCE_PRINTF(
10020		"----------------------------"
10021		"   TPAT State   "
10022		"----------------------------\n");
10023
10024	for (int i = 0; i < 3; i++)
10025		fw_version[i] = htonl(REG_RD_IND(sc,
10026			(BCE_TPAT_SCRATCH + 0x410 + i * 4)));
10027	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10028
10029	val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
10030	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val, BCE_TPAT_CPU_MODE);
10031
10032	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10033	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
10034
10035	val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
10036	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val,
10037		BCE_TPAT_CPU_EVENT_MASK);
10038
10039	if (regs) {
10040		BCE_PRINTF(
10041			"----------------------------"
10042			" Register  Dump "
10043			"----------------------------\n");
10044
10045		for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
10046			/* Skip the big blank spaces */
10047			if (i < 0x854000 && i > 0x9ffff)
10048				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10049					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10050					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10051		}
10052	}
10053
10054	BCE_PRINTF(
10055		"----------------------------"
10056		"----------------"
10057		"----------------------------\n");
10058}
10059
10060
10061/****************************************************************************/
10062/* Prints out the Command Procesor (CP) state.                              */
10063/*                                                                          */
10064/* Returns:                                                                 */
10065/*   Nothing.                                                               */
10066/****************************************************************************/
10067static __attribute__ ((noinline)) void
10068bce_dump_cp_state(struct bce_softc *sc, int regs)
10069{
10070	u32 val;
10071	u32 fw_version[3];
10072
10073	BCE_PRINTF(
10074		"----------------------------"
10075		"    CP State    "
10076		"----------------------------\n");
10077
10078	for (int i = 0; i < 3; i++)
10079		fw_version[i] = htonl(REG_RD_IND(sc,
10080			(BCE_CP_SCRATCH + 0x10 + i * 4)));
10081	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10082
10083	val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
10084	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", val, BCE_CP_CPU_MODE);
10085
10086	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10087	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
10088
10089	val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
10090	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
10091		BCE_CP_CPU_EVENT_MASK);
10092
10093	if (regs) {
10094		BCE_PRINTF(
10095			"----------------------------"
10096			" Register  Dump "
10097			"----------------------------\n");
10098
10099		for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
10100			/* Skip the big blank spaces */
10101			if (i < 0x185400 && i > 0x19ffff)
10102				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10103					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10104					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10105		}
10106	}
10107
10108	BCE_PRINTF(
10109		"----------------------------"
10110		"----------------"
10111		"----------------------------\n");
10112}
10113
10114
10115/****************************************************************************/
10116/* Prints out the Completion Procesor (COM) state.                          */
10117/*                                                                          */
10118/* Returns:                                                                 */
10119/*   Nothing.                                                               */
10120/****************************************************************************/
10121static __attribute__ ((noinline)) void
10122bce_dump_com_state(struct bce_softc *sc, int regs)
10123{
10124	u32 val;
10125	u32 fw_version[3];
10126
10127	BCE_PRINTF(
10128		"----------------------------"
10129		"   COM State    "
10130		"----------------------------\n");
10131
10132	for (int i = 0; i < 3; i++)
10133		fw_version[i] = htonl(REG_RD_IND(sc,
10134			(BCE_COM_SCRATCH + 0x10 + i * 4)));
10135	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10136
10137	val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
10138	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", val, BCE_COM_CPU_MODE);
10139
10140	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10141	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
10142
10143	val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
10144	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
10145		BCE_COM_CPU_EVENT_MASK);
10146
10147	if (regs) {
10148		BCE_PRINTF(
10149			"----------------------------"
10150			" Register  Dump "
10151			"----------------------------\n");
10152
10153		for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
10154			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10155				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10156				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10157		}
10158	}
10159
10160	BCE_PRINTF(
10161		"----------------------------"
10162		"----------------"
10163		"----------------------------\n");
10164}
10165
10166
10167/****************************************************************************/
10168/* Prints out the driver state and then enters the debugger.                */
10169/*                                                                          */
10170/* Returns:                                                                 */
10171/*   Nothing.                                                               */
10172/****************************************************************************/
10173static void
10174bce_breakpoint(struct bce_softc *sc)
10175{
10176
10177	/*
10178	 * Unreachable code to silence compiler warnings
10179	 * about unused functions.
10180	 */
10181	if (0) {
10182		bce_freeze_controller(sc);
10183		bce_unfreeze_controller(sc);
10184		bce_dump_enet(sc, NULL);
10185   		bce_dump_txbd(sc, 0, NULL);
10186		bce_dump_rxbd(sc, 0, NULL);
10187		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
10188		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
10189		bce_dump_l2fhdr(sc, 0, NULL);
10190		bce_dump_ctx(sc, RX_CID);
10191		bce_dump_ftqs(sc);
10192		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
10193		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
10194		bce_dump_status_block(sc);
10195		bce_dump_stats_block(sc);
10196		bce_dump_driver_state(sc);
10197		bce_dump_hw_state(sc);
10198		bce_dump_bc_state(sc);
10199		bce_dump_txp_state(sc, 0);
10200		bce_dump_rxp_state(sc, 0);
10201		bce_dump_tpat_state(sc, 0);
10202		bce_dump_cp_state(sc, 0);
10203		bce_dump_com_state(sc, 0);
10204#ifdef BCE_USE_SPLIT_HEADER
10205		bce_dump_pgbd(sc, 0, NULL);
10206		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
10207		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
10208#endif
10209	}
10210
10211	bce_dump_status_block(sc);
10212	bce_dump_driver_state(sc);
10213
10214	/* Call the debugger. */
10215	breakpoint();
10216
10217	return;
10218}
10219#endif
10220
10221