if_bnx.c revision 1.97
1/*	$OpenBSD: if_bnx.c,v 1.97 2012/07/05 13:50:15 phessler Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Broadcom Corporation
5 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written consent.
19 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#if 0
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
36#endif
37
38/*
39 * The following controllers are supported by this driver:
40 *   BCM5706C A2, A3
41 *   BCM5706S A2, A3
42 *   BCM5708C B1, B2
43 *   BCM5708S B1, B2
44 *   BCM5709C A1, C0
45 *   BCM5709S A1, C0
46 *   BCM5716  C0
47 *
48 * The following controllers are not supported by this driver:
49 *   BCM5706C A0, A1
50 *   BCM5706S A0, A1
51 *   BCM5708C A0, B0
52 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1, B2 (pre-production)
54 *   BCM5709S A0, B0, B1, B2 (pre-production)
55 */
56
57#include <dev/pci/if_bnxreg.h>
58
/*
 * Per-image state for one CPU firmware file loaded via loadfirmware(9).
 * bnx_read_firmware() fills in "fw" (the parsed header) and the section
 * pointers below; all of them point into the single buffer returned by
 * loadfirmware(9), already byte-swapped to host order.
 */
struct bnx_firmware {
	char *filename;				/* firmware(4) image name */
	struct bnx_firmware_header *fw;		/* NULL until image is loaded */

	/* COM processor sections. */
	u_int32_t *bnx_COM_FwText;
	u_int32_t *bnx_COM_FwData;
	u_int32_t *bnx_COM_FwRodata;
	u_int32_t *bnx_COM_FwBss;
	u_int32_t *bnx_COM_FwSbss;

	/* RXP (receive processor) sections. */
	u_int32_t *bnx_RXP_FwText;
	u_int32_t *bnx_RXP_FwData;
	u_int32_t *bnx_RXP_FwRodata;
	u_int32_t *bnx_RXP_FwBss;
	u_int32_t *bnx_RXP_FwSbss;

	/* TPAT processor sections. */
	u_int32_t *bnx_TPAT_FwText;
	u_int32_t *bnx_TPAT_FwData;
	u_int32_t *bnx_TPAT_FwRodata;
	u_int32_t *bnx_TPAT_FwBss;
	u_int32_t *bnx_TPAT_FwSbss;

	/* TXP (transmit processor) sections. */
	u_int32_t *bnx_TXP_FwText;
	u_int32_t *bnx_TXP_FwData;
	u_int32_t *bnx_TXP_FwRodata;
	u_int32_t *bnx_TXP_FwBss;
	u_int32_t *bnx_TXP_FwSbss;
};

/*
 * Loadable CPU firmware images, indexed by the BNX_FW_* constants below.
 * The "fw" members start out NULL and are filled in the first time
 * bnx_read_firmware() loads the corresponding file, so the images are
 * shared between all attached controllers.
 */
struct bnx_firmware bnx_firmwares[] = {
	{ "bnx-b06",		NULL },		/* default image */
	{ "bnx-b09",		NULL }		/* BCM5709/5716 (see bnx_attachhook()) */
};
#define	BNX_FW_B06	0	/* index into bnx_firmwares[] */
#define	BNX_FW_B09	1
94
/*
 * Per-image state for one RV2P firmware file loaded via loadfirmware(9).
 * bnx_read_rv2p() fills in "fw" and the two processor section pointers,
 * which point into the loaded buffer (host byte order after parsing).
 */
struct bnx_rv2p {
	char *filename;				/* firmware(4) image name */
	struct bnx_rv2p_header *fw;		/* NULL until image is loaded */

	u_int32_t *bnx_rv2p_proc1;
	u_int32_t *bnx_rv2p_proc2;
};

/*
 * Loadable RV2P images, indexed by the constants below; loaded once and
 * shared between controllers.  bnx_attachhook() selects BNX_XI_RV2P or
 * BNX_XI90_RV2P for BCM5709-family chips and BNX_RV2P otherwise.
 */
struct bnx_rv2p bnx_rv2ps[] = {
	{ "bnx-rv2p",		NULL },
	{ "bnx-xi-rv2p",	NULL },
	{ "bnx-xi90-rv2p",	NULL }
};
#define BNX_RV2P	0	/* index into bnx_rv2ps[] */
#define BNX_XI_RV2P	1
#define BNX_XI90_RV2P	2
111
112void	nswaph(u_int32_t *p, int wcount);
113
114/****************************************************************************/
115/* BNX Driver Version                                                       */
116/****************************************************************************/
117
118#define BNX_DRIVER_VERSION	"v0.9.6"
119
120/****************************************************************************/
121/* BNX Debug Options                                                        */
122/****************************************************************************/
123#ifdef BNX_DEBUG
124	u_int32_t bnx_debug = BNX_WARN;
125
126	/*          0 = Never              */
127	/*          1 = 1 in 2,147,483,648 */
128	/*        256 = 1 in     8,388,608 */
129	/*       2048 = 1 in     1,048,576 */
130	/*      65536 = 1 in        32,768 */
131	/*    1048576 = 1 in         2,048 */
132	/*  268435456 =	1 in             8 */
133	/*  536870912 = 1 in             4 */
134	/* 1073741824 = 1 in             2 */
135
136	/* Controls how often the l2_fhdr frame error check will fail. */
137	int bnx_debug_l2fhdr_status_check = 0;
138
139	/* Controls how often the unexpected attention check will fail. */
140	int bnx_debug_unexpected_attention = 0;
141
142	/* Controls how often to simulate an mbuf allocation failure. */
143	int bnx_debug_mbuf_allocation_failure = 0;
144
145	/* Controls how often to simulate a DMA mapping failure. */
146	int bnx_debug_dma_map_addr_failure = 0;
147
148	/* Controls how often to simulate a bootcode failure. */
149	int bnx_debug_bootcode_running_failure = 0;
150#endif
151
152/****************************************************************************/
153/* PCI Device ID Table                                                      */
154/*                                                                          */
155/* Used by bnx_probe() to identify the devices supported by this driver.    */
156/****************************************************************************/
/*
 * PCI IDs accepted by bnx_probe(); covers both the copper (C) and
 * SerDes-style (S) variants of each supported controller listed in the
 * comment at the top of this file.
 */
const struct pci_matchid bnx_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S }
};
167
168/****************************************************************************/
169/* Supported Flash NVRAM device data.                                       */
170/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/*
	 * Each entry carries five NVRAM controller setup words (presumably
	 * strapping/config/command values -- see struct flash_spec in
	 * if_bnxreg.h for the field names), followed by the access flags,
	 * page geometry (bits and size), byte address mask, total device
	 * size, and a printable name.  "Expansion entry" rows are
	 * placeholders for strapping codes with no known part.
	 */
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
260
261/*
262 * The BCM5709 controllers transparently handle the
263 * differences between Atmel 264 byte pages and all
264 * flash devices which use 256 byte pages, so no
265 * logical-to-physical mapping is required in the
266 * driver.
267 */
/* Single flash description used for all BCM5709-family parts (see the
 * comment above on page-size translation). */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};
276
277/****************************************************************************/
278/* OpenBSD device entry points.                                             */
279/****************************************************************************/
280int	bnx_probe(struct device *, void *, void *);
281void	bnx_attach(struct device *, struct device *, void *);
282void	bnx_attachhook(void *);
283int	bnx_read_firmware(struct bnx_softc *sc, int);
284int	bnx_read_rv2p(struct bnx_softc *sc, int);
285#if 0
286void	bnx_detach(void *);
287#endif
288
289/****************************************************************************/
290/* BNX Debug Data Structure Dump Routines                                   */
291/****************************************************************************/
292#ifdef BNX_DEBUG
293void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
294void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
295void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
296void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
297void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
298void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
299void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
300void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
301void	bnx_dump_status_block(struct bnx_softc *);
302void	bnx_dump_stats_block(struct bnx_softc *);
303void	bnx_dump_driver_state(struct bnx_softc *);
304void	bnx_dump_hw_state(struct bnx_softc *);
305void	bnx_breakpoint(struct bnx_softc *);
306#endif
307
308/****************************************************************************/
309/* BNX Register/Memory Access Routines                                      */
310/****************************************************************************/
311u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
312void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
313void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
314int	bnx_miibus_read_reg(struct device *, int, int);
315void	bnx_miibus_write_reg(struct device *, int, int, int);
316void	bnx_miibus_statchg(struct device *);
317
318/****************************************************************************/
319/* BNX NVRAM Access Routines                                                */
320/****************************************************************************/
321int	bnx_acquire_nvram_lock(struct bnx_softc *);
322int	bnx_release_nvram_lock(struct bnx_softc *);
323void	bnx_enable_nvram_access(struct bnx_softc *);
324void	bnx_disable_nvram_access(struct bnx_softc *);
325int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
326	    u_int32_t);
327int	bnx_init_nvram(struct bnx_softc *);
328int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
329int	bnx_nvram_test(struct bnx_softc *);
330#ifdef BNX_NVRAM_WRITE_SUPPORT
331int	bnx_enable_nvram_write(struct bnx_softc *);
332void	bnx_disable_nvram_write(struct bnx_softc *);
333int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
334int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
335	    u_int32_t);
336int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
337#endif
338
339/****************************************************************************/
340/*                                                                          */
341/****************************************************************************/
342void	bnx_get_media(struct bnx_softc *);
343void	bnx_init_media(struct bnx_softc *);
344int	bnx_dma_alloc(struct bnx_softc *);
345void	bnx_dma_free(struct bnx_softc *);
346void	bnx_release_resources(struct bnx_softc *);
347
348/****************************************************************************/
349/* BNX Firmware Synchronization and Load                                    */
350/****************************************************************************/
351int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
352void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
353	    u_int32_t);
354void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
355	    struct fw_info *);
356void	bnx_init_cpus(struct bnx_softc *);
357
358void	bnx_stop(struct bnx_softc *);
359int	bnx_reset(struct bnx_softc *, u_int32_t);
360int	bnx_chipinit(struct bnx_softc *);
361int	bnx_blockinit(struct bnx_softc *);
362int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
363
364int	bnx_init_tx_chain(struct bnx_softc *);
365void	bnx_init_tx_context(struct bnx_softc *);
366void	bnx_fill_rx_chain(struct bnx_softc *);
367void	bnx_init_rx_context(struct bnx_softc *);
368int	bnx_init_rx_chain(struct bnx_softc *);
369void	bnx_free_rx_chain(struct bnx_softc *);
370void	bnx_free_tx_chain(struct bnx_softc *);
371
372int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
373void	bnx_start(struct ifnet *);
374int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
375void	bnx_watchdog(struct ifnet *);
376int	bnx_ifmedia_upd(struct ifnet *);
377void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
378void	bnx_init(void *);
379void	bnx_mgmt_init(struct bnx_softc *sc);
380
381void	bnx_init_context(struct bnx_softc *);
382void	bnx_get_mac_addr(struct bnx_softc *);
383void	bnx_set_mac_addr(struct bnx_softc *);
384void	bnx_phy_intr(struct bnx_softc *);
385void	bnx_rx_intr(struct bnx_softc *);
386void	bnx_tx_intr(struct bnx_softc *);
387void	bnx_disable_intr(struct bnx_softc *);
388void	bnx_enable_intr(struct bnx_softc *);
389
390int	bnx_intr(void *);
391void	bnx_iff(struct bnx_softc *);
392void	bnx_stats_update(struct bnx_softc *);
393void	bnx_tick(void *);
394
/*
 * Global tx packet pool shared by all bnx instances; NULL until first
 * use (NOTE(review): presumably created under bnx_tx_pool_lk by
 * bnx_alloc_pkts() -- confirm against the allocation path).
 */
struct rwlock bnx_tx_pool_lk = RWLOCK_INITIALIZER("bnxplinit");
struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(void *, void *);
398
399/****************************************************************************/
400/* OpenBSD device dispatch table.                                           */
401/****************************************************************************/
/* autoconf(9) attachment glue: softc size, match and attach functions. */
struct cfattach bnx_ca = {
	sizeof(struct bnx_softc), bnx_probe, bnx_attach
};

/* Device class entry: network interface named "bnx". */
struct cfdriver bnx_cd = {
	NULL, "bnx", DV_IFNET
};
409
410/****************************************************************************/
411/* Device probe function.                                                   */
412/*                                                                          */
413/* Compares the device to the driver's list of supported devices and        */
414/* reports back to the OS whether this is the right driver for the device.  */
415/*                                                                          */
416/* Returns:                                                                 */
417/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
418/****************************************************************************/
419int
420bnx_probe(struct device *parent, void *match, void *aux)
421{
422	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
423	    nitems(bnx_devices)));
424}
425
/*
 * Byte swap an array of 32-bit words from big endian (network order) to
 * host order in place.
 *
 * "wcount" is a length in BYTES (callers pass the raw section lengths
 * from the firmware header) and is consumed four bytes per word.  The
 * loop must test "> 0" rather than "!= 0": a length that is not a
 * multiple of four would otherwise step past zero and keep swapping far
 * beyond the end of the buffer.
 */
void
nswaph(u_int32_t *p, int wcount)
{
	for (; wcount > 0; wcount -= 4) {
		*p = ntohl(*p);
		p++;
	}
}
434
435int
436bnx_read_firmware(struct bnx_softc *sc, int idx)
437{
438	struct bnx_firmware *bfw = &bnx_firmwares[idx];
439	struct bnx_firmware_header *hdr = bfw->fw;
440	u_char *p, *q;
441	size_t size;
442	int error;
443
444	if (hdr != NULL)
445		return (0);
446
447	if ((error = loadfirmware(bfw->filename, &p, &size)) != 0)
448		return (error);
449
450	if (size < sizeof(struct bnx_firmware_header)) {
451		free(p, M_DEVBUF);
452		return (EINVAL);
453	}
454
455	hdr = (struct bnx_firmware_header *)p;
456
457	hdr->bnx_COM_FwReleaseMajor = ntohl(hdr->bnx_COM_FwReleaseMajor);
458	hdr->bnx_COM_FwReleaseMinor = ntohl(hdr->bnx_COM_FwReleaseMinor);
459	hdr->bnx_COM_FwReleaseFix = ntohl(hdr->bnx_COM_FwReleaseFix);
460	hdr->bnx_COM_FwStartAddr = ntohl(hdr->bnx_COM_FwStartAddr);
461	hdr->bnx_COM_FwTextAddr = ntohl(hdr->bnx_COM_FwTextAddr);
462	hdr->bnx_COM_FwTextLen = ntohl(hdr->bnx_COM_FwTextLen);
463	hdr->bnx_COM_FwDataAddr = ntohl(hdr->bnx_COM_FwDataAddr);
464	hdr->bnx_COM_FwDataLen = ntohl(hdr->bnx_COM_FwDataLen);
465	hdr->bnx_COM_FwRodataAddr = ntohl(hdr->bnx_COM_FwRodataAddr);
466	hdr->bnx_COM_FwRodataLen = ntohl(hdr->bnx_COM_FwRodataLen);
467	hdr->bnx_COM_FwBssAddr = ntohl(hdr->bnx_COM_FwBssAddr);
468	hdr->bnx_COM_FwBssLen = ntohl(hdr->bnx_COM_FwBssLen);
469	hdr->bnx_COM_FwSbssAddr = ntohl(hdr->bnx_COM_FwSbssAddr);
470	hdr->bnx_COM_FwSbssLen = ntohl(hdr->bnx_COM_FwSbssLen);
471
472	hdr->bnx_RXP_FwReleaseMajor = ntohl(hdr->bnx_RXP_FwReleaseMajor);
473	hdr->bnx_RXP_FwReleaseMinor = ntohl(hdr->bnx_RXP_FwReleaseMinor);
474	hdr->bnx_RXP_FwReleaseFix = ntohl(hdr->bnx_RXP_FwReleaseFix);
475	hdr->bnx_RXP_FwStartAddr = ntohl(hdr->bnx_RXP_FwStartAddr);
476	hdr->bnx_RXP_FwTextAddr = ntohl(hdr->bnx_RXP_FwTextAddr);
477	hdr->bnx_RXP_FwTextLen = ntohl(hdr->bnx_RXP_FwTextLen);
478	hdr->bnx_RXP_FwDataAddr = ntohl(hdr->bnx_RXP_FwDataAddr);
479	hdr->bnx_RXP_FwDataLen = ntohl(hdr->bnx_RXP_FwDataLen);
480	hdr->bnx_RXP_FwRodataAddr = ntohl(hdr->bnx_RXP_FwRodataAddr);
481	hdr->bnx_RXP_FwRodataLen = ntohl(hdr->bnx_RXP_FwRodataLen);
482	hdr->bnx_RXP_FwBssAddr = ntohl(hdr->bnx_RXP_FwBssAddr);
483	hdr->bnx_RXP_FwBssLen = ntohl(hdr->bnx_RXP_FwBssLen);
484	hdr->bnx_RXP_FwSbssAddr = ntohl(hdr->bnx_RXP_FwSbssAddr);
485	hdr->bnx_RXP_FwSbssLen = ntohl(hdr->bnx_RXP_FwSbssLen);
486
487	hdr->bnx_TPAT_FwReleaseMajor = ntohl(hdr->bnx_TPAT_FwReleaseMajor);
488	hdr->bnx_TPAT_FwReleaseMinor = ntohl(hdr->bnx_TPAT_FwReleaseMinor);
489	hdr->bnx_TPAT_FwReleaseFix = ntohl(hdr->bnx_TPAT_FwReleaseFix);
490	hdr->bnx_TPAT_FwStartAddr = ntohl(hdr->bnx_TPAT_FwStartAddr);
491	hdr->bnx_TPAT_FwTextAddr = ntohl(hdr->bnx_TPAT_FwTextAddr);
492	hdr->bnx_TPAT_FwTextLen = ntohl(hdr->bnx_TPAT_FwTextLen);
493	hdr->bnx_TPAT_FwDataAddr = ntohl(hdr->bnx_TPAT_FwDataAddr);
494	hdr->bnx_TPAT_FwDataLen = ntohl(hdr->bnx_TPAT_FwDataLen);
495	hdr->bnx_TPAT_FwRodataAddr = ntohl(hdr->bnx_TPAT_FwRodataAddr);
496	hdr->bnx_TPAT_FwRodataLen = ntohl(hdr->bnx_TPAT_FwRodataLen);
497	hdr->bnx_TPAT_FwBssAddr = ntohl(hdr->bnx_TPAT_FwBssAddr);
498	hdr->bnx_TPAT_FwBssLen = ntohl(hdr->bnx_TPAT_FwBssLen);
499	hdr->bnx_TPAT_FwSbssAddr = ntohl(hdr->bnx_TPAT_FwSbssAddr);
500	hdr->bnx_TPAT_FwSbssLen = ntohl(hdr->bnx_TPAT_FwSbssLen);
501
502	hdr->bnx_TXP_FwReleaseMajor = ntohl(hdr->bnx_TXP_FwReleaseMajor);
503	hdr->bnx_TXP_FwReleaseMinor = ntohl(hdr->bnx_TXP_FwReleaseMinor);
504	hdr->bnx_TXP_FwReleaseFix = ntohl(hdr->bnx_TXP_FwReleaseFix);
505	hdr->bnx_TXP_FwStartAddr = ntohl(hdr->bnx_TXP_FwStartAddr);
506	hdr->bnx_TXP_FwTextAddr = ntohl(hdr->bnx_TXP_FwTextAddr);
507	hdr->bnx_TXP_FwTextLen = ntohl(hdr->bnx_TXP_FwTextLen);
508	hdr->bnx_TXP_FwDataAddr = ntohl(hdr->bnx_TXP_FwDataAddr);
509	hdr->bnx_TXP_FwDataLen = ntohl(hdr->bnx_TXP_FwDataLen);
510	hdr->bnx_TXP_FwRodataAddr = ntohl(hdr->bnx_TXP_FwRodataAddr);
511	hdr->bnx_TXP_FwRodataLen = ntohl(hdr->bnx_TXP_FwRodataLen);
512	hdr->bnx_TXP_FwBssAddr = ntohl(hdr->bnx_TXP_FwBssAddr);
513	hdr->bnx_TXP_FwBssLen = ntohl(hdr->bnx_TXP_FwBssLen);
514	hdr->bnx_TXP_FwSbssAddr = ntohl(hdr->bnx_TXP_FwSbssAddr);
515	hdr->bnx_TXP_FwSbssLen = ntohl(hdr->bnx_TXP_FwSbssLen);
516
517	q = p + sizeof(*hdr);
518
519	bfw->bnx_COM_FwText = (u_int32_t *)q;
520	q += hdr->bnx_COM_FwTextLen;
521	nswaph(bfw->bnx_COM_FwText, hdr->bnx_COM_FwTextLen);
522	bfw->bnx_COM_FwData = (u_int32_t *)q;
523	q += hdr->bnx_COM_FwDataLen;
524	nswaph(bfw->bnx_COM_FwData, hdr->bnx_COM_FwDataLen);
525	bfw->bnx_COM_FwRodata = (u_int32_t *)q;
526	q += hdr->bnx_COM_FwRodataLen;
527	nswaph(bfw->bnx_COM_FwRodata, hdr->bnx_COM_FwRodataLen);
528	bfw->bnx_COM_FwBss = (u_int32_t *)q;
529	q += hdr->bnx_COM_FwBssLen;
530	nswaph(bfw->bnx_COM_FwBss, hdr->bnx_COM_FwBssLen);
531	bfw->bnx_COM_FwSbss = (u_int32_t *)q;
532	q += hdr->bnx_COM_FwSbssLen;
533	nswaph(bfw->bnx_COM_FwSbss, hdr->bnx_COM_FwSbssLen);
534
535	bfw->bnx_RXP_FwText = (u_int32_t *)q;
536	q += hdr->bnx_RXP_FwTextLen;
537	nswaph(bfw->bnx_RXP_FwText, hdr->bnx_RXP_FwTextLen);
538	bfw->bnx_RXP_FwData = (u_int32_t *)q;
539	q += hdr->bnx_RXP_FwDataLen;
540	nswaph(bfw->bnx_RXP_FwData, hdr->bnx_RXP_FwDataLen);
541	bfw->bnx_RXP_FwRodata = (u_int32_t *)q;
542	q += hdr->bnx_RXP_FwRodataLen;
543	nswaph(bfw->bnx_RXP_FwRodata, hdr->bnx_RXP_FwRodataLen);
544	bfw->bnx_RXP_FwBss = (u_int32_t *)q;
545	q += hdr->bnx_RXP_FwBssLen;
546	nswaph(bfw->bnx_RXP_FwBss, hdr->bnx_RXP_FwBssLen);
547	bfw->bnx_RXP_FwSbss = (u_int32_t *)q;
548	q += hdr->bnx_RXP_FwSbssLen;
549	nswaph(bfw->bnx_RXP_FwSbss, hdr->bnx_RXP_FwSbssLen);
550
551	bfw->bnx_TPAT_FwText = (u_int32_t *)q;
552	q += hdr->bnx_TPAT_FwTextLen;
553	nswaph(bfw->bnx_TPAT_FwText, hdr->bnx_TPAT_FwTextLen);
554	bfw->bnx_TPAT_FwData = (u_int32_t *)q;
555	q += hdr->bnx_TPAT_FwDataLen;
556	nswaph(bfw->bnx_TPAT_FwData, hdr->bnx_TPAT_FwDataLen);
557	bfw->bnx_TPAT_FwRodata = (u_int32_t *)q;
558	q += hdr->bnx_TPAT_FwRodataLen;
559	nswaph(bfw->bnx_TPAT_FwRodata, hdr->bnx_TPAT_FwRodataLen);
560	bfw->bnx_TPAT_FwBss = (u_int32_t *)q;
561	q += hdr->bnx_TPAT_FwBssLen;
562	nswaph(bfw->bnx_TPAT_FwBss, hdr->bnx_TPAT_FwBssLen);
563	bfw->bnx_TPAT_FwSbss = (u_int32_t *)q;
564	q += hdr->bnx_TPAT_FwSbssLen;
565	nswaph(bfw->bnx_TPAT_FwSbss, hdr->bnx_TPAT_FwSbssLen);
566
567	bfw->bnx_TXP_FwText = (u_int32_t *)q;
568	q += hdr->bnx_TXP_FwTextLen;
569	nswaph(bfw->bnx_TXP_FwText, hdr->bnx_TXP_FwTextLen);
570	bfw->bnx_TXP_FwData = (u_int32_t *)q;
571	q += hdr->bnx_TXP_FwDataLen;
572	nswaph(bfw->bnx_TXP_FwData, hdr->bnx_TXP_FwDataLen);
573	bfw->bnx_TXP_FwRodata = (u_int32_t *)q;
574	q += hdr->bnx_TXP_FwRodataLen;
575	nswaph(bfw->bnx_TXP_FwRodata, hdr->bnx_TXP_FwRodataLen);
576	bfw->bnx_TXP_FwBss = (u_int32_t *)q;
577	q += hdr->bnx_TXP_FwBssLen;
578	nswaph(bfw->bnx_TXP_FwBss, hdr->bnx_TXP_FwBssLen);
579	bfw->bnx_TXP_FwSbss = (u_int32_t *)q;
580	q += hdr->bnx_TXP_FwSbssLen;
581	nswaph(bfw->bnx_TXP_FwSbss, hdr->bnx_TXP_FwSbssLen);
582
583	if (q - p != size) {
584		free(p, M_DEVBUF);
585		hdr = NULL;
586		return EINVAL;
587	}
588
589	bfw->fw = hdr;
590
591	return (0);
592}
593
594int
595bnx_read_rv2p(struct bnx_softc *sc, int idx)
596{
597	struct bnx_rv2p *rv2p = &bnx_rv2ps[idx];
598	struct bnx_rv2p_header *hdr = rv2p->fw;
599	u_char *p, *q;
600	size_t size;
601	int error;
602
603	if (hdr != NULL)
604		return (0);
605
606	if ((error = loadfirmware(rv2p->filename, &p, &size)) != 0)
607		return (error);
608
609	if (size < sizeof(struct bnx_rv2p_header)) {
610		free(p, M_DEVBUF);
611		return (EINVAL);
612	}
613
614	hdr = (struct bnx_rv2p_header *)p;
615
616	hdr->bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
617	hdr->bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
618
619	q = p + sizeof(*hdr);
620
621	rv2p->bnx_rv2p_proc1 = (u_int32_t *)q;
622	q += hdr->bnx_rv2p_proc1len;
623	nswaph(rv2p->bnx_rv2p_proc1, hdr->bnx_rv2p_proc1len);
624	rv2p->bnx_rv2p_proc2 = (u_int32_t *)q;
625	q += hdr->bnx_rv2p_proc2len;
626	nswaph(rv2p->bnx_rv2p_proc2, hdr->bnx_rv2p_proc2len);
627
628	if (q - p != size) {
629		free(p, M_DEVBUF);
630		return EINVAL;
631	}
632
633	rv2p->fw = hdr;
634
635	return (0);
636}
637
638
639/****************************************************************************/
640/* Device attach function.                                                  */
641/*                                                                          */
642/* Allocates device resources, performs secondary chip identification,      */
643/* resets and initializes the hardware, and initializes driver instance     */
644/* variables.                                                               */
645/*                                                                          */
646/* Returns:                                                                 */
647/*   0 on success, positive value on failure.                               */
648/****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc	*sc = (struct bnx_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	u_int32_t		val;
	pcireg_t		memtype;
	const char 		*intrstr = NULL;

	/* Keep a copy of the attach args for bnx_attachhook(). */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &sc->bnx_ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		/* Per-function shared memory window. */
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* Translate the detected PCI-X clock into MHz. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	/* Plain PCI: 66 or 33 MHz depending on the M66EN status bit. */
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih, IPL_NET,
	    bnx_intr, sc, sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bnx_attach_fail;
	}

	printf(": %s\n", intrstr);

	/* Firmware loading and the rest of initialization is deferred
	 * until the root filesystem is mounted (bnx_attachhook()). */
	mountroothook_establish(bnx_attachhook, sc);
	return;

bnx_attach_fail:
	bnx_release_resources(sc);
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
771
/****************************************************************************/
/* Deferred attach, scheduled from bnx_attach() via                         */
/* mountroothook_establish() so it runs after the root filesystem is        */
/* mounted and the firmware images can be read from disk.  Loads firmware, */
/* resets and initializes the controller, then attaches the network        */
/* interface and MII/PHY layer.                                             */
/****************************************************************************/
void
bnx_attachhook(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct pci_attach_args *pa = &sc->bnx_pa;
	struct ifnet		*ifp;
	int			error, mii_flags = 0;
	int			fw = BNX_FW_B06;
	int			rv2p = BNX_RV2P;

	/* Select the firmware and RV2P images matching the chip revision. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		fw = BNX_FW_B09;
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = BNX_XI90_RV2P;
		else
			rv2p = BNX_XI_RV2P;
	}

	/* Load the main firmware image from the filesystem. */
	if ((error = bnx_read_firmware(sc, fw)) != 0) {
		printf("%s: error %d, could not read firmware\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Load the RV2P processor image from the filesystem. */
	if ((error = bnx_read_rv2p(sc, rv2p)) != 0) {
		printf("%s: error %d, could not read rv2p\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf("%s: Controller initialization failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf("%s: NVRAM test failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/* Update statistics once every second. */
	/* NOTE(review): low bits are masked off (1000000 & 0xffff00 ==
	 * 999936) -- presumably to fit the hardware's stats-ticks register
	 * field; confirm against the register layout. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	m_clsetwms(ifp, MCLBYTES, 2, USABLE_RX_BD);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef BNX_CSUM
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	sc->mbuf_alloc_size = BNX_MAX_MRU;

	printf("%s: address %s\n", sc->bnx_dev.dv_xname,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Hook up the MII layer callbacks. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706 ||
	    (!(sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)))
		mii_flags |= MIIF_DOPAUSE;
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	/* Fall back to manual media if no PHY was detected. */
	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bnx_timeout, bnx_tick, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	/* Handle interrupts */
	sc->bnx_flags |= BNX_ACTIVE_FLAG;

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
959
960/****************************************************************************/
961/* Device detach function.                                                  */
962/*                                                                          */
963/* Stops the controller, resets the controller, and releases resources.     */
964/*                                                                          */
965/* Returns:                                                                 */
966/*   0 on success, positive value on failure.                               */
967/****************************************************************************/
#if 0
/*
 * NOTE(review): dead code -- this FreeBSD-derived detach routine is
 * compiled out with #if 0.  If it were ever enabled it would need a
 * rewrite first: `sc' is dereferenced before it is assigned, `dev' is
 * undeclared, device_get_softc()/bus_generic_detach()/
 * device_delete_child() are FreeBSD (not OpenBSD) APIs, and a value
 * is returned from a void function.
 */
void
bnx_detach(void *xsc)
{
	struct bnx_softc *sc;
	/* BUG (if ever enabled): `sc' is uninitialized on the next line. */
	struct ifnet *ifp = &sc->arpcom.ac_if;

	sc = device_get_softc(dev);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop and reset the controller. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->bnx_mii);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
#endif
997
998/****************************************************************************/
999/* Indirect register read.                                                  */
1000/*                                                                          */
1001/* Reads NetXtreme II registers using an index/data register pair in PCI    */
1002/* configuration space.  Using this mechanism avoids issues with posted     */
1003/* reads but is much slower than memory-mapped I/O.                         */
1004/*                                                                          */
1005/* Returns:                                                                 */
1006/*   The value of the register.                                             */
1007/****************************************************************************/
u_int32_t
bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);

	/* Select the target register through the config-space window. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		/* Debug build: capture the value so it can be logged. */
		u_int32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __FUNCTION__, offset, val);
		return (val);
	}
#else
	/* Read the register's value back through the data window. */
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}
1028
1029/****************************************************************************/
1030/* Indirect register write.                                                 */
1031/*                                                                          */
1032/* Writes NetXtreme II registers using an index/data register pair in PCI   */
1033/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1035/*                                                                          */
1036/* Returns:                                                                 */
1037/*   Nothing.                                                               */
1038/****************************************************************************/
1039void
1040bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1041{
1042	struct pci_attach_args  *pa = &(sc->bnx_pa);
1043
1044	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1045		__FUNCTION__, offset, val);
1046
1047	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1048	    offset);
1049	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1050}
1051
1052/****************************************************************************/
1053/* Context memory write.                                                    */
1054/*                                                                          */
1055/* The NetXtreme II controller uses context memory to track connection      */
1056/* information for L2 and higher network protocols.                         */
1057/*                                                                          */
1058/* Returns:                                                                 */
1059/*   Nothing.                                                               */
1060/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
    u_int32_t ctx_val)
{
	u_int32_t idx, offset = ctx_offset + cid_addr;
	u_int32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/*
		 * 5709: post the data first, then the target offset with
		 * WRITE_REQ set, and poll until the hardware clears the
		 * WRITE_REQ bit (up to 5 tries, 5us apart).
		 */
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		/* Disabled diagnostic: would warn if the write request
		 * never completed within the retry budget. */
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
				"cid_addr = 0x%08X, offset = 0x%08X!\n",
				__FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		/* Older chips: plain address/data register pair. */
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}
1092
1093/****************************************************************************/
1094/* PHY register read.                                                       */
1095/*                                                                          */
1096/* Implements register reads on the MII bus.                                */
1097/*                                                                          */
1098/* Returns:                                                                 */
1099/*   The value of the register.                                             */
1100/****************************************************************************/
1101int
1102bnx_miibus_read_reg(struct device *dev, int phy, int reg)
1103{
1104	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1105	u_int32_t		val;
1106	int			i;
1107
1108	/* Make sure we are accessing the correct PHY address. */
1109	if (phy != sc->bnx_phy_addr) {
1110		DBPRINT(sc, BNX_VERBOSE,
1111		    "Invalid PHY address %d for PHY read!\n", phy);
1112		return(0);
1113	}
1114
1115	/*
1116	 * The BCM5709S PHY is an IEEE Clause 45 PHY
1117	 * with special mappings to work with IEEE
1118	 * Clause 22 register accesses.
1119	 */
1120	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1121		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1122			reg += 0x10;
1123	}
1124
1125	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1126		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1127		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1128
1129		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1130		REG_RD(sc, BNX_EMAC_MDIO_MODE);
1131
1132		DELAY(40);
1133	}
1134
1135	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
1136	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
1137	    BNX_EMAC_MDIO_COMM_START_BUSY;
1138	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);
1139
1140	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1141		DELAY(10);
1142
1143		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1144		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1145			DELAY(5);
1146
1147			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1148			val &= BNX_EMAC_MDIO_COMM_DATA;
1149
1150			break;
1151		}
1152	}
1153
1154	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
1155		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
1156		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1157		val = 0x0;
1158	} else
1159		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1160
1161	DBPRINT(sc, BNX_EXCESSIVE,
1162	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
1163	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1164
1165	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1166		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1167		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1168
1169		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1170		REG_RD(sc, BNX_EMAC_MDIO_MODE);
1171
1172		DELAY(40);
1173	}
1174
1175	return (val & 0xffff);
1176}
1177
1178/****************************************************************************/
1179/* PHY register write.                                                      */
1180/*                                                                          */
1181/* Implements register writes on the MII bus.                               */
1182/*                                                                          */
1183/* Returns:                                                                 */
/*   Nothing.                                                               */
1185/****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val1;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __FUNCTION__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/* Temporarily disable hardware auto-polling of the MDIO bus. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the write command and poll for START_BUSY to clear. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* START_BUSY still set means the write never completed. */
	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	/* Re-enable auto-polling if we disabled it above. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
1254
1255/****************************************************************************/
1256/* MII bus status change.                                                   */
1257/*                                                                          */
1258/* Called by the MII bus driver when the PHY establishes link to set the    */
1259/* MAC interface registers.                                                 */
1260/*                                                                          */
1261/* Returns:                                                                 */
1262/*   Nothing.                                                               */
1263/****************************************************************************/
1264void
1265bnx_miibus_statchg(struct device *dev)
1266{
1267	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1268	struct mii_data		*mii = &sc->bnx_mii;
1269	u_int32_t		rx_mode = sc->rx_mode;
1270	int			val;
1271
1272	val = REG_RD(sc, BNX_EMAC_MODE);
1273	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1274		BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1275		BNX_EMAC_MODE_25G);
1276
1277	/*
1278	 * Get flow control negotiation result.
1279	 */
1280	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1281	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
1282		sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1283		mii->mii_media_active &= ~IFM_ETH_FMASK;
1284	}
1285
1286	/* Set MII or GMII interface based on the speed
1287	 * negotiated by the PHY.
1288	 */
1289	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1290	case IFM_10_T:
1291		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1292			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1293			val |= BNX_EMAC_MODE_PORT_MII_10;
1294			break;
1295		}
1296		/* FALLTHROUGH */
1297	case IFM_100_TX:
1298		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1299		val |= BNX_EMAC_MODE_PORT_MII;
1300		break;
1301	case IFM_2500_SX:
1302		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1303		val |= BNX_EMAC_MODE_25G;
1304		/* FALLTHROUGH */
1305	case IFM_1000_T:
1306	case IFM_1000_SX:
1307		DBPRINT(sc, BNX_INFO, "Enablinb GMII interface.\n");
1308		val |= BNX_EMAC_MODE_PORT_GMII;
1309		break;
1310	default:
1311		val |= BNX_EMAC_MODE_PORT_GMII;
1312		break;
1313	}
1314
1315	/* Set half or full duplex based on the duplicity
1316	 * negotiated by the PHY.
1317	 */
1318	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1319		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1320		val |= BNX_EMAC_MODE_HALF_DUPLEX;
1321	} else
1322		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1323
1324	REG_WR(sc, BNX_EMAC_MODE, val);
1325
1326	/*
1327	 * 802.3x flow control
1328	 */
1329	if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
1330		DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
1331		rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
1332	} else {
1333		DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
1334		rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
1335	}
1336
1337	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
1338		DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
1339		BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1340	} else {
1341		DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
1342		BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1343	}
1344
1345	/* Only make changes if the recive mode has actually changed. */
1346	if (rx_mode != sc->rx_mode) {
1347		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
1348		    rx_mode);
1349
1350		sc->rx_mode = rx_mode;
1351		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
1352	}
1353}
1354
1355/****************************************************************************/
1356/* Acquire NVRAM lock.                                                      */
1357/*                                                                          */
1358/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 1 is used by the firmware and lock 2 is for use by the driver;      */
/* the remaining locks are reserved.                                        */
1361/*                                                                          */
1362/* Returns:                                                                 */
1363/*   0 on success, positive value on failure.                               */
1364/****************************************************************************/
1365int
1366bnx_acquire_nvram_lock(struct bnx_softc *sc)
1367{
1368	u_int32_t		val;
1369	int			j;
1370
1371	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1372
1373	/* Request access to the flash interface. */
1374	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1375	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1376		val = REG_RD(sc, BNX_NVM_SW_ARB);
1377		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1378			break;
1379
1380		DELAY(5);
1381	}
1382
1383	if (j >= NVRAM_TIMEOUT_COUNT) {
1384		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1385		return (EBUSY);
1386	}
1387
1388	return (0);
1389}
1390
1391/****************************************************************************/
1392/* Release NVRAM lock.                                                      */
1393/*                                                                          */
1394/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 1 is used by the firmware and lock 2 is for use by the driver;      */
/* the remaining locks are reserved.                                        */
1397/*                                                                          */
1398/* Returns:                                                                 */
1399/*   0 on success, positive value on failure.                               */
1400/****************************************************************************/
1401int
1402bnx_release_nvram_lock(struct bnx_softc *sc)
1403{
1404	int			j;
1405	u_int32_t		val;
1406
1407	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1408
1409	/* Relinquish nvram interface. */
1410	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1411
1412	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1413		val = REG_RD(sc, BNX_NVM_SW_ARB);
1414		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1415			break;
1416
1417		DELAY(5);
1418	}
1419
1420	if (j >= NVRAM_TIMEOUT_COUNT) {
1421		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1422		return (EBUSY);
1423	}
1424
1425	return (0);
1426}
1427
1428#ifdef BNX_NVRAM_WRITE_SUPPORT
1429/****************************************************************************/
1430/* Enable NVRAM write access.                                               */
1431/*                                                                          */
1432/* Before writing to NVRAM the caller must enable NVRAM writes.             */
1433/*                                                                          */
1434/* Returns:                                                                 */
1435/*   0 on success, positive value on failure.                               */
1436/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	/* Allow PCI-initiated writes to the NVRAM interface. */
	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash additionally needs a write-enable command. */
	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		/* Clear DONE, then issue the WREN command. */
		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		/* Poll for the command to complete. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}
1470
1471/****************************************************************************/
1472/* Disable NVRAM write access.                                              */
1473/*                                                                          */
1474/* When the caller is finished writing to NVRAM write access must be        */
1475/* disabled.                                                                */
1476/*                                                                          */
1477/* Returns:                                                                 */
1478/*   Nothing.                                                               */
1479/****************************************************************************/
1480void
1481bnx_disable_nvram_write(struct bnx_softc *sc)
1482{
1483	u_int32_t		val;
1484
1485	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1486
1487	val = REG_RD(sc, BNX_MISC_CFG);
1488	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1489}
1490#endif
1491
1492/****************************************************************************/
1493/* Enable NVRAM access.                                                     */
1494/*                                                                          */
1495/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1497/*                                                                          */
1498/* Returns:                                                                 */
1499/*   Nothing.                                                               */
1500/****************************************************************************/
1501void
1502bnx_enable_nvram_access(struct bnx_softc *sc)
1503{
1504	u_int32_t		val;
1505
1506	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1507
1508	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1509	/* Enable both bits, even on read. */
1510	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1511	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1512}
1513
1514/****************************************************************************/
1515/* Disable NVRAM access.                                                    */
1516/*                                                                          */
1517/* When the caller is finished accessing NVRAM access must be disabled.     */
1518/*                                                                          */
1519/* Returns:                                                                 */
1520/*   Nothing.                                                               */
1521/****************************************************************************/
1522void
1523bnx_disable_nvram_access(struct bnx_softc *sc)
1524{
1525	u_int32_t		val;
1526
1527	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1528
1529	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1530
1531	/* Disable both bits, even after read. */
1532	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1533	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1534}
1535
1536#ifdef BNX_NVRAM_WRITE_SUPPORT
1537/****************************************************************************/
1538/* Erase NVRAM page before writing.                                         */
1539/*                                                                          */
1540/* Non-buffered flash parts require that a page be erased before it is      */
1541/* written.                                                                 */
1542/*                                                                          */
1543/* Returns:                                                                 */
1544/*   0 on success, positive value on failure.                               */
1545/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t		cmd;
	int			j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	/* DONE never came back: report the erase as failed. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
1588#endif /* BNX_NVRAM_WRITE_SUPPORT */
1589
1590/****************************************************************************/
1591/* Read a dword (32 bits) from NVRAM.                                       */
1592/*                                                                          */
1593/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1594/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1595/*                                                                          */
1596/* Returns:                                                                 */
1597/*   0 on success and the 32 bit value read, positive value on failure.     */
1598/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* Convert from big-endian and copy the 4 bytes
			 * out; ret_val must point at >= 4 bytes. */
			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1649
1650#ifdef BNX_NVRAM_WRITE_SUPPORT
1651/****************************************************************************/
1652/* Write a dword (32 bits) to NVRAM.                                        */
1653/*                                                                          */
1654/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1655/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1656/* enabled NVRAM write access.                                              */
1657/*                                                                          */
1658/* Returns:                                                                 */
1659/*   0 on success, positive value on failure.                               */
1660/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t		cmd, val32;
	int			j;

	/*
	 * Write one 32 bit word from 'val' (which must point to at least
	 * 4 bytes) to NVRAM at 'offset'.  The caller must already hold
	 * the NVRAM lock and have enabled NVRAM access and write access.
	 *
	 * Returns 0 on success, EBUSY on timeout.
	 */

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
1704#endif /* BNX_NVRAM_WRITE_SUPPORT */
1705
1706/****************************************************************************/
1707/* Initialize NVRAM access.                                                 */
1708/*                                                                          */
1709/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1710/* access that device.                                                      */
1711/*                                                                          */
1712/* Returns:                                                                 */
1713/*   0 on success, positive value on failure.                               */
1714/****************************************************************************/
1715int
1716bnx_init_nvram(struct bnx_softc *sc)
1717{
1718	u_int32_t		val;
1719	int			j, entry_count, rc = 0;
1720	struct flash_spec	*flash;
1721
1722	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1723
1724	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1725		sc->bnx_flash_info = &flash_5709;
1726		goto bnx_init_nvram_get_flash_size;
1727	}
1728
1729	/* Determine the selected interface. */
1730	val = REG_RD(sc, BNX_NVM_CFG1);
1731
1732	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1733
1734	/*
1735	 * Flash reconfiguration is required to support additional
1736	 * NVRAM devices not directly supported in hardware.
1737	 * Check if the flash interface was reconfigured
1738	 * by the bootcode.
1739	 */
1740
1741	if (val & 0x40000000) {
1742		/* Flash interface reconfigured by bootcode. */
1743
1744		DBPRINT(sc,BNX_INFO_LOAD,
1745			"bnx_init_nvram(): Flash WAS reconfigured.\n");
1746
1747		for (j = 0, flash = &flash_table[0]; j < entry_count;
1748		     j++, flash++) {
1749			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1750			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1751				sc->bnx_flash_info = flash;
1752				break;
1753			}
1754		}
1755	} else {
1756		/* Flash interface not yet reconfigured. */
1757		u_int32_t mask;
1758
1759		DBPRINT(sc,BNX_INFO_LOAD,
1760			"bnx_init_nvram(): Flash was NOT reconfigured.\n");
1761
1762		if (val & (1 << 23))
1763			mask = FLASH_BACKUP_STRAP_MASK;
1764		else
1765			mask = FLASH_STRAP_MASK;
1766
1767		/* Look for the matching NVRAM device configuration data. */
1768		for (j = 0, flash = &flash_table[0]; j < entry_count;
1769		    j++, flash++) {
1770			/* Check if the dev matches any of the known devices. */
1771			if ((val & mask) == (flash->strapping & mask)) {
1772				/* Found a device match. */
1773				sc->bnx_flash_info = flash;
1774
1775				/* Request access to the flash interface. */
1776				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1777					return (rc);
1778
1779				/* Reconfigure the flash interface. */
1780				bnx_enable_nvram_access(sc);
1781				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1782				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1783				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1784				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1785				bnx_disable_nvram_access(sc);
1786				bnx_release_nvram_lock(sc);
1787
1788				break;
1789			}
1790		}
1791	}
1792
1793	/* Check if a matching device was found. */
1794	if (j == entry_count) {
1795		sc->bnx_flash_info = NULL;
1796		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1797			__FILE__, __LINE__);
1798		rc = ENODEV;
1799	}
1800
1801bnx_init_nvram_get_flash_size:
1802	/* Write the flash config data to the shared memory interface. */
1803	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1804	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1805	if (val)
1806		sc->bnx_flash_size = val;
1807	else
1808		sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1809
1810	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1811	    "0x%08X\n", sc->bnx_flash_info->total_size);
1812
1813	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1814
1815	return (rc);
1816}
1817
1818/****************************************************************************/
1819/* Read an arbitrary range of data from NVRAM.                              */
1820/*                                                                          */
1821/* Prepares the NVRAM interface for access and reads the requested data     */
1822/* into the supplied buffer.                                                */
1823/*                                                                          */
1824/* Returns:                                                                 */
1825/*   0 on success and the data read, positive value on failure.             */
1826/****************************************************************************/
1827int
1828bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1829    int buf_size)
1830{
1831	int			rc = 0;
1832	u_int32_t		cmd_flags, offset32, len32, extra;
1833
1834	if (buf_size == 0)
1835		return (0);
1836
1837	/* Request access to the flash interface. */
1838	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1839		return (rc);
1840
1841	/* Enable access to flash interface */
1842	bnx_enable_nvram_access(sc);
1843
1844	len32 = buf_size;
1845	offset32 = offset;
1846	extra = 0;
1847
1848	cmd_flags = 0;
1849
1850	if (offset32 & 3) {
1851		u_int8_t buf[4];
1852		u_int32_t pre_len;
1853
1854		offset32 &= ~3;
1855		pre_len = 4 - (offset & 3);
1856
1857		if (pre_len >= len32) {
1858			pre_len = len32;
1859			cmd_flags =
1860			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1861		} else
1862			cmd_flags = BNX_NVM_COMMAND_FIRST;
1863
1864		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1865
1866		if (rc)
1867			return (rc);
1868
1869		memcpy(ret_buf, buf + (offset & 3), pre_len);
1870
1871		offset32 += 4;
1872		ret_buf += pre_len;
1873		len32 -= pre_len;
1874	}
1875
1876	if (len32 & 3) {
1877		extra = 4 - (len32 & 3);
1878		len32 = (len32 + 4) & ~3;
1879	}
1880
1881	if (len32 == 4) {
1882		u_int8_t buf[4];
1883
1884		if (cmd_flags)
1885			cmd_flags = BNX_NVM_COMMAND_LAST;
1886		else
1887			cmd_flags =
1888			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1889
1890		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1891
1892		memcpy(ret_buf, buf, 4 - extra);
1893	} else if (len32 > 0) {
1894		u_int8_t buf[4];
1895
1896		/* Read the first word. */
1897		if (cmd_flags)
1898			cmd_flags = 0;
1899		else
1900			cmd_flags = BNX_NVM_COMMAND_FIRST;
1901
1902		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1903
1904		/* Advance to the next dword. */
1905		offset32 += 4;
1906		ret_buf += 4;
1907		len32 -= 4;
1908
1909		while (len32 > 4 && rc == 0) {
1910			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1911
1912			/* Advance to the next dword. */
1913			offset32 += 4;
1914			ret_buf += 4;
1915			len32 -= 4;
1916		}
1917
1918		if (rc)
1919			return (rc);
1920
1921		cmd_flags = BNX_NVM_COMMAND_LAST;
1922		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1923
1924		memcpy(ret_buf, buf, 4 - extra);
1925	}
1926
1927	/* Disable access to flash interface and release the lock. */
1928	bnx_disable_nvram_access(sc);
1929	bnx_release_nvram_lock(sc);
1930
1931	return (rc);
1932}
1933
1934#ifdef BNX_NVRAM_WRITE_SUPPORT
1935/****************************************************************************/
1936/* Write an arbitrary range of data from NVRAM.                             */
1937/*                                                                          */
1938/* Prepares the NVRAM interface for write access and writes the requested   */
1939/* data from the supplied buffer.  The caller is responsible for            */
1940/* calculating any appropriate CRCs.                                        */
1941/*                                                                          */
1942/* Returns:                                                                 */
1943/*   0 on success, positive value on failure.                               */
1944/****************************************************************************/
1945int
1946bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1947    int buf_size)
1948{
1949	u_int32_t		written, offset32, len32;
1950	u_int8_t		*buf, start[4], end[4];
1951	int			rc = 0;
1952	int			align_start, align_end;
1953
1954	buf = data_buf;
1955	offset32 = offset;
1956	len32 = buf_size;
1957	align_start = align_end = 0;
1958
1959	if ((align_start = (offset32 & 3))) {
1960		offset32 &= ~3;
1961		len32 += align_start;
1962		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1963			return (rc);
1964	}
1965
1966	if (len32 & 3) {
1967		if ((len32 > 4) || !align_start) {
1968			align_end = 4 - (len32 & 3);
1969			len32 += align_end;
1970			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1971			    end, 4))) {
1972				return (rc);
1973			}
1974		}
1975	}
1976
1977	if (align_start || align_end) {
1978		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1979		if (buf == 0)
1980			return (ENOMEM);
1981
1982		if (align_start)
1983			memcpy(buf, start, 4);
1984
1985		if (align_end)
1986			memcpy(buf + len32 - 4, end, 4);
1987
1988		memcpy(buf + align_start, data_buf, buf_size);
1989	}
1990
1991	written = 0;
1992	while ((written < len32) && (rc == 0)) {
1993		u_int32_t page_start, page_end, data_start, data_end;
1994		u_int32_t addr, cmd_flags;
1995		int i;
1996		u_int8_t flash_buffer[264];
1997
1998	    /* Find the page_start addr */
1999		page_start = offset32 + written;
2000		page_start -= (page_start % sc->bnx_flash_info->page_size);
2001		/* Find the page_end addr */
2002		page_end = page_start + sc->bnx_flash_info->page_size;
2003		/* Find the data_start addr */
2004		data_start = (written == 0) ? offset32 : page_start;
2005		/* Find the data_end addr */
2006		data_end = (page_end > offset32 + len32) ?
2007		    (offset32 + len32) : page_end;
2008
2009		/* Request access to the flash interface. */
2010		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
2011			goto nvram_write_end;
2012
2013		/* Enable access to flash interface */
2014		bnx_enable_nvram_access(sc);
2015
2016		cmd_flags = BNX_NVM_COMMAND_FIRST;
2017		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2018			int j;
2019
2020			/* Read the whole page into the buffer
2021			 * (non-buffer flash only) */
2022			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
2023				if (j == (sc->bnx_flash_info->page_size - 4))
2024					cmd_flags |= BNX_NVM_COMMAND_LAST;
2025
2026				rc = bnx_nvram_read_dword(sc,
2027					page_start + j,
2028					&flash_buffer[j],
2029					cmd_flags);
2030
2031				if (rc)
2032					goto nvram_write_end;
2033
2034				cmd_flags = 0;
2035			}
2036		}
2037
2038		/* Enable writes to flash interface (unlock write-protect) */
2039		if ((rc = bnx_enable_nvram_write(sc)) != 0)
2040			goto nvram_write_end;
2041
2042		/* Erase the page */
2043		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
2044			goto nvram_write_end;
2045
2046		/* Re-enable the write again for the actual write */
2047		bnx_enable_nvram_write(sc);
2048
2049		/* Loop to write back the buffer data from page_start to
2050		 * data_start */
2051		i = 0;
2052		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2053			for (addr = page_start; addr < data_start;
2054				addr += 4, i += 4) {
2055
2056				rc = bnx_nvram_write_dword(sc, addr,
2057				    &flash_buffer[i], cmd_flags);
2058
2059				if (rc != 0)
2060					goto nvram_write_end;
2061
2062				cmd_flags = 0;
2063			}
2064		}
2065
2066		/* Loop to write the new data from data_start to data_end */
2067		for (addr = data_start; addr < data_end; addr += 4, i++) {
2068			if ((addr == page_end - 4) ||
2069			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
2070			    && (addr == data_end - 4))) {
2071
2072				cmd_flags |= BNX_NVM_COMMAND_LAST;
2073			}
2074
2075			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
2076
2077			if (rc != 0)
2078				goto nvram_write_end;
2079
2080			cmd_flags = 0;
2081			buf += 4;
2082		}
2083
2084		/* Loop to write back the buffer data from data_end
2085		 * to page_end */
2086		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2087			for (addr = data_end; addr < page_end;
2088			    addr += 4, i += 4) {
2089
2090				if (addr == page_end-4)
2091					cmd_flags = BNX_NVM_COMMAND_LAST;
2092
2093				rc = bnx_nvram_write_dword(sc, addr,
2094				    &flash_buffer[i], cmd_flags);
2095
2096				if (rc != 0)
2097					goto nvram_write_end;
2098
2099				cmd_flags = 0;
2100			}
2101		}
2102
2103		/* Disable writes to flash interface (lock write-protect) */
2104		bnx_disable_nvram_write(sc);
2105
2106		/* Disable access to flash interface */
2107		bnx_disable_nvram_access(sc);
2108		bnx_release_nvram_lock(sc);
2109
2110		/* Increment written */
2111		written += data_end - data_start;
2112	}
2113
2114nvram_write_end:
2115	if (align_start || align_end)
2116		free(buf, M_DEVBUF);
2117
2118	return (rc);
2119}
2120#endif /* BNX_NVRAM_WRITE_SUPPORT */
2121
2122/****************************************************************************/
2123/* Verifies that NVRAM is accessible and contains valid data.               */
2124/*                                                                          */
2125/* Reads the configuration data from NVRAM and verifies that the CRC is     */
2126/* correct.                                                                 */
2127/*                                                                          */
2128/* Returns:                                                                 */
2129/*   0 on success, positive value on failure.                               */
2130/****************************************************************************/
2131int
2132bnx_nvram_test(struct bnx_softc *sc)
2133{
2134	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
2135	u_int8_t		*data = (u_int8_t *) buf;
2136	int			rc = 0;
2137	u_int32_t		magic, csum;
2138
2139	/*
2140	 * Check that the device NVRAM is valid by reading
2141	 * the magic value at offset 0.
2142	 */
2143	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2144		goto bnx_nvram_test_done;
2145
2146	magic = bnx_be32toh(buf[0]);
2147	if (magic != BNX_NVRAM_MAGIC) {
2148		rc = ENODEV;
2149		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2150		    "Expected: 0x%08X, Found: 0x%08X\n",
2151		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2152		goto bnx_nvram_test_done;
2153	}
2154
2155	/*
2156	 * Verify that the device NVRAM includes valid
2157	 * configuration data.
2158	 */
2159	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2160		goto bnx_nvram_test_done;
2161
2162	csum = ether_crc32_le(data, 0x100);
2163	if (csum != BNX_CRC32_RESIDUAL) {
2164		rc = ENODEV;
2165		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2166		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2167		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2168		goto bnx_nvram_test_done;
2169	}
2170
2171	csum = ether_crc32_le(data + 0x100, 0x100);
2172	if (csum != BNX_CRC32_RESIDUAL) {
2173		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2174		    "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2175		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2176		rc = ENODEV;
2177	}
2178
2179bnx_nvram_test_done:
2180	return (rc);
2181}
2182
2183/****************************************************************************/
2184/* Identifies the current media type of the controller and sets the PHY     */
2185/* address.                                                                 */
2186/*                                                                          */
2187/* Returns:                                                                 */
2188/*   Nothing.                                                               */
2189/****************************************************************************/
2190void
2191bnx_get_media(struct bnx_softc *sc)
2192{
2193	u_int32_t val;
2194
2195	sc->bnx_phy_addr = 1;
2196
2197	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2198		u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2199		u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2200		u_int32_t strap;
2201
2202		/*
2203		 * The BCM5709S is software configurable
2204		 * for Copper or SerDes operation.
2205		 */
2206		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2207			DBPRINT(sc, BNX_INFO_LOAD,
2208			    "5709 bonded for copper.\n");
2209			goto bnx_get_media_exit;
2210		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2211			DBPRINT(sc, BNX_INFO_LOAD,
2212			    "5709 bonded for dual media.\n");
2213			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2214			goto bnx_get_media_exit;
2215		}
2216
2217		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2218			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2219		else {
2220			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
2221			    >> 8;
2222		}
2223
2224		if (sc->bnx_pa.pa_function == 0) {
2225			switch (strap) {
2226			case 0x4:
2227			case 0x5:
2228			case 0x6:
2229				DBPRINT(sc, BNX_INFO_LOAD,
2230					"BCM5709 s/w configured for SerDes.\n");
2231				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2232				break;
2233			default:
2234				DBPRINT(sc, BNX_INFO_LOAD,
2235					"BCM5709 s/w configured for Copper.\n");
2236			}
2237		} else {
2238			switch (strap) {
2239			case 0x1:
2240			case 0x2:
2241			case 0x4:
2242				DBPRINT(sc, BNX_INFO_LOAD,
2243					"BCM5709 s/w configured for SerDes.\n");
2244				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2245				break;
2246			default:
2247				DBPRINT(sc, BNX_INFO_LOAD,
2248					"BCM5709 s/w configured for Copper.\n");
2249			}
2250		}
2251
2252	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
2253		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2254
2255	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
2256		sc->bnx_flags |= BNX_NO_WOL_FLAG;
2257
2258		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
2259			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;
2260
2261		/*
2262		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
2263		 * separate PHY for SerDes.
2264		 */
2265		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
2266			sc->bnx_phy_addr = 2;
2267			val = REG_RD_IND(sc, sc->bnx_shmem_base +
2268				 BNX_SHARED_HW_CFG_CONFIG);
2269			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
2270				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
2271				DBPRINT(sc, BNX_INFO_LOAD,
2272				    "Found 2.5Gb capable adapter\n");
2273			}
2274		}
2275	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
2276		   (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
2277		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;
2278
2279bnx_get_media_exit:
2280	DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
2281		"Using PHY address %d.\n", sc->bnx_phy_addr);
2282}
2283
2284/****************************************************************************/
2285/* Performs PHY initialization required before MII drivers access the       */
2286/* device.                                                                  */
2287/*                                                                          */
2288/* Returns:                                                                 */
2289/*   Nothing.                                                               */
2290/****************************************************************************/
2291void
2292bnx_init_media(struct bnx_softc *sc)
2293{
2294	if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
2295		/*
2296		 * Configure the BCM5709S / BCM5716S PHYs to use traditional
2297		 * IEEE Clause 22 method. Otherwise we have no way to attach
2298		 * the PHY to the mii(4) layer. PHY specific configuration
2299		 * is done by the mii(4) layer.
2300		 */
2301
2302		/* Select auto-negotiation MMD of the PHY. */
2303		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2304		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2305
2306		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2307		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2308
2309		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2310		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2311	}
2312}
2313
2314/****************************************************************************/
2315/* Free any DMA memory owned by the driver.                                 */
2316/*                                                                          */
2317/* Scans through each data structre that requires DMA memory and frees      */
2318/* the memory if allocated.                                                 */
2319/*                                                                          */
2320/* Returns:                                                                 */
2321/*   Nothing.                                                               */
2322/****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int			i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Tear down every DMA resource allocated by bnx_dma_alloc() that
	 * is actually present.  Each region is released in the strict
	 * reverse order of its setup: sync (where needed), unload the
	 * map, unmap the kva, free the segments, destroy the map.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
		    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		/* Mark as freed so a second call is a no-op. */
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all context memory pages. */
	/* (Host-memory context cache is only used by the BCM5709.) */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				bus_dmamem_unmap(sc->bnx_dmatag,
				    (caddr_t)sc->ctx_block[i],
				    BCM_PAGE_SIZE);
				bus_dmamem_free(sc->bnx_dmatag,
				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
				bus_dmamap_destroy(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX dmamaps. */
	/* This isn't necessary since we dont allocate them up front */

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	/* (The mbufs themselves are freed elsewhere; only maps here.) */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
2422
2423/****************************************************************************/
2424/* Allocate any DMA memory needed by the driver.                            */
2425/*                                                                          */
2426/* Allocates DMA memory needed for the various global structures needed by  */
2427/* hardware.                                                                */
2428/*                                                                          */
2429/* Returns:                                                                 */
2430/*   0 for success, positive value for failure.                             */
2431/****************************************************************************/
2432int
2433bnx_dma_alloc(struct bnx_softc *sc)
2434{
2435	int			i, rc = 0;
2436
2437	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2438
2439	/*
2440	 * Allocate DMA memory for the status block, map the memory into DMA
2441	 * space, and fetch the physical address of the block.
2442	 */
2443	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2444	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2445		printf(": Could not create status block DMA map!\n");
2446		rc = ENOMEM;
2447		goto bnx_dma_alloc_exit;
2448	}
2449
2450	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2451	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2452	    &sc->status_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2453		printf(": Could not allocate status block DMA memory!\n");
2454		rc = ENOMEM;
2455		goto bnx_dma_alloc_exit;
2456	}
2457
2458	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2459	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2460		printf(": Could not map status block DMA memory!\n");
2461		rc = ENOMEM;
2462		goto bnx_dma_alloc_exit;
2463	}
2464
2465	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2466	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2467		printf(": Could not load status block DMA memory!\n");
2468		rc = ENOMEM;
2469		goto bnx_dma_alloc_exit;
2470	}
2471
2472	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2473	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2474
2475	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2476
2477	/* DRC - Fix for 64 bit addresses. */
2478	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2479		(u_int32_t) sc->status_block_paddr);
2480
2481	/* BCM5709 uses host memory as cache for context memory. */
2482	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2483		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2484		if (sc->ctx_pages == 0)
2485			sc->ctx_pages = 1;
2486		if (sc->ctx_pages > 4) /* XXX */
2487			sc->ctx_pages = 4;
2488
2489		DBRUNIF((sc->ctx_pages > 512),
2490			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
2491				__FILE__, __LINE__, sc->ctx_pages));
2492
2493
2494		for (i = 0; i < sc->ctx_pages; i++) {
2495			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2496			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2497			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2498			    &sc->ctx_map[i]) != 0) {
2499				rc = ENOMEM;
2500				goto bnx_dma_alloc_exit;
2501			}
2502
2503			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2504			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2505			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2506				rc = ENOMEM;
2507				goto bnx_dma_alloc_exit;
2508			}
2509
2510			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2511			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2512			    (caddr_t *)&sc->ctx_block[i],
2513			    BUS_DMA_NOWAIT) != 0) {
2514				rc = ENOMEM;
2515				goto bnx_dma_alloc_exit;
2516			}
2517
2518			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2519			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2520			    BUS_DMA_NOWAIT) != 0) {
2521				rc = ENOMEM;
2522				goto bnx_dma_alloc_exit;
2523			}
2524
2525			bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2526		}
2527	}
2528
2529	/*
2530	 * Allocate DMA memory for the statistics block, map the memory into
2531	 * DMA space, and fetch the physical address of the block.
2532	 */
2533	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2534	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2535		printf(": Could not create stats block DMA map!\n");
2536		rc = ENOMEM;
2537		goto bnx_dma_alloc_exit;
2538	}
2539
2540	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2541	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2542	    &sc->stats_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2543		printf(": Could not allocate stats block DMA memory!\n");
2544		rc = ENOMEM;
2545		goto bnx_dma_alloc_exit;
2546	}
2547
2548	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2549	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2550		printf(": Could not map stats block DMA memory!\n");
2551		rc = ENOMEM;
2552		goto bnx_dma_alloc_exit;
2553	}
2554
2555	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2556	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2557		printf(": Could not load status block DMA memory!\n");
2558		rc = ENOMEM;
2559		goto bnx_dma_alloc_exit;
2560	}
2561
2562	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2563
2564	/* DRC - Fix for 64 bit address. */
2565	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2566	    (u_int32_t) sc->stats_block_paddr);
2567
2568	/*
2569	 * Allocate DMA memory for the TX buffer descriptor chain,
2570	 * and fetch the physical address of the block.
2571	 */
2572	for (i = 0; i < TX_PAGES; i++) {
2573		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2574		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2575		    &sc->tx_bd_chain_map[i])) {
2576			printf(": Could not create Tx desc %d DMA map!\n", i);
2577			rc = ENOMEM;
2578			goto bnx_dma_alloc_exit;
2579		}
2580
2581		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2582		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2583		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2584			printf(": Could not allocate TX desc %d DMA memory!\n",
2585			    i);
2586			rc = ENOMEM;
2587			goto bnx_dma_alloc_exit;
2588		}
2589
2590		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2591		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2592		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2593			printf(": Could not map TX desc %d DMA memory!\n", i);
2594			rc = ENOMEM;
2595			goto bnx_dma_alloc_exit;
2596		}
2597
2598		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2599		    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2600		    BUS_DMA_NOWAIT)) {
2601			printf(": Could not load TX desc %d DMA memory!\n", i);
2602			rc = ENOMEM;
2603			goto bnx_dma_alloc_exit;
2604		}
2605
2606		sc->tx_bd_chain_paddr[i] =
2607		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2608
2609		/* DRC - Fix for 64 bit systems. */
2610		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2611		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2612	}
2613
2614	/*
2615	 * Create lists to hold TX mbufs.
2616	 */
2617	TAILQ_INIT(&sc->tx_free_pkts);
2618	TAILQ_INIT(&sc->tx_used_pkts);
2619	sc->tx_pkt_count = 0;
2620	mtx_init(&sc->tx_pkt_mtx, IPL_NET);
2621
2622	/*
2623	 * Allocate DMA memory for the Rx buffer descriptor chain,
2624	 * and fetch the physical address of the block.
2625	 */
2626	for (i = 0; i < RX_PAGES; i++) {
2627		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2628		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2629		    &sc->rx_bd_chain_map[i])) {
2630			printf(": Could not create Rx desc %d DMA map!\n", i);
2631			rc = ENOMEM;
2632			goto bnx_dma_alloc_exit;
2633		}
2634
2635		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2636		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2637		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2638			printf(": Could not allocate Rx desc %d DMA memory!\n",
2639			    i);
2640			rc = ENOMEM;
2641			goto bnx_dma_alloc_exit;
2642		}
2643
2644		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2645		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2646		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2647			printf(": Could not map Rx desc %d DMA memory!\n", i);
2648			rc = ENOMEM;
2649			goto bnx_dma_alloc_exit;
2650		}
2651
2652		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2653		    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2654		    BUS_DMA_NOWAIT)) {
2655			printf(": Could not load Rx desc %d DMA memory!\n", i);
2656			rc = ENOMEM;
2657			goto bnx_dma_alloc_exit;
2658		}
2659
2660		sc->rx_bd_chain_paddr[i] =
2661		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2662
2663		/* DRC - Fix for 64 bit systems. */
2664		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2665		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2666	}
2667
2668	/*
2669	 * Create DMA maps for the Rx buffer mbufs.
2670	 */
2671	for (i = 0; i < TOTAL_RX_BD; i++) {
2672		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2673		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2674		    &sc->rx_mbuf_map[i])) {
2675			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2676			rc = ENOMEM;
2677			goto bnx_dma_alloc_exit;
2678		}
2679	}
2680
2681 bnx_dma_alloc_exit:
2682	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2683
2684	return(rc);
2685}
2686
2687/****************************************************************************/
2688/* Release all resources used by the driver.                                */
2689/*                                                                          */
2690/* Releases all resources acquired by the driver including interrupts,      */
2691/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2692/*                                                                          */
2693/* Returns:                                                                 */
2694/*   Nothing.                                                               */
2695/****************************************************************************/
2696void
2697bnx_release_resources(struct bnx_softc *sc)
2698{
2699	struct pci_attach_args	*pa = &(sc->bnx_pa);
2700
2701	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2702
2703	bnx_dma_free(sc);
2704
2705	if (sc->bnx_intrhand != NULL)
2706		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2707
2708	if (sc->bnx_size)
2709		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2710
2711	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2712}
2713
2714/****************************************************************************/
2715/* Firmware synchronization.                                                */
2716/*                                                                          */
2717/* Before performing certain events such as a chip reset, synchronize with  */
2718/* the firmware first.                                                      */
2719/*                                                                          */
2720/* Returns:                                                                 */
2721/*   0 for success, positive value for failure.                             */
2722/****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
{
	int			i, rc = 0;
	u_int32_t		val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/*
	 * Increment the message sequence number and fold it into the
	 * message so the bootcode's acknowledgement can be matched to
	 * this specific request.
	 */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		/*
		 * The bootcode echoes the sequence number back in the
		 * ACK field of the firmware mailbox when it has
		 * processed the request.
		 */
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		/*
		 * Replace the message code with a timeout notification so
		 * the bootcode knows the driver gave up on this exchange.
		 */
		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		/* Remember the timeout so later calls can fail fast. */
		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return (rc);
}
2772
2773/****************************************************************************/
2774/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2775/*                                                                          */
2776/* Returns:                                                                 */
2777/*   Nothing.                                                               */
2778/****************************************************************************/
2779void
2780bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2781    u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2782{
2783	int			i;
2784	u_int32_t		val;
2785
2786	/* Set the page size used by RV2P. */
2787	if (rv2p_proc == RV2P_PROC2) {
2788		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2789		    USABLE_RX_BD_PER_PAGE);
2790	}
2791
2792	for (i = 0; i < rv2p_code_len; i += 8) {
2793		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2794		rv2p_code++;
2795		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2796		rv2p_code++;
2797
2798		if (rv2p_proc == RV2P_PROC1) {
2799			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2800			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2801		} else {
2802			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2803			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2804		}
2805	}
2806
2807	/* Reset the processor, un-stall is done later. */
2808	if (rv2p_proc == RV2P_PROC1)
2809		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2810	else
2811		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2812}
2813
2814/****************************************************************************/
2815/* Load RISC processor firmware.                                            */
2816/*                                                                          */
2817/* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2818/* associated with a particular processor.                                  */
2819/*                                                                          */
2820/* Returns:                                                                 */
2821/*   Nothing.                                                               */
2822/****************************************************************************/
2823void
2824bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2825    struct fw_info *fw)
2826{
2827	u_int32_t		offset;
2828	u_int32_t		val;
2829
2830	/* Halt the CPU. */
2831	val = REG_RD_IND(sc, cpu_reg->mode);
2832	val |= cpu_reg->mode_value_halt;
2833	REG_WR_IND(sc, cpu_reg->mode, val);
2834	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2835
2836	/* Load the Text area. */
2837	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2838	if (fw->text) {
2839		int j;
2840
2841		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2842			REG_WR_IND(sc, offset, fw->text[j]);
2843	}
2844
2845	/* Load the Data area. */
2846	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2847	if (fw->data) {
2848		int j;
2849
2850		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2851			REG_WR_IND(sc, offset, fw->data[j]);
2852	}
2853
2854	/* Load the SBSS area. */
2855	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2856	if (fw->sbss) {
2857		int j;
2858
2859		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2860			REG_WR_IND(sc, offset, fw->sbss[j]);
2861	}
2862
2863	/* Load the BSS area. */
2864	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2865	if (fw->bss) {
2866		int j;
2867
2868		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2869			REG_WR_IND(sc, offset, fw->bss[j]);
2870	}
2871
2872	/* Load the Read-Only area. */
2873	offset = cpu_reg->spad_base +
2874	    (fw->rodata_addr - cpu_reg->mips_view_base);
2875	if (fw->rodata) {
2876		int j;
2877
2878		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2879			REG_WR_IND(sc, offset, fw->rodata[j]);
2880	}
2881
2882	/* Clear the pre-fetch instruction. */
2883	REG_WR_IND(sc, cpu_reg->inst, 0);
2884	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2885
2886	/* Start the CPU. */
2887	val = REG_RD_IND(sc, cpu_reg->mode);
2888	val &= ~cpu_reg->mode_value_halt;
2889	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2890	REG_WR_IND(sc, cpu_reg->mode, val);
2891}
2892
2893/****************************************************************************/
2894/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2895/*                                                                          */
2896/* Loads the firmware for each CPU and starts the CPU.                      */
2897/*                                                                          */
2898/* Returns:                                                                 */
2899/*   Nothing.                                                               */
2900/****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	/* Default to the 5706/5708 (B06) firmware and RV2P images. */
	struct bnx_firmware *bfw = &bnx_firmwares[BNX_FW_B06];
	struct bnx_rv2p *rv2p = &bnx_rv2ps[BNX_RV2P];
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * The 5709 uses its own (B09) firmware set, and the Ax chip
	 * revisions additionally need dedicated RV2P images.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		bfw = &bnx_firmwares[BNX_FW_B09];
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = &bnx_rv2ps[BNX_XI90_RV2P];
		else
			rv2p = &bnx_rv2ps[BNX_XI_RV2P];
	}

	/* Initialize the RV2P processor. */
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc1,
	    rv2p->fw->bnx_rv2p_proc1len, RV2P_PROC1);
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc2,
	    rv2p->fw->bnx_rv2p_proc2len, RV2P_PROC2);

	/*
	 * Initialize the RX Processor.
	 *
	 * cpu_reg describes the per-CPU register block; mips_view_base
	 * is the base of the CPU's view of its scratchpad, used to
	 * rebase section addresses when loading.
	 */
	cpu_reg.mode = BNX_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* NOTE(review): ver_* fields look informational only — confirm. */
	fw.ver_major = bfw->fw->bnx_RXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_RXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_RXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_RXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_RXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_RXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_RXP_FwText;

	fw.data_addr = bfw->fw->bnx_RXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_RXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_RXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_RXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_RXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_RXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_RXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_RXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_RXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_RXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_RXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_RXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TXP_FwText;

	fw.data_addr = bfw->fw->bnx_TXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_TXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TPAT_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TPAT_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TPAT_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TPAT_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TPAT_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TPAT_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TPAT_FwText;

	fw.data_addr = bfw->fw->bnx_TPAT_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TPAT_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TPAT_FwData;

	fw.sbss_addr = bfw->fw->bnx_TPAT_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TPAT_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TPAT_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TPAT_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TPAT_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TPAT_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TPAT_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TPAT_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TPAT_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_COM_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_COM_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_COM_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_COM_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_COM_FwTextAddr;
	fw.text_len = bfw->fw->bnx_COM_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_COM_FwText;

	fw.data_addr = bfw->fw->bnx_COM_FwDataAddr;
	fw.data_len = bfw->fw->bnx_COM_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_COM_FwData;

	fw.sbss_addr = bfw->fw->bnx_COM_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_COM_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_COM_FwSbss;

	fw.bss_addr = bfw->fw->bnx_COM_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_COM_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_COM_FwBss;

	fw.rodata_addr = bfw->fw->bnx_COM_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_COM_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_COM_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
}
3111
3112/****************************************************************************/
3113/* Initialize context memory.                                               */
3114/*                                                                          */
3115/* Clears the memory associated with each Context ID (CID).                 */
3116/*                                                                          */
3117/* Returns:                                                                 */
3118/*   Nothing.                                                               */
3119/****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		u_int32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		/* NOTE(review): bit 12 looks like an undocumented enable
		 * flag in CTX_COMMAND — confirm against the 5709 manual. */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		/* Encode the host page size (log2, biased by 8). */
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			/* MEM_INIT self-clears when the init is done. */
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		/* Program the host page table with each context page. */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache. */
			val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
				BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			/* High 32 bits of the 64-bit DMA address. */
			val = (u_int32_t)
			    ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
				BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		u_int32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk all 96 context IDs backwards, zeroing each one. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {

			vcid_addr -= PHY_CTX_SIZE;

			/* Map the context page into the register window. */
			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			/* Zero the context page, one word at a time. */
			for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
				CTX_WR(sc, 0x00, offset, 0);
			}

			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
 	}
}
3197
3198/****************************************************************************/
3199/* Fetch the permanent MAC address of the controller.                       */
3200/*                                                                          */
3201/* Returns:                                                                 */
3202/*   Nothing.                                                               */
3203/****************************************************************************/
3204void
3205bnx_get_mac_addr(struct bnx_softc *sc)
3206{
3207	u_int32_t		mac_lo = 0, mac_hi = 0;
3208
3209	/*
3210	 * The NetXtreme II bootcode populates various NIC
3211	 * power-on and runtime configuration items in a
3212	 * shared memory area.  The factory configured MAC
3213	 * address is available from both NVRAM and the
3214	 * shared memory area so we'll read the value from
3215	 * shared memory for speed.
3216	 */
3217
3218	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3219	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3220
3221	if ((mac_lo == 0) && (mac_hi == 0)) {
3222		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3223		    __FILE__, __LINE__);
3224	} else {
3225		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3226		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3227		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3228		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3229		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3230		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3231	}
3232
3233	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3234	    "%6D\n", sc->eaddr, ":");
3235}
3236
3237/****************************************************************************/
3238/* Program the MAC address.                                                 */
3239/*                                                                          */
3240/* Returns:                                                                 */
3241/*   Nothing.                                                               */
3242/****************************************************************************/
3243void
3244bnx_set_mac_addr(struct bnx_softc *sc)
3245{
3246	u_int32_t		val;
3247	u_int8_t		*mac_addr = sc->eaddr;
3248
3249	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3250	    "%6D\n", sc->eaddr, ":");
3251
3252	val = (mac_addr[0] << 8) | mac_addr[1];
3253
3254	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3255
3256	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3257		(mac_addr[4] << 8) | mac_addr[5];
3258
3259	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3260}
3261
3262/****************************************************************************/
3263/* Stop the controller.                                                     */
3264/*                                                                          */
3265/* Returns:                                                                 */
3266/*   Nothing.                                                               */
3267/****************************************************************************/
void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry	*ifm;
	struct mii_data		*mii;
	int			mtmp, itmp;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop the watchdog/tick timeout before touching the hardware. */
	timeout_del(&sc->bnx_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	/*
	 * Temporarily force IFF_UP and IFM_NONE so mii_mediachg()
	 * powers the PHY down, then restore the saved media word and
	 * interface flags.
	 */
	mii = &sc->bnx_mii;
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	ifm = mii->mii_media.ifm_cur;
	mtmp = ifm->ifm_media;
	ifm->ifm_media = IFM_ETHER|IFM_NONE;
	mii_mediachg(mii);
	ifm->ifm_media = mtmp;
	ifp->if_flags = itmp;

	ifp->if_timer = 0;

	sc->bnx_link = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* Re-arm the management firmware so it keeps running while the
	 * driver is down — presumably for IPMI/ASF; confirm. */
	bnx_mgmt_init(sc);
}
3321
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
		val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* The 5709 resets via the MISC command register... */
		REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BNX_MISC_COMMAND);
		DELAY(5);

		/*
		 * ... then re-enables register access through PCI config
		 * space, since the reset clears the window settings.
		 */
		val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
		    val);
	} else {
		/* 5706/5708 reset via the PCICFG misc config register. */
		val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

		/* Poll for reset completion (up to 10 x 10us). */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
			if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bnx_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured. */
	/* SWAP_DIAG0 reads as 0x01020304 when the swap setup is right. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3422
/****************************************************************************/
/* Perform general chip initialization: DMA/byte-swap configuration, PCI-X */
/* errata workarounds, state machine enables, context/CPU/NVRAM init, and  */
/* MQ/RV2P/TBDR block setup.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/*
	 * Additional DMA control bits plus the PCI clock compensation
	 * delay.  NOTE(review): meaning of the (0x2 << 20) field is not
	 * visible here -- confirm against the register description.
	 */
	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	/* Enable the fast clock comparator on 133MHz PCI-X buses. */
	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

#if 1
	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}
#endif

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
			val |= BNX_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Program the start/end of the kernel bypass window. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell the RV2P block the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

#if 0
	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
#endif

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3527
3528/****************************************************************************/
3529/* Initialize the controller in preparation to send/receive traffic.        */
3530/*                                                                          */
3531/* Returns:                                                                 */
3532/*   0 for success, positive value for failure.                             */
3533/****************************************************************************/
3534int
3535bnx_blockinit(struct bnx_softc *sc)
3536{
3537	u_int32_t		reg, val;
3538	int 			rc = 0;
3539
3540	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3541
3542	/* Load the hardware default MAC address. */
3543	bnx_set_mac_addr(sc);
3544
3545	/* Set the Ethernet backoff seed value */
3546	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3547	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3548	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3549
3550	sc->last_status_idx = 0;
3551	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3552
3553	/* Set up link change interrupt generation. */
3554	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3555	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3556
3557	/* Program the physical address of the status block. */
3558	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3559	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3560	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3561
3562	/* Program the physical address of the statistics block. */
3563	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3564	    (u_int32_t)(sc->stats_block_paddr));
3565	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3566	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3567
3568	/* Program various host coalescing parameters. */
3569	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3570	    << 16) | sc->bnx_tx_quick_cons_trip);
3571	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3572	    << 16) | sc->bnx_rx_quick_cons_trip);
3573	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3574	    sc->bnx_comp_prod_trip);
3575	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3576	    sc->bnx_tx_ticks);
3577	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3578	    sc->bnx_rx_ticks);
3579	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3580	    sc->bnx_com_ticks);
3581	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3582	    sc->bnx_cmd_ticks);
3583	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3584	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3585	REG_WR(sc, BNX_HC_CONFIG,
3586	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3587	    BNX_HC_CONFIG_COLLECT_STATS));
3588
3589	/* Clear the internal statistics counters. */
3590	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3591
3592	/* Verify that bootcode is running. */
3593	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3594
3595	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3596	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3597	    __FILE__, __LINE__); reg = 0);
3598
3599	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3600	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3601		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3602		    "Expected: 08%08X\n", __FILE__, __LINE__,
3603		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3604		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3605		rc = ENODEV;
3606		goto bnx_blockinit_exit;
3607	}
3608
3609	/* Check if any management firmware is running. */
3610	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3611	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3612	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3613		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3614		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3615	}
3616
3617	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3618	    BNX_DEV_INFO_BC_REV);
3619
3620	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3621
3622	/* Enable DMA */
3623	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3624		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3625		val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3626		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3627	}
3628
3629	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3630	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3631
3632	/* Enable link state change interrupt generation. */
3633	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3634		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3635		    BNX_MISC_ENABLE_DEFAULT_XI);
3636	} else
3637		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3638
3639	/* Enable all remaining blocks in the MAC. */
3640	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3641	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3642	DELAY(20);
3643
3644bnx_blockinit_exit:
3645	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3646
3647	return (rc);
3648}
3649
3650/****************************************************************************/
3651/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3652/*                                                                          */
3653/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3654/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3655/* necessary.                                                               */
3656/*                                                                          */
3657/* Returns:                                                                 */
3658/*   0 for success, positive value for failure.                             */
3659/****************************************************************************/
3660int
3661bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3662    u_int16_t *chain_prod, u_int32_t *prod_bseq)
3663{
3664	bus_dmamap_t		map;
3665	struct mbuf 		*m;
3666	struct rx_bd		*rxbd;
3667	int			i;
3668	u_int32_t		addr;
3669#ifdef BNX_DEBUG
3670	u_int16_t		debug_chain_prod = *chain_prod;
3671#endif
3672	u_int16_t		first_chain_prod;
3673
3674	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3675	    __FUNCTION__);
3676
3677	/* Make sure the inputs are valid. */
3678	DBRUNIF((*chain_prod > MAX_RX_BD),
3679	    printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3680	    *chain_prod, (u_int16_t) MAX_RX_BD));
3681
3682	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3683	    "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3684	    *prod_bseq);
3685
3686	/* This is a new mbuf allocation. */
3687	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
3688	if (!m)
3689		return (ENOBUFS);
3690	m->m_len = m->m_pkthdr.len = MCLBYTES;
3691	/* the chip aligns the ip header for us, no need to m_adj */
3692
3693	/* Map the mbuf cluster into device memory. */
3694	map = sc->rx_mbuf_map[*chain_prod];
3695	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT)) {
3696		m_freem(m);
3697		return (ENOBUFS);
3698	}
3699	first_chain_prod = *chain_prod;
3700
3701	/* Make sure there is room in the receive chain. */
3702	if (map->dm_nsegs > sc->free_rx_bd) {
3703		bus_dmamap_unload(sc->bnx_dmatag, map);
3704		m_freem(m);
3705		return (EFBIG);
3706	}
3707
3708#ifdef BNX_DEBUG
3709	/* Track the distribution of buffer segments. */
3710	sc->rx_mbuf_segs[map->dm_nsegs]++;
3711#endif
3712
3713	/* Update some debug statistics counters */
3714	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3715	    sc->rx_low_watermark = sc->free_rx_bd);
3716	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3717
3718	/* Setup the rx_bd for the first segment. */
3719	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3720
3721	addr = (u_int32_t)map->dm_segs[0].ds_addr;
3722	rxbd->rx_bd_haddr_lo = addr;
3723	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3724	rxbd->rx_bd_haddr_hi = addr;
3725	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3726	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3727	*prod_bseq += map->dm_segs[0].ds_len;
3728
3729	for (i = 1; i < map->dm_nsegs; i++) {
3730		*prod = NEXT_RX_BD(*prod);
3731		*chain_prod = RX_CHAIN_IDX(*prod);
3732
3733		rxbd =
3734		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3735
3736		addr = (u_int32_t)map->dm_segs[i].ds_addr;
3737		rxbd->rx_bd_haddr_lo = addr;
3738		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3739		rxbd->rx_bd_haddr_hi = addr;
3740		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3741		rxbd->rx_bd_flags = 0;
3742		*prod_bseq += map->dm_segs[i].ds_len;
3743	}
3744
3745	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3746
3747	/*
3748	 * Save the mbuf, adjust the map pointer (swap map for first and
3749	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3750	 * and update our counter.
3751	 */
3752	sc->rx_mbuf_ptr[*chain_prod] = m;
3753	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3754	sc->rx_mbuf_map[*chain_prod] = map;
3755	sc->free_rx_bd -= map->dm_nsegs;
3756
3757	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3758	    map->dm_nsegs));
3759
3760	return (0);
3761}
3762
/****************************************************************************/
/* Workqueue task: preallocate TX packet descriptors and their DMA maps.    */
/* Adds up to 4 bnx_pkt entries to the free list, then clears the pending   */
/* allocation flag and kicks the transmit path if packets are queued.       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_alloc_pkts(void *xsc, void *arg)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_pkt *pkt;
	int i;
	int s;

	for (i = 0; i < 4; i++) { /* magic! */
		pkt = pool_get(bnx_tx_pool, PR_WAITOK);
		if (pkt == NULL)
			break;

		/* One map per packet, sized for a maximally-fragmented TX. */
		if (bus_dmamap_create(sc->bnx_dmatag,
		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->pkt_dmamap) != 0)
			goto put;

		/* Interface went down while we slept; undo this entry. */
		if (!ISSET(ifp->if_flags, IFF_UP))
			goto stopping;

		mtx_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count++;
		mtx_leave(&sc->tx_pkt_mtx);
	}

	/* Allow another allocation task to be scheduled later. */
	mtx_enter(&sc->tx_pkt_mtx);
	CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
	mtx_leave(&sc->tx_pkt_mtx);

	/* Restart transmission in case it stalled waiting for packets. */
	s = splnet();
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);
	splx(s);

	return;

stopping:
	bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
put:
	pool_put(bnx_tx_pool, pkt);
}
3808
3809/****************************************************************************/
3810/* Initialize the TX context memory.                                        */
3811/*                                                                          */
3812/* Returns:                                                                 */
3813/*   Nothing                                                                */
3814/****************************************************************************/
void
bnx_init_tx_context(struct bnx_softc *sc)
{
	u_int32_t val;

	/* Initialize the context ID for an L2 TX chain. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* 5709 uses the "_XI" context register offsets. */
		/* Set the CID type to support an L2 connection. */
		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Pre-5709 chips use the original context register offsets. */
		/* Set the CID type to support an L2 connection. */
		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
	}
}
3849
3850/****************************************************************************/
3851/* Allocate memory and initialize the TX data structures.                   */
3852/*                                                                          */
3853/* Returns:                                                                 */
3854/*   0 for success, positive value for failure.                             */
3855/****************************************************************************/
3856int
3857bnx_init_tx_chain(struct bnx_softc *sc)
3858{
3859	struct tx_bd		*txbd;
3860	u_int32_t		addr;
3861	int			i, rc = 0;
3862
3863	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3864
3865	/* Force an allocation of some dmamaps for tx up front */
3866	bnx_alloc_pkts(sc, NULL);
3867
3868	/* Set the initial TX producer/consumer indices. */
3869	sc->tx_prod = 0;
3870	sc->tx_cons = 0;
3871	sc->tx_prod_bseq = 0;
3872	sc->used_tx_bd = 0;
3873	sc->max_tx_bd =	USABLE_TX_BD;
3874	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3875	DBRUNIF(1, sc->tx_full_count = 0);
3876
3877	/*
3878	 * The NetXtreme II supports a linked-list structure called
3879	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3880	 * consists of a series of 1 or more chain pages, each of which
3881	 * consists of a fixed number of BD entries.
3882	 * The last BD entry on each page is a pointer to the next page
3883	 * in the chain, and the last pointer in the BD chain
3884	 * points back to the beginning of the chain.
3885	 */
3886
3887	/* Set the TX next pointer chain entries. */
3888	for (i = 0; i < TX_PAGES; i++) {
3889		int j;
3890
3891		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3892
3893		/* Check if we've reached the last page. */
3894		if (i == (TX_PAGES - 1))
3895			j = 0;
3896		else
3897			j = i + 1;
3898
3899		addr = (u_int32_t)sc->tx_bd_chain_paddr[j];
3900		txbd->tx_bd_haddr_lo = addr;
3901		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3902		txbd->tx_bd_haddr_hi = addr;
3903	}
3904
3905	/*
3906	 * Initialize the context ID for an L2 TX chain.
3907	 */
3908	bnx_init_tx_context(sc);
3909
3910	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3911
3912	return(rc);
3913}
3914
3915/****************************************************************************/
3916/* Free memory and clear the TX data structures.                            */
3917/*                                                                          */
3918/* Returns:                                                                 */
3919/*   Nothing.                                                               */
3920/****************************************************************************/
3921void
3922bnx_free_tx_chain(struct bnx_softc *sc)
3923{
3924	struct bnx_pkt		*pkt;
3925	int			i;
3926
3927	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3928
3929	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3930	mtx_enter(&sc->tx_pkt_mtx);
3931	while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
3932		TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
3933		mtx_leave(&sc->tx_pkt_mtx);
3934
3935		bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
3936		    pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3937		bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);
3938
3939		m_freem(pkt->pkt_mbuf);
3940
3941		mtx_enter(&sc->tx_pkt_mtx);
3942		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
3943	}
3944
3945	/* Destroy all the dmamaps we allocated for TX */
3946	while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
3947		TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
3948		sc->tx_pkt_count--;
3949		mtx_leave(&sc->tx_pkt_mtx);
3950
3951		bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
3952		pool_put(bnx_tx_pool, pkt);
3953
3954		mtx_enter(&sc->tx_pkt_mtx);
3955	}
3956	mtx_leave(&sc->tx_pkt_mtx);
3957
3958	/* Clear each TX chain page. */
3959	for (i = 0; i < TX_PAGES; i++)
3960		bzero(sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3961
3962	sc->used_tx_bd = 0;
3963
3964	/* Check if we lost any mbufs in the process. */
3965	DBRUNIF((sc->tx_mbuf_alloc),
3966	    printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3967	    sc->tx_mbuf_alloc));
3968
3969	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3970}
3971
3972/****************************************************************************/
3973/* Initialize the RX context memory.                                        */
3974/*                                                                          */
3975/* Returns:                                                                 */
3976/*   Nothing                                                                */
3977/****************************************************************************/
void
bnx_init_rx_context(struct bnx_softc *sc)
{
	u_int32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
		BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		u_int32_t lo_water, hi_water;

		lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD / 4;

		/* Scale watermarks down to the units the context expects. */
		lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;

		/* Clamp the high watermark to its 4-bit field; disable
		 * pause generation entirely if it scales to zero. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;

		val |= (lo_water << BNX_L2CTX_RX_LO_WATER_MARK_SHIFT) |
		    (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MQ_MAP_L2_5);
		REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
}
4026
4027/****************************************************************************/
4028/* Add mbufs to the RX chain until its full or an mbuf allocation error     */
4029/* occurs.                                                                  */
4030/*                                                                          */
4031/* Returns:                                                                 */
4032/*   Nothing                                                                */
4033/****************************************************************************/
4034void
4035bnx_fill_rx_chain(struct bnx_softc *sc)
4036{
4037	u_int16_t		prod, chain_prod;
4038	u_int32_t		prod_bseq;
4039#ifdef BNX_DEBUG
4040	int rx_mbuf_alloc_before, free_rx_bd_before;
4041#endif
4042
4043	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
4044
4045	prod = sc->rx_prod;
4046	prod_bseq = sc->rx_prod_bseq;
4047
4048#ifdef BNX_DEBUG
4049	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4050	free_rx_bd_before = sc->free_rx_bd;
4051#endif
4052
4053	/* Keep filling the RX chain until it's full. */
4054	while (sc->free_rx_bd > 0) {
4055		chain_prod = RX_CHAIN_IDX(prod);
4056		if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
4057			/* Bail out if we can't add an mbuf to the chain. */
4058			break;
4059		}
4060		prod = NEXT_RX_BD(prod);
4061	}
4062
4063#if 0
4064	DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
4065		BNX_PRINTF(sc, "%s(): Installed %d mbufs in %d rx_bd entries.\n",
4066		__FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
4067		(free_rx_bd_before - sc->free_rx_bd)));
4068#endif
4069
4070	/* Save the RX chain producer index. */
4071	sc->rx_prod = prod;
4072	sc->rx_prod_bseq = prod_bseq;
4073
4074	/* Tell the chip about the waiting rx_bd's. */
4075	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4076	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4077
4078	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
4079}
4080
4081/****************************************************************************/
4082/* Allocate memory and initialize the RX data structures.                   */
4083/*                                                                          */
4084/* Returns:                                                                 */
4085/*   0 for success, positive value for failure.                             */
4086/****************************************************************************/
4087int
4088bnx_init_rx_chain(struct bnx_softc *sc)
4089{
4090	struct rx_bd		*rxbd;
4091	int			i, rc = 0;
4092	u_int32_t		addr;
4093
4094	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4095
4096	/* Initialize the RX producer and consumer indices. */
4097	sc->rx_prod = 0;
4098	sc->rx_cons = 0;
4099	sc->rx_prod_bseq = 0;
4100	sc->free_rx_bd = USABLE_RX_BD;
4101	sc->max_rx_bd = USABLE_RX_BD;
4102	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4103	DBRUNIF(1, sc->rx_empty_count = 0);
4104
4105	/* Initialize the RX next pointer chain entries. */
4106	for (i = 0; i < RX_PAGES; i++) {
4107		int j;
4108
4109		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4110
4111		/* Check if we've reached the last page. */
4112		if (i == (RX_PAGES - 1))
4113			j = 0;
4114		else
4115			j = i + 1;
4116
4117		/* Setup the chain page pointers. */
4118		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
4119		rxbd->rx_bd_haddr_hi = addr;
4120		addr = (u_int32_t)sc->rx_bd_chain_paddr[j];
4121		rxbd->rx_bd_haddr_lo = addr;
4122	}
4123
4124	/* Fill up the RX chain. */
4125	bnx_fill_rx_chain(sc);
4126
4127	for (i = 0; i < RX_PAGES; i++)
4128		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4129		    sc->rx_bd_chain_map[i]->dm_mapsize,
4130		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4131
4132	bnx_init_rx_context(sc);
4133
4134	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4135
4136	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4137
4138	return(rc);
4139}
4140
4141/****************************************************************************/
4142/* Free memory and clear the RX data structures.                            */
4143/*                                                                          */
4144/* Returns:                                                                 */
4145/*   Nothing.                                                               */
4146/****************************************************************************/
4147void
4148bnx_free_rx_chain(struct bnx_softc *sc)
4149{
4150	int			i;
4151#ifdef BNX_DEBUG
4152	int			rx_mbuf_alloc_before;
4153#endif
4154
4155	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4156
4157#ifdef BNX_DEBUG
4158	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4159#endif
4160
4161	/* Free any mbufs still in the RX mbuf chain. */
4162	for (i = 0; i < TOTAL_RX_BD; i++) {
4163		if (sc->rx_mbuf_ptr[i] != NULL) {
4164			if (sc->rx_mbuf_map[i] != NULL) {
4165				bus_dmamap_sync(sc->bnx_dmatag,
4166				    sc->rx_mbuf_map[i],	0,
4167				    sc->rx_mbuf_map[i]->dm_mapsize,
4168				    BUS_DMASYNC_POSTREAD);
4169				bus_dmamap_unload(sc->bnx_dmatag,
4170				    sc->rx_mbuf_map[i]);
4171			}
4172			m_freem(sc->rx_mbuf_ptr[i]);
4173			sc->rx_mbuf_ptr[i] = NULL;
4174			DBRUNIF(1, sc->rx_mbuf_alloc--);
4175		}
4176	}
4177
4178	DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
4179		BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
4180		__FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
4181
4182	/* Clear each RX chain page. */
4183	for (i = 0; i < RX_PAGES; i++)
4184		bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
4185
4186	sc->free_rx_bd = sc->max_rx_bd;
4187
4188	/* Check if we lost any mbufs in the process. */
4189	DBRUNIF((sc->rx_mbuf_alloc),
4190	    printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
4191	    sc->rx_mbuf_alloc));
4192
4193	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4194}
4195
4196/****************************************************************************/
4197/* Set media options.                                                       */
4198/*                                                                          */
4199/* Returns:                                                                 */
4200/*   0 for success, positive value for failure.                             */
4201/****************************************************************************/
4202int
4203bnx_ifmedia_upd(struct ifnet *ifp)
4204{
4205	struct bnx_softc	*sc;
4206	struct mii_data		*mii;
4207	int			rc = 0;
4208
4209	sc = ifp->if_softc;
4210
4211	mii = &sc->bnx_mii;
4212	sc->bnx_link = 0;
4213	if (mii->mii_instance) {
4214		struct mii_softc *miisc;
4215		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4216			mii_phy_reset(miisc);
4217	}
4218	mii_mediachg(mii);
4219
4220	return(rc);
4221}
4222
4223/****************************************************************************/
4224/* Reports current media status.                                            */
4225/*                                                                          */
4226/* Returns:                                                                 */
4227/*   Nothing.                                                               */
4228/****************************************************************************/
4229void
4230bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4231{
4232	struct bnx_softc	*sc;
4233	struct mii_data		*mii;
4234	int			s;
4235
4236	sc = ifp->if_softc;
4237
4238	s = splnet();
4239
4240	mii = &sc->bnx_mii;
4241
4242	mii_pollstat(mii);
4243	ifmr->ifm_status = mii->mii_media_status;
4244	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4245	    sc->bnx_flowflags;
4246
4247	splx(s);
4248}
4249
4250/****************************************************************************/
4251/* Handles PHY generated interrupt events.                                  */
4252/*                                                                          */
4253/* Returns:                                                                 */
4254/*   Nothing.                                                               */
4255/****************************************************************************/
void
bnx_phy_intr(struct bnx_softc *sc)
{
	u_int32_t		new_link_state, old_link_state;

	/* Current link state vs. the state we last acknowledged. */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));

		/* Re-run the periodic tick immediately to pick up the
		 * new link state. */
		sc->bnx_link = 0;
		timeout_del(&sc->bnx_timeout);
		bnx_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
		} else {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
}
4289
4290/****************************************************************************/
4291/* Handles received frame interrupt events.                                 */
4292/*                                                                          */
4293/* Returns:                                                                 */
4294/*   Nothing.                                                               */
4295/****************************************************************************/
4296void
4297bnx_rx_intr(struct bnx_softc *sc)
4298{
4299	struct status_block	*sblk = sc->status_block;
4300	struct ifnet		*ifp = &sc->arpcom.ac_if;
4301	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
4302	u_int16_t		sw_prod, sw_chain_prod;
4303	u_int32_t		sw_prod_bseq;
4304	struct l2_fhdr		*l2fhdr;
4305	int			i;
4306
4307	DBRUNIF(1, sc->rx_interrupts++);
4308
4309	/* Prepare the RX chain pages to be accessed by the host CPU. */
4310	for (i = 0; i < RX_PAGES; i++)
4311		bus_dmamap_sync(sc->bnx_dmatag,
4312		    sc->rx_bd_chain_map[i], 0,
4313		    sc->rx_bd_chain_map[i]->dm_mapsize,
4314		    BUS_DMASYNC_POSTWRITE);
4315
4316	/* Get the hardware's view of the RX consumer index. */
4317	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4318	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4319		hw_cons++;
4320
4321	/* Get working copies of the driver's view of the RX indices. */
4322	sw_cons = sc->rx_cons;
4323	sw_prod = sc->rx_prod;
4324	sw_prod_bseq = sc->rx_prod_bseq;
4325
4326	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4327	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4328	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
4329
4330	/* Prevent speculative reads from getting ahead of the status block. */
4331	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4332	    BUS_SPACE_BARRIER_READ);
4333
4334	/* Update some debug statistics counters */
4335	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4336	    sc->rx_low_watermark = sc->free_rx_bd);
4337	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
4338
4339	/*
4340	 * Scan through the receive chain as long
4341	 * as there is work to do.
4342	 */
4343	while (sw_cons != hw_cons) {
4344		struct mbuf *m;
4345		struct rx_bd *rxbd;
4346		unsigned int len;
4347		u_int32_t status;
4348
4349		/* Clear the mbuf pointer. */
4350		m = NULL;
4351
4352		/* Convert the producer/consumer indices to an actual
4353		 * rx_bd index.
4354		 */
4355		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4356		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4357
4358		/* Get the used rx_bd. */
4359		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4360		sc->free_rx_bd++;
4361
4362		DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
4363		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
4364
4365		/* The mbuf is stored with the last rx_bd entry of a packet. */
4366		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4367			/* Validate that this is the last rx_bd. */
4368			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4369			    printf("%s: Unexpected mbuf found in "
4370			        "rx_bd[0x%04X]!\n", sw_chain_cons);
4371				bnx_breakpoint(sc));
4372
4373			/* DRC - ToDo: If the received packet is small, say less
4374			 *             than 128 bytes, allocate a new mbuf here,
4375			 *             copy the data to that mbuf, and recycle
4376			 *             the mapped jumbo frame.
4377			 */
4378
4379			/* Unmap the mbuf from DMA space. */
4380			bus_dmamap_sync(sc->bnx_dmatag,
4381			    sc->rx_mbuf_map[sw_chain_cons], 0,
4382			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4383			    BUS_DMASYNC_POSTREAD);
4384			bus_dmamap_unload(sc->bnx_dmatag,
4385			    sc->rx_mbuf_map[sw_chain_cons]);
4386
4387			/* Remove the mbuf from RX chain. */
4388			m = sc->rx_mbuf_ptr[sw_chain_cons];
4389			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4390
4391			/*
4392			 * Frames received on the NetXteme II are prepended
4393			 * with the l2_fhdr structure which provides status
4394			 * information about the received frame (including
4395			 * VLAN tags and checksum info) and are also
4396			 * automatically adjusted to align the IP header
4397			 * (i.e. two null bytes are inserted before the
4398			 * Ethernet header).
4399			 */
4400			l2fhdr = mtod(m, struct l2_fhdr *);
4401
4402			len    = l2fhdr->l2_fhdr_pkt_len;
4403			status = l2fhdr->l2_fhdr_status;
4404
4405			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4406			    printf("Simulating l2_fhdr status error.\n");
4407			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
4408
4409			/* Watch for unusual sized frames. */
4410			DBRUNIF(((len < BNX_MIN_MTU) ||
4411			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4412			    printf("%s: Unusual frame size found. "
4413			    "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
4414			    len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4415
4416			bnx_dump_mbuf(sc, m);
4417			bnx_breakpoint(sc));
4418
4419			len -= ETHER_CRC_LEN;
4420
4421			/* Check the received frame for errors. */
4422			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
4423			    L2_FHDR_ERRORS_PHY_DECODE |
4424			    L2_FHDR_ERRORS_ALIGNMENT |
4425			    L2_FHDR_ERRORS_TOO_SHORT |
4426			    L2_FHDR_ERRORS_GIANT_FRAME)) {
4427				/* Log the error and release the mbuf. */
4428				ifp->if_ierrors++;
4429				DBRUNIF(1, sc->l2fhdr_status_errors++);
4430
4431				m_freem(m);
4432				m = NULL;
4433				goto bnx_rx_int_next_rx;
4434			}
4435
4436			/* Skip over the l2_fhdr when passing the data up
4437			 * the stack.
4438			 */
4439			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4440
4441			/* Adjust the pckt length to match the received data. */
4442			m->m_pkthdr.len = m->m_len = len;
4443
4444			/* Send the packet to the appropriate interface. */
4445			m->m_pkthdr.rcvif = ifp;
4446
4447			DBRUN(BNX_VERBOSE_RECV,
4448			    struct ether_header *eh;
4449			    eh = mtod(m, struct ether_header *);
4450			    printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
4451			    __FUNCTION__, eh->ether_dhost, ":",
4452			    eh->ether_shost, ":", htons(eh->ether_type)));
4453
4454			/* Validate the checksum. */
4455
4456			/* Check for an IP datagram. */
4457			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4458				/* Check if the IP checksum is valid. */
4459				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4460				    == 0)
4461					m->m_pkthdr.csum_flags |=
4462					    M_IPV4_CSUM_IN_OK;
4463				else
4464					DBPRINT(sc, BNX_WARN_SEND,
4465					    "%s(): Invalid IP checksum "
4466					        "= 0x%04X!\n",
4467						__FUNCTION__,
4468						l2fhdr->l2_fhdr_ip_xsum
4469						);
4470			}
4471
4472			/* Check for a valid TCP/UDP frame. */
4473			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4474			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
4475				/* Check for a good TCP/UDP checksum. */
4476				if ((status &
4477				    (L2_FHDR_ERRORS_TCP_XSUM |
4478				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4479					m->m_pkthdr.csum_flags |=
4480					    M_TCP_CSUM_IN_OK |
4481					    M_UDP_CSUM_IN_OK;
4482				} else {
4483					DBPRINT(sc, BNX_WARN_SEND,
4484					    "%s(): Invalid TCP/UDP "
4485					    "checksum = 0x%04X!\n",
4486					    __FUNCTION__,
4487					    l2fhdr->l2_fhdr_tcp_udp_xsum);
4488				}
4489			}
4490
4491			/*
4492			 * If we received a packet with a vlan tag,
4493			 * attach that information to the packet.
4494			 */
4495			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4496			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4497#if NVLAN > 0
4498				DBPRINT(sc, BNX_VERBOSE_SEND,
4499				    "%s(): VLAN tag = 0x%04X\n",
4500				    __FUNCTION__,
4501				    l2fhdr->l2_fhdr_vlan_tag);
4502
4503				m->m_pkthdr.ether_vtag =
4504				    l2fhdr->l2_fhdr_vlan_tag;
4505				m->m_flags |= M_VLANTAG;
4506#else
4507				m_freem(m);
4508				goto bnx_rx_int_next_rx;
4509#endif
4510			}
4511
4512			/* Pass the mbuf off to the upper layers. */
4513			ifp->if_ipackets++;
4514
4515bnx_rx_int_next_rx:
4516			sw_prod = NEXT_RX_BD(sw_prod);
4517		}
4518
4519		sw_cons = NEXT_RX_BD(sw_cons);
4520
4521		/* If we have a packet, pass it up the stack */
4522		if (m) {
4523			sc->rx_cons = sw_cons;
4524
4525#if NBPFILTER > 0
4526			/*
4527			 * Handle BPF listeners. Let the BPF
4528			 * user see the packet.
4529			 */
4530			if (ifp->if_bpf)
4531				bpf_mtap_ether(ifp->if_bpf, m,
4532				    BPF_DIRECTION_IN);
4533#endif
4534
4535			DBPRINT(sc, BNX_VERBOSE_RECV,
4536			    "%s(): Passing received frame up.\n", __FUNCTION__);
4537			ether_input_mbuf(ifp, m);
4538			DBRUNIF(1, sc->rx_mbuf_alloc--);
4539
4540			sw_cons = sc->rx_cons;
4541		}
4542
4543		/* Refresh hw_cons to see if there's new work */
4544		if (sw_cons == hw_cons) {
4545			hw_cons = sc->hw_rx_cons =
4546			    sblk->status_rx_quick_consumer_index0;
4547			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4548			    USABLE_RX_BD_PER_PAGE)
4549				hw_cons++;
4550		}
4551
4552		/* Prevent speculative reads from getting ahead of
4553		 * the status block.
4554		 */
4555		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4556		    BUS_SPACE_BARRIER_READ);
4557	}
4558
4559	/* No new packets to process.  Refill the RX chain and exit. */
4560	sc->rx_cons = sw_cons;
4561	bnx_fill_rx_chain(sc);
4562
4563	for (i = 0; i < RX_PAGES; i++)
4564		bus_dmamap_sync(sc->bnx_dmatag,
4565		    sc->rx_bd_chain_map[i], 0,
4566		    sc->rx_bd_chain_map[i]->dm_mapsize,
4567		    BUS_DMASYNC_PREWRITE);
4568
4569	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4570	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4571	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4572}
4573
4574/****************************************************************************/
4575/* Handles transmit completion interrupt events.                            */
4576/*                                                                          */
4577/* Returns:                                                                 */
4578/*   Nothing.                                                               */
4579/****************************************************************************/
4580void
4581bnx_tx_intr(struct bnx_softc *sc)
4582{
4583	struct status_block	*sblk = sc->status_block;
4584	struct ifnet		*ifp = &sc->arpcom.ac_if;
4585	struct bnx_pkt		*pkt;
4586	bus_dmamap_t		map;
4587	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4588
4589	DBRUNIF(1, sc->tx_interrupts++);
4590
4591	/* Get the hardware's view of the TX consumer index. */
4592	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4593
4594	/* Skip to the next entry if this is a chain page pointer. */
4595	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4596		hw_tx_cons++;
4597
4598	sw_tx_cons = sc->tx_cons;
4599
4600	/* Prevent speculative reads from getting ahead of the status block. */
4601	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4602	    BUS_SPACE_BARRIER_READ);
4603
4604	/* Cycle through any completed TX chain page entries. */
4605	while (sw_tx_cons != hw_tx_cons) {
4606#ifdef BNX_DEBUG
4607		struct tx_bd *txbd = NULL;
4608#endif
4609		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4610
4611		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4612		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4613		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4614
4615		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4616		    printf("%s: TX chain consumer out of range! "
4617		    " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
4618		    bnx_breakpoint(sc));
4619
4620		DBRUNIF(1, txbd = &sc->tx_bd_chain
4621		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4622
4623		DBRUNIF((txbd == NULL),
4624		    printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
4625		    sw_tx_chain_cons);
4626		    bnx_breakpoint(sc));
4627
4628		DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
4629		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4630
4631		mtx_enter(&sc->tx_pkt_mtx);
4632		pkt = TAILQ_FIRST(&sc->tx_used_pkts);
4633		if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
4634			TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
4635			mtx_leave(&sc->tx_pkt_mtx);
4636			/*
4637			 * Free the associated mbuf. Remember
4638			 * that only the last tx_bd of a packet
4639			 * has an mbuf pointer and DMA map.
4640			 */
4641			map = pkt->pkt_dmamap;
4642			bus_dmamap_sync(sc->bnx_dmatag, map, 0,
4643			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4644			bus_dmamap_unload(sc->bnx_dmatag, map);
4645
4646			m_freem(pkt->pkt_mbuf);
4647
4648			ifp->if_opackets++;
4649
4650			mtx_enter(&sc->tx_pkt_mtx);
4651			TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
4652		}
4653		mtx_leave(&sc->tx_pkt_mtx);
4654
4655		sc->used_tx_bd--;
4656		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4657
4658		/* Refresh hw_cons to see if there's new work. */
4659		hw_tx_cons = sc->hw_tx_cons =
4660		    sblk->status_tx_quick_consumer_index0;
4661		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4662		    USABLE_TX_BD_PER_PAGE)
4663			hw_tx_cons++;
4664
4665		/* Prevent speculative reads from getting ahead of
4666		 * the status block.
4667		 */
4668		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4669		    BUS_SPACE_BARRIER_READ);
4670	}
4671
4672	/* Clear the TX timeout timer. */
4673	ifp->if_timer = 0;
4674
4675	/* Clear the tx hardware queue full flag. */
4676	if (sc->used_tx_bd < sc->max_tx_bd) {
4677		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4678		    printf("%s: Open TX chain! %d/%d (used/total)\n",
4679			sc->bnx_dev.dv_xname, sc->used_tx_bd,
4680			sc->max_tx_bd));
4681		ifp->if_flags &= ~IFF_OACTIVE;
4682	}
4683
4684	sc->tx_cons = sw_tx_cons;
4685}
4686
4687/****************************************************************************/
4688/* Disables interrupt generation.                                           */
4689/*                                                                          */
4690/* Returns:                                                                 */
4691/*   Nothing.                                                               */
4692/****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Mask the controller's interrupt line. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read the register back to flush the posted PCI write. */
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
4699
4700/****************************************************************************/
4701/* Enables interrupt generation.                                            */
4702/*                                                                          */
4703/* Returns:                                                                 */
4704/*   Nothing.                                                               */
4705/****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	u_int32_t		val;

	/*
	 * Acknowledge up to the last status block index the driver has
	 * processed, while still keeping the interrupt line masked.
	 */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* Second write without MASK_INT unmasks the interrupt line. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/*
	 * Set COAL_NOW in the host coalescing command register —
	 * presumably forces an immediate status block update so any
	 * events that arrived while masked generate an interrupt
	 * (NOTE(review): inferred from the bit name; confirm against
	 * the chip documentation).
	 */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}
4720
4721/****************************************************************************/
4722/* Handles controller initialization.                                       */
4723/*                                                                          */
4724/* Returns:                                                                 */
4725/*   Nothing.                                                               */
4726/****************************************************************************/
void
bnx_init(void *xsc)
{
	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		ether_mtu;
	int			txpl = 1;	/* nonzero once the TX pkt pool exists */
	int			s;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Lazily create the driver-global TX packet pool on first init.
	 * The rwlock serializes creation between instances; bail out if
	 * the (interruptible) lock wait is aborted.
	 */
	if (rw_enter(&bnx_tx_pool_lk, RW_WRITE | RW_INTR) != 0)
		return;
	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_WAITOK);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", &pool_allocator_nointr);
		} else
			txpl = 0;
	}
	rw_exit(&bnx_tx_pool_lk);

	/* Without the TX packet pool the interface cannot transmit. */
	if (!txpl)
		return;

	s = splnet();

	/* Quiesce the hardware before reprogramming it. */
	bnx_stop(sc);

	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
		BNX_PRINTF(sc, "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_chipinit(sc)) {
		BNX_PRINTF(sc, "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_blockinit(sc)) {
		BNX_PRINTF(sc, "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Load our MAC address. */
	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
	bnx_set_mac_addr(sc);

	/* Calculate and program the Ethernet MRU size. */
	ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;

	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
	    __FUNCTION__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/* l2_fhdr + 2-byte IP-alignment pad + frame + 8 bytes of slack
	 * (presumably CRC/VLAN headroom — TODO confirm). */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_iff(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	/* Apply the currently selected media settings. */
	bnx_ifmedia_upd(ifp);

	/* Mark the interface up and ready to accept transmits. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second periodic driver tick. */
	timeout_add_sec(&sc->bnx_timeout, 1);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	splx(s);

	return;
}
4822
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	u_int32_t	val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/*
	 * Program the RV2P processor configuration register
	 * (NOTE(review): (BCM_PAGE_BITS - 8) << 24 appears to encode
	 * the host page size in bits 31:24 — confirm against the
	 * register definition).
	 */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Read back to flush the posted write, then give the blocks
	 * a moment to come up. */
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	/* Apply the currently selected media settings. */
	bnx_ifmedia_upd(ifp);

bnx_mgmt_init_exit:
 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
4852
4853/****************************************************************************/
4854/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4855/* memory visible to the controller.                                        */
4856/*                                                                          */
4857/* Returns:                                                                 */
4858/*   0 for success, positive value for failure.                             */
4859/****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
{
	struct bnx_pkt		*pkt;
	bus_dmamap_t		map;
	struct tx_bd 		*txbd = NULL;
	u_int16_t		vlan_tag = 0, flags = 0;
	u_int16_t		chain_prod, prod;
#ifdef BNX_DEBUG
	u_int16_t		debug_prod;
#endif
	u_int32_t		addr, prod_bseq;
	int			i, error;

	/*
	 * Grab a free packet descriptor.  If none are available,
	 * schedule a workq task to allocate more (at most one task at
	 * a time, gated by BNX_ALLOC_PKTS_FLAG) and fail this send.
	 */
	mtx_enter(&sc->tx_pkt_mtx);
	pkt = TAILQ_FIRST(&sc->tx_free_pkts);
	if (pkt == NULL) {
		if (sc->tx_pkt_count <= TOTAL_TX_BD &&
		    !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG) &&
		    workq_add_task(NULL, 0, bnx_alloc_pkts, sc, NULL) == 0)
			SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);

		mtx_leave(&sc->tx_pkt_mtx);
		return (ENOMEM);
	}
	TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	/* Transfer any checksum offload flags to the bd. */
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#if NVLAN > 0
	/* Transfer any VLAN tags to the bd. */
	if (m->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = pkt->pkt_dmamap;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: Error mapping mbuf into TX chain!\n",
		    sc->bnx_dev.dv_xname);
		sc->tx_dma_map_failures++;
		goto maperr;
	}

	/* Make sure there's room in the chain */
	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
		goto nospace;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA segment address into lo/hi words. */
		addr = (u_int32_t)map->dm_segs[i].ds_addr;
		txbd->tx_bd_haddr_lo = addr;
		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = addr;
		txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
		txbd->tx_bd_vlan_tag = vlan_tag;
		txbd->tx_bd_flags = flags;
		/* Running byte sequence number across all segments. */
		prod_bseq += map->dm_segs[i].ds_len;
		/* The first descriptor of the frame carries START. */
		if (i == 0)
			txbd->tx_bd_flags |= TX_BD_FLAGS_START;
		prod = NEXT_TX_BD(prod);
 	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= TX_BD_FLAGS_END;

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
	    map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Record the mbuf and the last descriptor index so that
	 * bnx_tx_intr() can reclaim them when the frame completes.
	 */
	pkt->pkt_mbuf = m;
	pkt->pkt_end_desc = chain_prod;

	mtx_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	sc->used_tx_bd += map->dm_nsegs;

	/* Update some debug statistics counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	/* Flush the mbuf data for device DMA before ringing the doorbell. */
	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return (0);

nospace:
	/* Not enough free descriptors; undo the DMA mapping. */
	bus_dmamap_unload(sc->bnx_dmatag, map);
maperr:
	/* Return the unused packet descriptor to the free list. */
	mtx_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	return (ENOMEM);
}
5005
5006/****************************************************************************/
5007/* Main transmit routine.                                                   */
5008/*                                                                          */
5009/* Returns:                                                                 */
5010/*   Nothing.                                                               */
5011/****************************************************************************/
void
bnx_start(struct ifnet *ifp)
{
	struct bnx_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;
	int			count = 0;	/* frames actually queued */
	u_int16_t		tx_prod, tx_chain_prod;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BNX_INFO_SEND,
		    "%s(): No link or transmit queue empty.\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {
		/* Check for any frames to send (peek without removing). */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag to wait
		 * for the NIC to drain the chain.
		 */
		if (bnx_tx_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
			    "business! Total tx_bd used = %d\n",
			    sc->used_tx_bd);
			break;
		}

		/* Encap succeeded — now actually remove the frame. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		count++;

#if NBPFILTER > 0
		/* Send a copy of the frame to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BNX_VERBOSE_SEND,
		    "%s(): No packets were dequeued\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
	    tx_chain_prod, sc->tx_prod_bseq);

	/* Start the transmit. */
	/* Ring the doorbell: give the chip the new producer index and
	 * byte sequence number. */
	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BNX_TX_TIMEOUT;

bnx_start_exit:
	return;
}
5091
5092/****************************************************************************/
5093/* Handles any IOCTL calls from the operating system.                       */
5094/*                                                                          */
5095/* Returns:                                                                 */
5096/*   0 for success, positive value for failure.                             */
5097/****************************************************************************/
int
bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bnx_softc	*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii = &sc->bnx_mii;
	int			s, error = 0;

	/* Block network interrupts while changing interface state. */
	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bnx_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				/* Already running: only the RX filter
				 * needs reprogramming (handled below). */
				error = ENETRESET;
			else
				bnx_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bnx_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;

		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
		    sc->bnx_phy_flags);

		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		/* Let the common Ethernet code handle everything else. */
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/* ENETRESET means only the RX filter needs reprogramming. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bnx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
5167
5168/****************************************************************************/
5169/* Transmit timeout handler.                                                */
5170/*                                                                          */
5171/* Returns:                                                                 */
5172/*   Nothing.                                                               */
5173/****************************************************************************/
void
bnx_watchdog(struct ifnet *ifp)
{
	struct bnx_softc	*sc = ifp->if_softc;

	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
	    bnx_dump_status_block(sc));

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
		return;

	printf("%s: Watchdog timeout occurred, resetting!\n",
	    ifp->if_xname);

	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */

	/* Reinitialize the controller from scratch to recover. */
	bnx_init(sc);

	/* Count the stalled transmission as an output error. */
	ifp->if_oerrors++;
}
5198
5199/*
5200 * Interrupt handler.
5201 */
5202/****************************************************************************/
5203/* Main interrupt entry point.  Verifies that the controller generated the  */
5204/* interrupt and then calls a separate routine for handle the various       */
5205/* interrupt causes (PHY, TX, RX).                                          */
5206/*                                                                          */
5207/* Returns:                                                                 */
5208/*   0 for success, positive value for failure.                             */
5209/****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		status_attn_bits;
	u_int16_t		status_idx;
	int			rv = 0;	/* 0 = not our interrupt, 1 = handled */

	/* Bail out if the device is not marked active. */
	if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
		return (0);

	DBRUNIF(1, sc->interrupts_generated++);

	/* Make the DMA'd status block visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	status_idx = sc->status_block->status_idx;
	if (status_idx != sc->last_status_idx ||
	    !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS),
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE)) {
		rv = 1;

		/* Ack the interrupt */
		REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
		    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx);

		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    printf("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt?  (An attention bit
		 * differing from its ack bit means it is unserviced.) */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE))
			bnx_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			DBRUN(1, sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sc->status_block->status_attn_bits);

			DBRUN(BNX_FATAL,
			    if (bnx_debug_unexpected_attention == 0)
				bnx_breakpoint(sc));

			/* Reset and reinitialize the whole controller. */
			bnx_init(sc);
			goto out;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 !=
		    sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 !=
		    sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/*
		 * Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = status_idx;

		/* Start moving packets again */
		if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
			bnx_start(ifp);
	}

out:
	/* Give the status block back to the device for further DMA. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (rv);
}
5300
5301/****************************************************************************/
5302/* Programs the various packet receive modes (broadcast and multicast).     */
5303/*                                                                          */
5304/* Returns:                                                                 */
5305/*   Nothing.                                                               */
5306/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u_int32_t		rx_mode, sort_mode;
	int			h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ac->ac_multirangecnt > 0) {
		/* Address ranges can't be hashed; fall back to all-multi. */
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash on the low byte of the address CRC. */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;

			/* Top 3 bits pick the register, low 5 bits the bit. */
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
5387
5388/****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
5391/*                                                                          */
5392/* Returns:                                                                 */
5393/*   Nothing.                                                               */
5394/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  64-bit counters arrive from the
	 * hardware as {hi,lo} 32-bit halves and are recombined here.
	 */
	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5589
5590void
5591bnx_tick(void *xsc)
5592{
5593	struct bnx_softc	*sc = xsc;
5594	struct ifnet		*ifp = &sc->arpcom.ac_if;
5595	struct mii_data		*mii = NULL;
5596	u_int32_t		msg;
5597
5598	/* Tell the firmware that the driver is still running. */
5599#ifdef BNX_DEBUG
5600	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5601#else
5602	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5603#endif
5604	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5605
5606	/* Update the statistics from the hardware statistics block. */
5607	bnx_stats_update(sc);
5608
5609	/* Schedule the next tick. */
5610	timeout_add_sec(&sc->bnx_timeout, 1);
5611
5612	/* If link is up already up then we're done. */
5613	if (sc->bnx_link)
5614		goto bnx_tick_exit;
5615
5616	mii = &sc->bnx_mii;
5617	mii_tick(mii);
5618
5619	/* Check if the link has come up. */
5620	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5621	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5622		sc->bnx_link++;
5623		/* Now that link is up, handle any outstanding TX traffic. */
5624		if (!IFQ_IS_EMPTY(&ifp->if_snd))
5625			bnx_start(ifp);
5626	}
5627
5628bnx_tick_exit:
5629	return;
5630}
5631
5632/****************************************************************************/
5633/* BNX Debug Routines                                                       */
5634/****************************************************************************/
5635#ifdef BNX_DEBUG
5636
5637/****************************************************************************/
5638/* Prints out information about an mbuf.                                    */
5639/*                                                                          */
5640/* Returns:                                                                 */
5641/*   Nothing.                                                               */
5642/****************************************************************************/
void
bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
{
	struct mbuf		*mp = m;

	if (m == NULL) {
		/* Nothing to dump for a NULL mbuf pointer. */
		printf("mbuf ptr is null!\n");
		return;
	}

	/* Walk the m_next chain and describe each mbuf in turn. */
	while (mp) {
		printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
		    mp, mp->m_len);

		if (mp->m_flags & M_EXT)
			printf("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			printf("M_PKTHDR ");
		printf("\n");

		if (mp->m_flags & M_EXT)
			/* NOTE(review): prints the mbuf address itself, not
			 * the external buffer address — confirm intent. */
			printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
			    mp, mp->m_ext.ext_size);

		mp = mp->m_next;
	}
}
5671
5672/****************************************************************************/
5673/* Prints out the mbufs in the TX mbuf chain.                               */
5674/*                                                                          */
5675/* Returns:                                                                 */
5676/*   Nothing.                                                               */
5677/****************************************************************************/
5678void
5679bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5680{
5681	struct mbuf		*m;
5682	int			i;
5683
5684	BNX_PRINTF(sc,
5685	    "----------------------------"
5686	    "  tx mbuf data  "
5687	    "----------------------------\n");
5688
5689	for (i = 0; i < count; i++) {
5690	 	m = sc->tx_mbuf_ptr[chain_prod];
5691		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5692		bnx_dump_mbuf(sc, m);
5693		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5694	}
5695
5696	BNX_PRINTF(sc,
5697	    "--------------------------------------------"
5698	    "----------------------------\n");
5699}
5700
5701/*
5702 * This routine prints the RX mbuf chain.
5703 */
5704void
5705bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5706{
5707	struct mbuf		*m;
5708	int			i;
5709
5710	BNX_PRINTF(sc,
5711	    "----------------------------"
5712	    "  rx mbuf data  "
5713	    "----------------------------\n");
5714
5715	for (i = 0; i < count; i++) {
5716	 	m = sc->rx_mbuf_ptr[chain_prod];
5717		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5718		bnx_dump_mbuf(sc, m);
5719		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5720	}
5721
5722
5723	BNX_PRINTF(sc,
5724	    "--------------------------------------------"
5725	    "----------------------------\n");
5726}
5727
5728void
5729bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5730{
5731	if (idx > MAX_TX_BD)
5732		/* Index out of range. */
5733		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5734	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5735		/* TX Chain page pointer. */
5736		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5737		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5738		    txbd->tx_bd_haddr_lo);
5739	else
5740		/* Normal tx_bd entry. */
5741		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5742		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5743		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5744		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5745		    txbd->tx_bd_flags);
5746}
5747
5748void
5749bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
5750{
5751	if (idx > MAX_RX_BD)
5752		/* Index out of range. */
5753		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
5754	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5755		/* TX Chain page pointer. */
5756		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
5757		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
5758		    rxbd->rx_bd_haddr_lo);
5759	else
5760		/* Normal tx_bd entry. */
5761		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5762		    "0x%08X, flags = 0x%08X\n", idx,
5763			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
5764			rxbd->rx_bd_len, rxbd->rx_bd_flags);
5765}
5766
5767void
5768bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
5769{
5770	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
5771	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
5772	    "tcp_udp_xsum = 0x%04X\n", idx,
5773	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
5774	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
5775	    l2fhdr->l2_fhdr_tcp_udp_xsum);
5776}
5777
5778/*
5779 * This routine prints the TX chain.
5780 */
5781void
5782bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5783{
5784	struct tx_bd		*txbd;
5785	int			i;
5786
5787	/* First some info about the tx_bd chain structure. */
5788	BNX_PRINTF(sc,
5789	    "----------------------------"
5790	    "  tx_bd  chain  "
5791	    "----------------------------\n");
5792
5793	BNX_PRINTF(sc,
5794	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
5795	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5796
5797	BNX_PRINTF(sc,
5798	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5799	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5800
5801	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5802
5803	BNX_PRINTF(sc, ""
5804	    "-----------------------------"
5805	    "   tx_bd data   "
5806	    "-----------------------------\n");
5807
5808	/* Now print out the tx_bd's themselves. */
5809	for (i = 0; i < count; i++) {
5810	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5811		bnx_dump_txbd(sc, tx_prod, txbd);
5812		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5813	}
5814
5815	BNX_PRINTF(sc,
5816	    "-----------------------------"
5817	    "--------------"
5818	    "-----------------------------\n");
5819}
5820
5821/*
5822 * This routine prints the RX chain.
5823 */
5824void
5825bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5826{
5827	struct rx_bd		*rxbd;
5828	int			i;
5829
5830	/* First some info about the tx_bd chain structure. */
5831	BNX_PRINTF(sc,
5832	    "----------------------------"
5833	    "  rx_bd  chain  "
5834	    "----------------------------\n");
5835
5836	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5837
5838	BNX_PRINTF(sc,
5839	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
5840	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5841
5842	BNX_PRINTF(sc,
5843	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5844	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5845
5846	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5847
5848	BNX_PRINTF(sc,
5849	    "----------------------------"
5850	    "   rx_bd data   "
5851	    "----------------------------\n");
5852
5853	/* Now print out the rx_bd's themselves. */
5854	for (i = 0; i < count; i++) {
5855		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5856		bnx_dump_rxbd(sc, rx_prod, rxbd);
5857		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5858	}
5859
5860	BNX_PRINTF(sc,
5861	    "----------------------------"
5862	    "--------------"
5863	    "----------------------------\n");
5864}
5865
5866/*
5867 * This routine prints the status block.
5868 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;

	sblk = sc->status_block;

	BNX_PRINTF(sc, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/*
	 * These indices are not used for normal L2 drivers;
	 * print them only when non-zero.
	 */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	BNX_PRINTF(sc, "-------------------------------------------"
	    "-----------------------------\n");
}
5954
5955/*
5956 * This routine prints the statistics block.
5957 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block	*sblk;

	sblk = sc->stats_block;

	BNX_PRINTF(sc, ""
	    "-----------------------------"
	    " Stats  Block "
	    "-----------------------------\n");

	/* 64-bit counters are printed as their raw hi:lo halves. */
	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
	    "IfHcInBadOctets      = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	/* The 32-bit counters below are printed only when non-zero. */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}
6192
6193void
6194bnx_dump_driver_state(struct bnx_softc *sc)
6195{
6196	BNX_PRINTF(sc,
6197	    "-----------------------------"
6198	    " Driver State "
6199	    "-----------------------------\n");
6200
6201	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6202	    "address\n", sc);
6203
6204	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6205	    sc->status_block);
6206
6207	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6208	    "address\n", sc->stats_block);
6209
6210	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6211	    "adddress\n", sc->tx_bd_chain);
6212
6213	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6214	    sc->rx_bd_chain);
6215
6216	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6217	    sc->tx_mbuf_ptr);
6218
6219	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6220	    sc->rx_mbuf_ptr);
6221
6222	BNX_PRINTF(sc,
6223	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6224	    sc->interrupts_generated);
6225
6226	BNX_PRINTF(sc,
6227	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6228	    sc->rx_interrupts);
6229
6230	BNX_PRINTF(sc,
6231	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6232	    sc->tx_interrupts);
6233
6234	BNX_PRINTF(sc,
6235	    "         0x%08X - (sc->last_status_idx) status block index\n",
6236	    sc->last_status_idx);
6237
6238	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6239	    sc->tx_prod);
6240
6241	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6242	    sc->tx_cons);
6243
6244	BNX_PRINTF(sc,
6245	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6246	    sc->tx_prod_bseq);
6247
6248	BNX_PRINTF(sc,
6249	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6250	    sc->tx_mbuf_alloc);
6251
6252	BNX_PRINTF(sc,
6253	    "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6254	    sc->used_tx_bd);
6255
6256	BNX_PRINTF(sc,
6257	    "         0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6258	    sc->tx_hi_watermark, sc->max_tx_bd);
6259
6260	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6261	    sc->rx_prod);
6262
6263	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6264	    sc->rx_cons);
6265
6266	BNX_PRINTF(sc,
6267	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6268	    sc->rx_prod_bseq);
6269
6270	BNX_PRINTF(sc,
6271	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6272	    sc->rx_mbuf_alloc);
6273
6274	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6275	    sc->free_rx_bd);
6276
6277	BNX_PRINTF(sc,
6278	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6279	    sc->rx_low_watermark, sc->max_rx_bd);
6280
6281	BNX_PRINTF(sc,
6282	    "         0x%08X - (sc->mbuf_alloc_failed) "
6283	    "mbuf alloc failures\n",
6284	    sc->mbuf_alloc_failed);
6285
6286	BNX_PRINTF(sc,
6287	    "         0x%0X - (sc->mbuf_sim_allocated_failed) "
6288	    "simulated mbuf alloc failures\n",
6289	    sc->mbuf_sim_alloc_failed);
6290
6291	BNX_PRINTF(sc, "-------------------------------------------"
6292	    "-----------------------------\n");
6293}
6294
6295void
6296bnx_dump_hw_state(struct bnx_softc *sc)
6297{
6298	u_int32_t		val1;
6299	int			i;
6300
6301	BNX_PRINTF(sc,
6302	    "----------------------------"
6303	    " Hardware State "
6304	    "----------------------------\n");
6305
6306	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
6307
6308	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6309	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6310	    val1, BNX_MISC_ENABLE_STATUS_BITS);
6311
6312	val1 = REG_RD(sc, BNX_DMA_STATUS);
6313	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6314
6315	val1 = REG_RD(sc, BNX_CTX_STATUS);
6316	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6317
6318	val1 = REG_RD(sc, BNX_EMAC_STATUS);
6319	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6320	    BNX_EMAC_STATUS);
6321
6322	val1 = REG_RD(sc, BNX_RPM_STATUS);
6323	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6324
6325	val1 = REG_RD(sc, BNX_TBDR_STATUS);
6326	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6327	    BNX_TBDR_STATUS);
6328
6329	val1 = REG_RD(sc, BNX_TDMA_STATUS);
6330	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6331	    BNX_TDMA_STATUS);
6332
6333	val1 = REG_RD(sc, BNX_HC_STATUS);
6334	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6335
6336	BNX_PRINTF(sc,
6337	    "----------------------------"
6338	    "----------------"
6339	    "----------------------------\n");
6340
6341	BNX_PRINTF(sc,
6342	    "----------------------------"
6343	    " Register  Dump "
6344	    "----------------------------\n");
6345
6346	for (i = 0x400; i < 0x8000; i += 0x10)
6347		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6348		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6349		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6350
6351	BNX_PRINTF(sc,
6352	    "----------------------------"
6353	    "----------------"
6354	    "----------------------------\n");
6355}
6356
6357void
6358bnx_breakpoint(struct bnx_softc *sc)
6359{
6360	/* Unreachable code to shut the compiler up about unused functions. */
6361	if (0) {
6362   		bnx_dump_txbd(sc, 0, NULL);
6363		bnx_dump_rxbd(sc, 0, NULL);
6364		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6365		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6366		bnx_dump_l2fhdr(sc, 0, NULL);
6367		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6368		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6369		bnx_dump_status_block(sc);
6370		bnx_dump_stats_block(sc);
6371		bnx_dump_driver_state(sc);
6372		bnx_dump_hw_state(sc);
6373	}
6374
6375	bnx_dump_driver_state(sc);
6376	/* Print the important status block fields. */
6377	bnx_dump_status_block(sc);
6378
6379#if 0
6380	/* Call the debugger. */
6381	breakpoint();
6382#endif
6383
6384	return;
6385}
6386#endif
6387