/* if_bnx.c revision 1.93 */
1/*	$OpenBSD: if_bnx.c,v 1.93 2011/04/13 07:28:35 dlg Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Broadcom Corporation
5 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written consent.
19 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#if 0
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
36#endif
37
38/*
39 * The following controllers are supported by this driver:
40 *   BCM5706C A2, A3
41 *   BCM5706S A2, A3
42 *   BCM5708C B1, B2
43 *   BCM5708S B1, B2
44 *   BCM5709C A1, C0
45 *   BCM5709S A1, C0
46 *   BCM5716  C0
47 *
48 * The following controllers are not supported by this driver:
49 *   BCM5706C A0, A1
50 *   BCM5706S A0, A1
51 *   BCM5708C A0, B0
52 *   BCM5708S A0, B0
53 *   BCM5709C A0  B0, B1, B2 (pre-production)
54 *   BCM5709S A0, B0, B1, B2 (pre-production)
55 */
56
57#include <dev/pci/if_bnxreg.h>
58
/*
 * Bookkeeping for one CPU firmware image fetched with loadfirmware(9).
 * "fw" points at the (host byte order) header at the start of the loaded
 * file and doubles as the "already loaded" flag; the section pointers
 * reference code/data inside the same allocation, laid out directly
 * after the header in the order listed here.
 */
struct bnx_firmware {
	char *filename;			/* image name for loadfirmware(9) */
	struct bnx_firmware_header *fw;	/* NULL until bnx_read_firmware() */

	/* COM processor sections. */
	u_int32_t *bnx_COM_FwText;
	u_int32_t *bnx_COM_FwData;
	u_int32_t *bnx_COM_FwRodata;
	u_int32_t *bnx_COM_FwBss;
	u_int32_t *bnx_COM_FwSbss;

	/* RXP processor sections. */
	u_int32_t *bnx_RXP_FwText;
	u_int32_t *bnx_RXP_FwData;
	u_int32_t *bnx_RXP_FwRodata;
	u_int32_t *bnx_RXP_FwBss;
	u_int32_t *bnx_RXP_FwSbss;

	/* TPAT processor sections. */
	u_int32_t *bnx_TPAT_FwText;
	u_int32_t *bnx_TPAT_FwData;
	u_int32_t *bnx_TPAT_FwRodata;
	u_int32_t *bnx_TPAT_FwBss;
	u_int32_t *bnx_TPAT_FwSbss;

	/* TXP processor sections. */
	u_int32_t *bnx_TXP_FwText;
	u_int32_t *bnx_TXP_FwData;
	u_int32_t *bnx_TXP_FwRodata;
	u_int32_t *bnx_TXP_FwBss;
	u_int32_t *bnx_TXP_FwSbss;
};
87
/*
 * Table of loadable firmware images, indexed by the BNX_FW_* constants
 * below.  The "fw" slot is filled in lazily by bnx_read_firmware();
 * bnx_attachhook() selects BNX_FW_B09 for BCM5709-class chips and
 * BNX_FW_B06 otherwise.
 */
struct bnx_firmware bnx_firmwares[] = {
	{ "bnx-b06",		NULL },
	{ "bnx-b09",		NULL }
};
#define	BNX_FW_B06	0
#define	BNX_FW_B09	1
94
/*
 * Bookkeeping for one rv2p firmware image, loaded on demand by
 * bnx_read_rv2p(); same layout/ownership scheme as struct bnx_firmware.
 */
struct bnx_rv2p {
	char *filename;			/* image name for loadfirmware(9) */
	struct bnx_rv2p_header *fw;	/* NULL until bnx_read_rv2p() */

	u_int32_t *bnx_rv2p_proc1;	/* points into the loaded image */
	u_int32_t *bnx_rv2p_proc2;
};
102
/*
 * rv2p images, indexed by the BNX_*RV2P constants below.  Selection is
 * done in bnx_attachhook(): the "xi" images are used on BCM5709-class
 * chips, with the "xi90" variant for Ax revisions of those chips.
 */
struct bnx_rv2p bnx_rv2ps[] = {
	{ "bnx-rv2p",		NULL },
	{ "bnx-xi-rv2p",	NULL },
	{ "bnx-xi90-rv2p",	NULL }
};
#define BNX_RV2P	0
#define BNX_XI_RV2P	1
#define BNX_XI90_RV2P	2
111
112void	nswaph(u_int32_t *p, int wcount);
113
114/****************************************************************************/
115/* BNX Driver Version                                                       */
116/****************************************************************************/
117
118#define BNX_DRIVER_VERSION	"v0.9.6"
119
120/****************************************************************************/
121/* BNX Debug Options                                                        */
122/****************************************************************************/
#ifdef BNX_DEBUG
	/* Default run-time debug verbosity. */
	u_int32_t bnx_debug = BNX_WARN;

	/*
	 * Fault-injection rates for the bnx_debug_* knobs below:
	 *          0 = Never
	 *          1 = 1 in 2,147,483,648
	 *        256 = 1 in     8,388,608
	 *       2048 = 1 in     1,048,576
	 *      65536 = 1 in        32,768
	 *    1048576 = 1 in         2,048
	 *  268435456 =	1 in             8
	 *  536870912 = 1 in             4
	 * 1073741824 = 1 in             2
	 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bnx_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bnx_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bnx_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bnx_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bnx_debug_bootcode_running_failure = 0;
#endif
151
152/****************************************************************************/
153/* PCI Device ID Table                                                      */
154/*                                                                          */
155/* Used by bnx_probe() to identify the devices supported by this driver.    */
156/****************************************************************************/
/* PCI vendor/product pairs accepted by bnx_probe(); see the supported
 * controller list at the top of this file for the revision caveats. */
const struct pci_matchid bnx_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S }
};
167
168/****************************************************************************/
169/* Supported Flash NVRAM device data.                                       */
170/****************************************************************************/
/*
 * Each entry: five flash interface configuration words (the strapping
 * value plus config/write register values -- see the "strap, cfg1, &
 * write1 need updates" notes below), followed by flags, page geometry,
 * byte address mask, total size, and a human-readable name.  Field
 * names follow struct flash_spec (cf. the designated initializers in
 * flash_5709 below).
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
260
261/*
262 * The BCM5709 controllers transparently handle the
263 * differences between Atmel 264 byte pages and all
264 * flash devices which use 256 byte pages, so no
265 * logical-to-physical mapping is required in the
266 * driver.
267 */
static struct flash_spec flash_5709 = {
	/* No logical-to-physical translation needed (see comment above). */
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,	/* 256kB */
	.name		= "5709 buffered flash (256kB)",
};
276
277/****************************************************************************/
278/* OpenBSD device entry points.                                             */
279/****************************************************************************/
280int	bnx_probe(struct device *, void *, void *);
281void	bnx_attach(struct device *, struct device *, void *);
282void	bnx_attachhook(void *);
283int	bnx_read_firmware(struct bnx_softc *sc, int);
284int	bnx_read_rv2p(struct bnx_softc *sc, int);
285#if 0
286void	bnx_detach(void *);
287#endif
288
289/****************************************************************************/
290/* BNX Debug Data Structure Dump Routines                                   */
291/****************************************************************************/
292#ifdef BNX_DEBUG
293void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
294void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
295void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
296void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
297void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
298void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
299void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
300void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
301void	bnx_dump_status_block(struct bnx_softc *);
302void	bnx_dump_stats_block(struct bnx_softc *);
303void	bnx_dump_driver_state(struct bnx_softc *);
304void	bnx_dump_hw_state(struct bnx_softc *);
305void	bnx_breakpoint(struct bnx_softc *);
306#endif
307
308/****************************************************************************/
309/* BNX Register/Memory Access Routines                                      */
310/****************************************************************************/
311u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
312void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
313void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
314int	bnx_miibus_read_reg(struct device *, int, int);
315void	bnx_miibus_write_reg(struct device *, int, int, int);
316void	bnx_miibus_statchg(struct device *);
317
318/****************************************************************************/
319/* BNX NVRAM Access Routines                                                */
320/****************************************************************************/
321int	bnx_acquire_nvram_lock(struct bnx_softc *);
322int	bnx_release_nvram_lock(struct bnx_softc *);
323void	bnx_enable_nvram_access(struct bnx_softc *);
324void	bnx_disable_nvram_access(struct bnx_softc *);
325int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
326	    u_int32_t);
327int	bnx_init_nvram(struct bnx_softc *);
328int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
329int	bnx_nvram_test(struct bnx_softc *);
330#ifdef BNX_NVRAM_WRITE_SUPPORT
331int	bnx_enable_nvram_write(struct bnx_softc *);
332void	bnx_disable_nvram_write(struct bnx_softc *);
333int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
334int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
335	    u_int32_t);
336int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
337#endif
338
339/****************************************************************************/
340/*                                                                          */
341/****************************************************************************/
342void	bnx_get_media(struct bnx_softc *);
343void	bnx_init_media(struct bnx_softc *);
344int	bnx_dma_alloc(struct bnx_softc *);
345void	bnx_dma_free(struct bnx_softc *);
346void	bnx_release_resources(struct bnx_softc *);
347
348/****************************************************************************/
349/* BNX Firmware Synchronization and Load                                    */
350/****************************************************************************/
351int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
352void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
353	    u_int32_t);
354void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
355	    struct fw_info *);
356void	bnx_init_cpus(struct bnx_softc *);
357
358void	bnx_stop(struct bnx_softc *);
359int	bnx_reset(struct bnx_softc *, u_int32_t);
360int	bnx_chipinit(struct bnx_softc *);
361int	bnx_blockinit(struct bnx_softc *);
362int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
363
364int	bnx_init_tx_chain(struct bnx_softc *);
365void	bnx_init_tx_context(struct bnx_softc *);
366void	bnx_fill_rx_chain(struct bnx_softc *);
367void	bnx_init_rx_context(struct bnx_softc *);
368int	bnx_init_rx_chain(struct bnx_softc *);
369void	bnx_free_rx_chain(struct bnx_softc *);
370void	bnx_free_tx_chain(struct bnx_softc *);
371
372int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
373void	bnx_start(struct ifnet *);
374int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
375void	bnx_watchdog(struct ifnet *);
376int	bnx_ifmedia_upd(struct ifnet *);
377void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
378void	bnx_init(void *);
379void	bnx_mgmt_init(struct bnx_softc *sc);
380
381void	bnx_init_context(struct bnx_softc *);
382void	bnx_get_mac_addr(struct bnx_softc *);
383void	bnx_set_mac_addr(struct bnx_softc *);
384void	bnx_phy_intr(struct bnx_softc *);
385void	bnx_rx_intr(struct bnx_softc *);
386void	bnx_tx_intr(struct bnx_softc *);
387void	bnx_disable_intr(struct bnx_softc *);
388void	bnx_enable_intr(struct bnx_softc *);
389
390int	bnx_intr(void *);
391void	bnx_iff(struct bnx_softc *);
392void	bnx_stats_update(struct bnx_softc *);
393void	bnx_tick(void *);
394
/* Global tx packet pool state shared by all bnx instances; the rwlock
 * serializes one-time pool setup.  NOTE(review): the allocation site is
 * not visible in this chunk -- confirm against bnx_attachhook()/ioctl. */
struct rwlock bnx_tx_pool_lk = RWLOCK_INITIALIZER("bnxplinit");
struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(void *, void *);
398
399/****************************************************************************/
400/* OpenBSD device dispatch table.                                           */
401/****************************************************************************/
/* autoconf(9) attachment glue: probe/attach entry points and softc size. */
struct cfattach bnx_ca = {
	sizeof(struct bnx_softc), bnx_probe, bnx_attach
};

/* Driver definition: device name "bnx", network-interface device class. */
struct cfdriver bnx_cd = {
	NULL, "bnx", DV_IFNET
};
409
410/****************************************************************************/
411/* Device probe function.                                                   */
412/*                                                                          */
413/* Compares the device to the driver's list of supported devices and        */
414/* reports back to the OS whether this is the right driver for the device.  */
415/*                                                                          */
416/* Returns:                                                                 */
417/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
418/****************************************************************************/
419int
420bnx_probe(struct device *parent, void *match, void *aux)
421{
422	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
423	    nitems(bnx_devices)));
424}
425
/*
 * Byte-swap an array of 32-bit words from network to host order in place.
 *
 * "wcount" is the region length in BYTES (the callers pass the firmware
 * section byte lengths); four bytes are consumed per word.  Use "> 0"
 * as the loop guard so that a length that is not a multiple of four
 * terminates instead of stepping past zero (the old "!= 0" test spun
 * forever and overflowed the signed counter in that case).
 */
void
nswaph(u_int32_t *p, int wcount)
{
	for (; wcount > 0; wcount -= 4) {
		*p = ntohl(*p);
		p++;
	}
}
434
435int
436bnx_read_firmware(struct bnx_softc *sc, int idx)
437{
438	struct bnx_firmware *bfw = &bnx_firmwares[idx];
439	struct bnx_firmware_header *hdr = bfw->fw;
440	u_char *p, *q;
441	size_t size;
442	int error;
443
444	if (hdr != NULL)
445		return (0);
446
447	if ((error = loadfirmware(bfw->filename, &p, &size)) != 0)
448		return (error);
449
450	if (size < sizeof(struct bnx_firmware_header)) {
451		free(p, M_DEVBUF);
452		return (EINVAL);
453	}
454
455	hdr = (struct bnx_firmware_header *)p;
456
457	hdr->bnx_COM_FwReleaseMajor = ntohl(hdr->bnx_COM_FwReleaseMajor);
458	hdr->bnx_COM_FwReleaseMinor = ntohl(hdr->bnx_COM_FwReleaseMinor);
459	hdr->bnx_COM_FwReleaseFix = ntohl(hdr->bnx_COM_FwReleaseFix);
460	hdr->bnx_COM_FwStartAddr = ntohl(hdr->bnx_COM_FwStartAddr);
461	hdr->bnx_COM_FwTextAddr = ntohl(hdr->bnx_COM_FwTextAddr);
462	hdr->bnx_COM_FwTextLen = ntohl(hdr->bnx_COM_FwTextLen);
463	hdr->bnx_COM_FwDataAddr = ntohl(hdr->bnx_COM_FwDataAddr);
464	hdr->bnx_COM_FwDataLen = ntohl(hdr->bnx_COM_FwDataLen);
465	hdr->bnx_COM_FwRodataAddr = ntohl(hdr->bnx_COM_FwRodataAddr);
466	hdr->bnx_COM_FwRodataLen = ntohl(hdr->bnx_COM_FwRodataLen);
467	hdr->bnx_COM_FwBssAddr = ntohl(hdr->bnx_COM_FwBssAddr);
468	hdr->bnx_COM_FwBssLen = ntohl(hdr->bnx_COM_FwBssLen);
469	hdr->bnx_COM_FwSbssAddr = ntohl(hdr->bnx_COM_FwSbssAddr);
470	hdr->bnx_COM_FwSbssLen = ntohl(hdr->bnx_COM_FwSbssLen);
471
472	hdr->bnx_RXP_FwReleaseMajor = ntohl(hdr->bnx_RXP_FwReleaseMajor);
473	hdr->bnx_RXP_FwReleaseMinor = ntohl(hdr->bnx_RXP_FwReleaseMinor);
474	hdr->bnx_RXP_FwReleaseFix = ntohl(hdr->bnx_RXP_FwReleaseFix);
475	hdr->bnx_RXP_FwStartAddr = ntohl(hdr->bnx_RXP_FwStartAddr);
476	hdr->bnx_RXP_FwTextAddr = ntohl(hdr->bnx_RXP_FwTextAddr);
477	hdr->bnx_RXP_FwTextLen = ntohl(hdr->bnx_RXP_FwTextLen);
478	hdr->bnx_RXP_FwDataAddr = ntohl(hdr->bnx_RXP_FwDataAddr);
479	hdr->bnx_RXP_FwDataLen = ntohl(hdr->bnx_RXP_FwDataLen);
480	hdr->bnx_RXP_FwRodataAddr = ntohl(hdr->bnx_RXP_FwRodataAddr);
481	hdr->bnx_RXP_FwRodataLen = ntohl(hdr->bnx_RXP_FwRodataLen);
482	hdr->bnx_RXP_FwBssAddr = ntohl(hdr->bnx_RXP_FwBssAddr);
483	hdr->bnx_RXP_FwBssLen = ntohl(hdr->bnx_RXP_FwBssLen);
484	hdr->bnx_RXP_FwSbssAddr = ntohl(hdr->bnx_RXP_FwSbssAddr);
485	hdr->bnx_RXP_FwSbssLen = ntohl(hdr->bnx_RXP_FwSbssLen);
486
487	hdr->bnx_TPAT_FwReleaseMajor = ntohl(hdr->bnx_TPAT_FwReleaseMajor);
488	hdr->bnx_TPAT_FwReleaseMinor = ntohl(hdr->bnx_TPAT_FwReleaseMinor);
489	hdr->bnx_TPAT_FwReleaseFix = ntohl(hdr->bnx_TPAT_FwReleaseFix);
490	hdr->bnx_TPAT_FwStartAddr = ntohl(hdr->bnx_TPAT_FwStartAddr);
491	hdr->bnx_TPAT_FwTextAddr = ntohl(hdr->bnx_TPAT_FwTextAddr);
492	hdr->bnx_TPAT_FwTextLen = ntohl(hdr->bnx_TPAT_FwTextLen);
493	hdr->bnx_TPAT_FwDataAddr = ntohl(hdr->bnx_TPAT_FwDataAddr);
494	hdr->bnx_TPAT_FwDataLen = ntohl(hdr->bnx_TPAT_FwDataLen);
495	hdr->bnx_TPAT_FwRodataAddr = ntohl(hdr->bnx_TPAT_FwRodataAddr);
496	hdr->bnx_TPAT_FwRodataLen = ntohl(hdr->bnx_TPAT_FwRodataLen);
497	hdr->bnx_TPAT_FwBssAddr = ntohl(hdr->bnx_TPAT_FwBssAddr);
498	hdr->bnx_TPAT_FwBssLen = ntohl(hdr->bnx_TPAT_FwBssLen);
499	hdr->bnx_TPAT_FwSbssAddr = ntohl(hdr->bnx_TPAT_FwSbssAddr);
500	hdr->bnx_TPAT_FwSbssLen = ntohl(hdr->bnx_TPAT_FwSbssLen);
501
502	hdr->bnx_TXP_FwReleaseMajor = ntohl(hdr->bnx_TXP_FwReleaseMajor);
503	hdr->bnx_TXP_FwReleaseMinor = ntohl(hdr->bnx_TXP_FwReleaseMinor);
504	hdr->bnx_TXP_FwReleaseFix = ntohl(hdr->bnx_TXP_FwReleaseFix);
505	hdr->bnx_TXP_FwStartAddr = ntohl(hdr->bnx_TXP_FwStartAddr);
506	hdr->bnx_TXP_FwTextAddr = ntohl(hdr->bnx_TXP_FwTextAddr);
507	hdr->bnx_TXP_FwTextLen = ntohl(hdr->bnx_TXP_FwTextLen);
508	hdr->bnx_TXP_FwDataAddr = ntohl(hdr->bnx_TXP_FwDataAddr);
509	hdr->bnx_TXP_FwDataLen = ntohl(hdr->bnx_TXP_FwDataLen);
510	hdr->bnx_TXP_FwRodataAddr = ntohl(hdr->bnx_TXP_FwRodataAddr);
511	hdr->bnx_TXP_FwRodataLen = ntohl(hdr->bnx_TXP_FwRodataLen);
512	hdr->bnx_TXP_FwBssAddr = ntohl(hdr->bnx_TXP_FwBssAddr);
513	hdr->bnx_TXP_FwBssLen = ntohl(hdr->bnx_TXP_FwBssLen);
514	hdr->bnx_TXP_FwSbssAddr = ntohl(hdr->bnx_TXP_FwSbssAddr);
515	hdr->bnx_TXP_FwSbssLen = ntohl(hdr->bnx_TXP_FwSbssLen);
516
517	q = p + sizeof(*hdr);
518
519	bfw->bnx_COM_FwText = (u_int32_t *)q;
520	q += hdr->bnx_COM_FwTextLen;
521	nswaph(bfw->bnx_COM_FwText, hdr->bnx_COM_FwTextLen);
522	bfw->bnx_COM_FwData = (u_int32_t *)q;
523	q += hdr->bnx_COM_FwDataLen;
524	nswaph(bfw->bnx_COM_FwData, hdr->bnx_COM_FwDataLen);
525	bfw->bnx_COM_FwRodata = (u_int32_t *)q;
526	q += hdr->bnx_COM_FwRodataLen;
527	nswaph(bfw->bnx_COM_FwRodata, hdr->bnx_COM_FwRodataLen);
528	bfw->bnx_COM_FwBss = (u_int32_t *)q;
529	q += hdr->bnx_COM_FwBssLen;
530	nswaph(bfw->bnx_COM_FwBss, hdr->bnx_COM_FwBssLen);
531	bfw->bnx_COM_FwSbss = (u_int32_t *)q;
532	q += hdr->bnx_COM_FwSbssLen;
533	nswaph(bfw->bnx_COM_FwSbss, hdr->bnx_COM_FwSbssLen);
534
535	bfw->bnx_RXP_FwText = (u_int32_t *)q;
536	q += hdr->bnx_RXP_FwTextLen;
537	nswaph(bfw->bnx_RXP_FwText, hdr->bnx_RXP_FwTextLen);
538	bfw->bnx_RXP_FwData = (u_int32_t *)q;
539	q += hdr->bnx_RXP_FwDataLen;
540	nswaph(bfw->bnx_RXP_FwData, hdr->bnx_RXP_FwDataLen);
541	bfw->bnx_RXP_FwRodata = (u_int32_t *)q;
542	q += hdr->bnx_RXP_FwRodataLen;
543	nswaph(bfw->bnx_RXP_FwRodata, hdr->bnx_RXP_FwRodataLen);
544	bfw->bnx_RXP_FwBss = (u_int32_t *)q;
545	q += hdr->bnx_RXP_FwBssLen;
546	nswaph(bfw->bnx_RXP_FwBss, hdr->bnx_RXP_FwBssLen);
547	bfw->bnx_RXP_FwSbss = (u_int32_t *)q;
548	q += hdr->bnx_RXP_FwSbssLen;
549	nswaph(bfw->bnx_RXP_FwSbss, hdr->bnx_RXP_FwSbssLen);
550
551	bfw->bnx_TPAT_FwText = (u_int32_t *)q;
552	q += hdr->bnx_TPAT_FwTextLen;
553	nswaph(bfw->bnx_TPAT_FwText, hdr->bnx_TPAT_FwTextLen);
554	bfw->bnx_TPAT_FwData = (u_int32_t *)q;
555	q += hdr->bnx_TPAT_FwDataLen;
556	nswaph(bfw->bnx_TPAT_FwData, hdr->bnx_TPAT_FwDataLen);
557	bfw->bnx_TPAT_FwRodata = (u_int32_t *)q;
558	q += hdr->bnx_TPAT_FwRodataLen;
559	nswaph(bfw->bnx_TPAT_FwRodata, hdr->bnx_TPAT_FwRodataLen);
560	bfw->bnx_TPAT_FwBss = (u_int32_t *)q;
561	q += hdr->bnx_TPAT_FwBssLen;
562	nswaph(bfw->bnx_TPAT_FwBss, hdr->bnx_TPAT_FwBssLen);
563	bfw->bnx_TPAT_FwSbss = (u_int32_t *)q;
564	q += hdr->bnx_TPAT_FwSbssLen;
565	nswaph(bfw->bnx_TPAT_FwSbss, hdr->bnx_TPAT_FwSbssLen);
566
567	bfw->bnx_TXP_FwText = (u_int32_t *)q;
568	q += hdr->bnx_TXP_FwTextLen;
569	nswaph(bfw->bnx_TXP_FwText, hdr->bnx_TXP_FwTextLen);
570	bfw->bnx_TXP_FwData = (u_int32_t *)q;
571	q += hdr->bnx_TXP_FwDataLen;
572	nswaph(bfw->bnx_TXP_FwData, hdr->bnx_TXP_FwDataLen);
573	bfw->bnx_TXP_FwRodata = (u_int32_t *)q;
574	q += hdr->bnx_TXP_FwRodataLen;
575	nswaph(bfw->bnx_TXP_FwRodata, hdr->bnx_TXP_FwRodataLen);
576	bfw->bnx_TXP_FwBss = (u_int32_t *)q;
577	q += hdr->bnx_TXP_FwBssLen;
578	nswaph(bfw->bnx_TXP_FwBss, hdr->bnx_TXP_FwBssLen);
579	bfw->bnx_TXP_FwSbss = (u_int32_t *)q;
580	q += hdr->bnx_TXP_FwSbssLen;
581	nswaph(bfw->bnx_TXP_FwSbss, hdr->bnx_TXP_FwSbssLen);
582
583	if (q - p != size) {
584		free(p, M_DEVBUF);
585		hdr = NULL;
586		return EINVAL;
587	}
588
589	bfw->fw = hdr;
590
591	return (0);
592}
593
594int
595bnx_read_rv2p(struct bnx_softc *sc, int idx)
596{
597	struct bnx_rv2p *rv2p = &bnx_rv2ps[idx];
598	struct bnx_rv2p_header *hdr = rv2p->fw;
599	u_char *p, *q;
600	size_t size;
601	int error;
602
603	if (hdr != NULL)
604		return (0);
605
606	if ((error = loadfirmware(rv2p->filename, &p, &size)) != 0)
607		return (error);
608
609	if (size < sizeof(struct bnx_rv2p_header)) {
610		free(p, M_DEVBUF);
611		return (EINVAL);
612	}
613
614	hdr = (struct bnx_rv2p_header *)p;
615
616	hdr->bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
617	hdr->bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
618
619	q = p + sizeof(*hdr);
620
621	rv2p->bnx_rv2p_proc1 = (u_int32_t *)q;
622	q += hdr->bnx_rv2p_proc1len;
623	nswaph(rv2p->bnx_rv2p_proc1, hdr->bnx_rv2p_proc1len);
624	rv2p->bnx_rv2p_proc2 = (u_int32_t *)q;
625	q += hdr->bnx_rv2p_proc2len;
626	nswaph(rv2p->bnx_rv2p_proc2, hdr->bnx_rv2p_proc2len);
627
628	if (q - p != size) {
629		free(p, M_DEVBUF);
630		return EINVAL;
631	}
632
633	rv2p->fw = hdr;
634
635	return (0);
636}
637
638
639/****************************************************************************/
640/* Device attach function.                                                  */
641/*                                                                          */
642/* Allocates device resources, performs secondary chip identification,      */
643/* resets and initializes the hardware, and initializes driver instance     */
644/* variables.                                                               */
645/*                                                                          */
646/* Returns:                                                                 */
647/*   0 on success, positive value on failure.                               */
648/****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc	*sc = (struct bnx_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	u_int32_t		val;
	pcireg_t		memtype;
	const char 		*intrstr = NULL;

	/* Keep a copy of the attach args for use by bnx_attachhook(). */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers (BAR0).
	*/
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &sc->bnx_ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		/* Per-function shared memory window (function << 2). */
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* Derive the PCI-X bus speed from the clock control bits. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	printf(": %s\n", intrstr);

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih, IPL_NET,
	    bnx_intr, sc, sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Defer firmware loading and chip init until the disks are up. */
	mountroothook_establish(bnx_attachhook, sc);
	return;

bnx_attach_fail:
	bnx_release_resources(sc);
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
769
/*
 * Finish the attach started in bnx_attach().  Runs from a mountroot
 * hook (see bnx_attach()) so the firmware and RV2P images can be read
 * from the filesystem, then resets/initializes the controller and
 * attaches the ifnet and MII layers.
 */
void
bnx_attachhook(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct pci_attach_args *pa = &sc->bnx_pa;
	struct ifnet		*ifp;
	int			error, mii_flags = 0;
	int			fw = BNX_FW_B06;
	int			rv2p = BNX_RV2P;

	/* 5709 parts use their own firmware image; Ax revisions also
	 * need a different RV2P image than later steppings. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		fw = BNX_FW_B09;
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = BNX_XI90_RV2P;
		else
			rv2p = BNX_XI_RV2P;
	}

	/* Load the main firmware image. */
	if ((error = bnx_read_firmware(sc, fw)) != 0) {
		printf("%s: error %d, could not read firmware\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Load the RX processor (RV2P) image. */
	if ((error = bnx_read_rv2p(sc, rv2p)) != 0) {
		printf("%s: error %d, could not read rv2p\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf("%s: Controller initialization failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf("%s: NVRAM test failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	m_clsetwms(ifp, MCLBYTES, 2, USABLE_RX_BD);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef BNX_CSUM
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	sc->mbuf_alloc_size = BNX_MAX_MRU;

	printf("%s: address %s\n", sc->bnx_dev.dv_xname,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Hook up the MII bus callbacks. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	/* Fall back to a fixed manual medium if no PHY was found. */
	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bnx_timeout, bnx_tick, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	/* Handle interrupts */
	sc->bnx_flags |= BNX_ACTIVE_FLAG;

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
954
955/****************************************************************************/
956/* Device detach function.                                                  */
957/*                                                                          */
958/* Stops the controller, resets the controller, and releases resources.     */
959/*                                                                          */
960/* Returns:                                                                 */
961/*   0 on success, positive value on failure.                               */
962/****************************************************************************/
#if 0
/*
 * NOTE(review): disabled FreeBSD-derived detach code kept for
 * reference only.  As written it would not compile if enabled:
 * 'sc' is dereferenced (for 'ifp') before it is assigned, 'dev' is
 * never declared, and a value is returned from a void function.
 */
void
bnx_detach(void *xsc)
{
	struct bnx_softc *sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	sc = device_get_softc(dev);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop and reset the controller. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->bnx_mii);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
#endif
992
993/****************************************************************************/
994/* Indirect register read.                                                  */
995/*                                                                          */
996/* Reads NetXtreme II registers using an index/data register pair in PCI    */
997/* configuration space.  Using this mechanism avoids issues with posted     */
998/* reads but is much slower than memory-mapped I/O.                         */
999/*                                                                          */
1000/* Returns:                                                                 */
1001/*   The value of the register.                                             */
1002/****************************************************************************/
1003u_int32_t
1004bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
1005{
1006	struct pci_attach_args	*pa = &(sc->bnx_pa);
1007
1008	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1009	    offset);
1010#ifdef BNX_DEBUG
1011	{
1012		u_int32_t val;
1013		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
1014		    BNX_PCICFG_REG_WINDOW);
1015		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
1016		    "val = 0x%08X\n", __FUNCTION__, offset, val);
1017		return (val);
1018	}
1019#else
1020	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
1021#endif
1022}
1023
1024/****************************************************************************/
1025/* Indirect register write.                                                 */
1026/*                                                                          */
1027/* Writes NetXtreme II registers using an index/data register pair in PCI   */
1028/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1030/*                                                                          */
1031/* Returns:                                                                 */
1032/*   Nothing.                                                               */
1033/****************************************************************************/
1034void
1035bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1036{
1037	struct pci_attach_args  *pa = &(sc->bnx_pa);
1038
1039	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1040		__FUNCTION__, offset, val);
1041
1042	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1043	    offset);
1044	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1045}
1046
1047/****************************************************************************/
1048/* Context memory write.                                                    */
1049/*                                                                          */
1050/* The NetXtreme II controller uses context memory to track connection      */
1051/* information for L2 and higher network protocols.                         */
1052/*                                                                          */
1053/* Returns:                                                                 */
1054/*   Nothing.                                                               */
1055/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
    u_int32_t ctx_val)
{
	u_int32_t idx, offset = ctx_offset + cid_addr;
	u_int32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* 5709: post data and a write request, then poll until the
		 * hardware clears the request bit (up to 5 tries, 5us apart). */
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
				"cid_addr = 0x%08X, offset = 0x%08X!\n",
				__FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		/* Older parts: program the address and data directly. */
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}
1087
1088/****************************************************************************/
1089/* PHY register read.                                                       */
1090/*                                                                          */
1091/* Implements register reads on the MII bus.                                */
1092/*                                                                          */
1093/* Returns:                                                                 */
1094/*   The value of the register.                                             */
1095/****************************************************************************/
1096int
1097bnx_miibus_read_reg(struct device *dev, int phy, int reg)
1098{
1099	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1100	u_int32_t		val;
1101	int			i;
1102
1103	/* Make sure we are accessing the correct PHY address. */
1104	if (phy != sc->bnx_phy_addr) {
1105		DBPRINT(sc, BNX_VERBOSE,
1106		    "Invalid PHY address %d for PHY read!\n", phy);
1107		return(0);
1108	}
1109
1110	/*
1111	 * The BCM5709S PHY is an IEEE Clause 45 PHY
1112	 * with special mappings to work with IEEE
1113	 * Clause 22 register accesses.
1114	 */
1115	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1116		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1117			reg += 0x10;
1118	}
1119
1120	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1121		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1122		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1123
1124		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1125		REG_RD(sc, BNX_EMAC_MDIO_MODE);
1126
1127		DELAY(40);
1128	}
1129
1130	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
1131	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
1132	    BNX_EMAC_MDIO_COMM_START_BUSY;
1133	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);
1134
1135	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1136		DELAY(10);
1137
1138		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1139		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1140			DELAY(5);
1141
1142			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1143			val &= BNX_EMAC_MDIO_COMM_DATA;
1144
1145			break;
1146		}
1147	}
1148
1149	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
1150		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
1151		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1152		val = 0x0;
1153	} else
1154		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1155
1156	DBPRINT(sc, BNX_EXCESSIVE,
1157	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
1158	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1159
1160	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1161		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1162		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1163
1164		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1165		REG_RD(sc, BNX_EMAC_MDIO_MODE);
1166
1167		DELAY(40);
1168	}
1169
1170	return (val & 0xffff);
1171}
1172
1173/****************************************************************************/
1174/* PHY register write.                                                      */
1175/*                                                                          */
1176/* Implements register writes on the MII bus.                               */
1177/*                                                                          */
1178/* Returns:                                                                 */
/*   Nothing.                                                               */
1180/****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val1;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __FUNCTION__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/* Temporarily disable hardware auto-polling so it cannot contend
	 * with us for the MDIO interface. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the write command and poll for completion (BUSY clears). */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	/* Re-enable auto-polling if we turned it off above. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
1249
1250/****************************************************************************/
1251/* MII bus status change.                                                   */
1252/*                                                                          */
1253/* Called by the MII bus driver when the PHY establishes link to set the    */
1254/* MAC interface registers.                                                 */
1255/*                                                                          */
1256/* Returns:                                                                 */
1257/*   Nothing.                                                               */
1258/****************************************************************************/
1259void
1260bnx_miibus_statchg(struct device *dev)
1261{
1262	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1263	struct mii_data		*mii = &sc->bnx_mii;
1264	int			val;
1265
1266	val = REG_RD(sc, BNX_EMAC_MODE);
1267	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1268		BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1269		BNX_EMAC_MODE_25G);
1270
1271	/* Set MII or GMII interface based on the speed
1272	 * negotiated by the PHY.
1273	 */
1274	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1275	case IFM_10_T:
1276		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1277			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1278			val |= BNX_EMAC_MODE_PORT_MII_10;
1279			break;
1280		}
1281		/* FALLTHROUGH */
1282	case IFM_100_TX:
1283		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1284		val |= BNX_EMAC_MODE_PORT_MII;
1285		break;
1286	case IFM_2500_SX:
1287		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1288		val |= BNX_EMAC_MODE_25G;
1289		/* FALLTHROUGH */
1290	case IFM_1000_T:
1291	case IFM_1000_SX:
1292		DBPRINT(sc, BNX_INFO, "Enablinb GMII interface.\n");
1293		val |= BNX_EMAC_MODE_PORT_GMII;
1294		break;
1295	default:
1296		val |= BNX_EMAC_MODE_PORT_GMII;
1297		break;
1298	}
1299
1300	/* Set half or full duplex based on the duplicity
1301	 * negotiated by the PHY.
1302	 */
1303	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1304		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1305		val |= BNX_EMAC_MODE_HALF_DUPLEX;
1306	} else
1307		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1308
1309	REG_WR(sc, BNX_EMAC_MODE, val);
1310}
1311
1312/****************************************************************************/
1313/* Acquire NVRAM lock.                                                      */
1314/*                                                                          */
1315/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
1317/* for use by the driver.                                                   */
1318/*                                                                          */
1319/* Returns:                                                                 */
1320/*   0 on success, positive value on failure.                               */
1321/****************************************************************************/
1322int
1323bnx_acquire_nvram_lock(struct bnx_softc *sc)
1324{
1325	u_int32_t		val;
1326	int			j;
1327
1328	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1329
1330	/* Request access to the flash interface. */
1331	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1332	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1333		val = REG_RD(sc, BNX_NVM_SW_ARB);
1334		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1335			break;
1336
1337		DELAY(5);
1338	}
1339
1340	if (j >= NVRAM_TIMEOUT_COUNT) {
1341		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1342		return (EBUSY);
1343	}
1344
1345	return (0);
1346}
1347
1348/****************************************************************************/
1349/* Release NVRAM lock.                                                      */
1350/*                                                                          */
1351/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
1353/* for use by the driver.                                                   */
1354/*                                                                          */
1355/* Returns:                                                                 */
1356/*   0 on success, positive value on failure.                               */
1357/****************************************************************************/
1358int
1359bnx_release_nvram_lock(struct bnx_softc *sc)
1360{
1361	int			j;
1362	u_int32_t		val;
1363
1364	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1365
1366	/* Relinquish nvram interface. */
1367	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1368
1369	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1370		val = REG_RD(sc, BNX_NVM_SW_ARB);
1371		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1372			break;
1373
1374		DELAY(5);
1375	}
1376
1377	if (j >= NVRAM_TIMEOUT_COUNT) {
1378		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1379		return (EBUSY);
1380	}
1381
1382	return (0);
1383}
1384
1385#ifdef BNX_NVRAM_WRITE_SUPPORT
1386/****************************************************************************/
1387/* Enable NVRAM write access.                                               */
1388/*                                                                          */
1389/* Before writing to NVRAM the caller must enable NVRAM writes.             */
1390/*                                                                          */
1391/* Returns:                                                                 */
1392/*   0 on success, positive value on failure.                               */
1393/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	/* Allow NVRAM writes through the PCI interface. */
	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash additionally needs a write-enable (WREN)
	 * command issued to the flash device itself; wait for it to
	 * complete. */
	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}
1427
1428/****************************************************************************/
1429/* Disable NVRAM write access.                                              */
1430/*                                                                          */
1431/* When the caller is finished writing to NVRAM write access must be        */
1432/* disabled.                                                                */
1433/*                                                                          */
1434/* Returns:                                                                 */
1435/*   Nothing.                                                               */
1436/****************************************************************************/
1437void
1438bnx_disable_nvram_write(struct bnx_softc *sc)
1439{
1440	u_int32_t		val;
1441
1442	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1443
1444	val = REG_RD(sc, BNX_MISC_CFG);
1445	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1446}
1447#endif
1448
1449/****************************************************************************/
1450/* Enable NVRAM access.                                                     */
1451/*                                                                          */
1452/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1454/*                                                                          */
1455/* Returns:                                                                 */
1456/*   Nothing.                                                               */
1457/****************************************************************************/
1458void
1459bnx_enable_nvram_access(struct bnx_softc *sc)
1460{
1461	u_int32_t		val;
1462
1463	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1464
1465	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1466	/* Enable both bits, even on read. */
1467	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1468	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1469}
1470
1471/****************************************************************************/
1472/* Disable NVRAM access.                                                    */
1473/*                                                                          */
1474/* When the caller is finished accessing NVRAM access must be disabled.     */
1475/*                                                                          */
1476/* Returns:                                                                 */
1477/*   Nothing.                                                               */
1478/****************************************************************************/
1479void
1480bnx_disable_nvram_access(struct bnx_softc *sc)
1481{
1482	u_int32_t		val;
1483
1484	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1485
1486	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1487
1488	/* Disable both bits, even after read. */
1489	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1490	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1491}
1492
1493#ifdef BNX_NVRAM_WRITE_SUPPORT
1494/****************************************************************************/
1495/* Erase NVRAM page before writing.                                         */
1496/*                                                                          */
1497/* Non-buffered flash parts require that a page be erased before it is      */
1498/* written.                                                                 */
1499/*                                                                          */
1500/* Returns:                                                                 */
1501/*   0 on success, positive value on failure.                               */
1502/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t		cmd;
	int			j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	/* The erase never completed; report EBUSY to the caller. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
1545#endif /* BNX_NVRAM_WRITE_SUPPORT */
1546
1547/****************************************************************************/
1548/* Read a dword (32 bits) from NVRAM.                                       */
1549/*                                                                          */
1550/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1551/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1552/*                                                                          */
1553/* Returns:                                                                 */
1554/*   0 on success and the 32 bit value read, positive value on failure.     */
1555/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* NOTE: ret_val must point to at least 4 bytes of storage; the
	 * dword read from NVRAM is copied there via memcpy(). */

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* Convert from the device's big-endian layout
			 * before handing the bytes back. */
			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1606
1607#ifdef BNX_NVRAM_WRITE_SUPPORT
1608/****************************************************************************/
1609/* Write a dword (32 bits) to NVRAM.                                        */
1610/*                                                                          */
1611/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1612/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1613/* enabled NVRAM write access.                                              */
1614/*                                                                          */
1615/* Returns:                                                                 */
1616/*   0 on success, positive value on failure.                               */
1617/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t		cmd, val32;
	int			j;

	/* NOTE: val must point to the 4 bytes to be written. */

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	/* The write never completed; report EBUSY to the caller. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
1661#endif /* BNX_NVRAM_WRITE_SUPPORT */
1662
1663/****************************************************************************/
1664/* Initialize NVRAM access.                                                 */
1665/*                                                                          */
1666/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1667/* access that device.                                                      */
1668/*                                                                          */
1669/* Returns:                                                                 */
1670/*   0 on success, positive value on failure.                               */
1671/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	u_int32_t		val;
	int			j, entry_count, rc = 0;
	struct flash_spec	*flash;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * The BCM5709 uses a fixed flash description; skip the strap-based
	 * device probe entirely and go straight to the size lookup.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc,BNX_INFO_LOAD,
			"bnx_init_nvram(): Flash WAS reconfigured.\n");

		/*
		 * The bootcode already programmed the part; match the
		 * table entry on the backup strap bits of NVM_CFG1 only.
		 */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u_int32_t mask;

		DBPRINT(sc,BNX_INFO_LOAD,
			"bnx_init_nvram(): Flash was NOT reconfigured.\n");

		/* Bit 23 of NVM_CFG1 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return (rc);

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
	}

bnx_init_nvram_get_flash_size:
	/*
	 * Fetch the NVRAM size from the shared memory interface; fall
	 * back to the flash table's total size if it is not reported.
	 */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
1774
1775/****************************************************************************/
1776/* Read an arbitrary range of data from NVRAM.                              */
1777/*                                                                          */
1778/* Prepares the NVRAM interface for access and reads the requested data     */
1779/* into the supplied buffer.                                                */
1780/*                                                                          */
1781/* Returns:                                                                 */
1782/*   0 on success and the data read, positive value on failure.             */
1783/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
    int buf_size)
{
	int			rc = 0;
	u_int32_t		cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return (0);

	/* Request access to the flash interface. */
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);

	/* Enable access to flash interface */
	bnx_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/*
	 * Handle a read starting on a non-dword boundary: read the
	 * enclosing dword and copy out only the requested bytes.
	 */
	if (offset32 & 3) {
		u_int8_t buf[4];
		u_int32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
		} else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		/*
		 * Bail out through the common exit path so NVRAM access
		 * is disabled and the lock released; the previous code
		 * returned here with the NVRAM lock still held.
		 */
		if (rc)
			goto bnx_nvram_read_exit;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	/* Round the remaining length up to a whole number of dwords. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		/* Single remaining dword; bounce it to honor 'extra'. */
		u_int8_t buf[4];

		if (cmd_flags)
			cmd_flags = BNX_NVM_COMMAND_LAST;
		else
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		u_int8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Read all whole dwords but the last directly into place. */
		while (len32 > 4 && rc == 0) {
			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		/* Release the hardware before reporting a read failure. */
		if (rc)
			goto bnx_nvram_read_exit;

		/* Bounce the final dword so trailing pad bytes are dropped. */
		cmd_flags = BNX_NVM_COMMAND_LAST;
		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

bnx_nvram_read_exit:
	/* Disable access to flash interface and release the lock. */
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);

	return (rc);
}
1890
1891#ifdef BNX_NVRAM_WRITE_SUPPORT
1892/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
1894/*                                                                          */
1895/* Prepares the NVRAM interface for write access and writes the requested   */
1896/* data from the supplied buffer.  The caller is responsible for            */
1897/* calculating any appropriate CRCs.                                        */
1898/*                                                                          */
1899/* Returns:                                                                 */
1900/*   0 on success, positive value on failure.                               */
1901/****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
    int buf_size)
{
	u_int32_t		written, offset32, len32;
	u_int8_t		*buf, start[4], end[4];
	int			rc = 0;
	int			align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/*
	 * If the write does not start on a dword boundary, read the
	 * leading dword so its untouched bytes can be written back.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return (rc);
	}

	/* Likewise preserve the trailing bytes of a ragged final dword. */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				return (rc);
			}
		}
	}

	/*
	 * Build a dword-aligned shadow copy of the caller's data,
	 * padded at both ends with the preserved bytes.
	 */
	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return (ENOMEM);

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Write the data out one flash page (erase unit) at a time. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u_int32_t page_start, page_end, data_start, data_end;
		u_int32_t addr, cmd_flags;
		int i;
		/* One page of non-buffered flash for read-modify-write. */
		u_int8_t flash_buffer[264];

	    /* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx_enable_nvram_access(sc);

		cmd_flags = BNX_NVM_COMMAND_FIRST;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
				if (j == (sc->bnx_flash_info->page_size - 4))
					cmd_flags |= BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_read_dword(sc,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx_enable_nvram_write(sc)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end.
		 * Advance 'i' by 4 here as well so it stays a byte index
		 * into flash_buffer; the old 'i++' desynchronized it and
		 * corrupted the data_end..page_end write-back below for
		 * non-buffered flash parts. */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
			    && (addr == data_end - 4))) {

				cmd_flags |= BNX_NVM_COMMAND_LAST;
			}

			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {

				if (addr == page_end-4)
					cmd_flags = BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	return (rc);
}
2077#endif /* BNX_NVRAM_WRITE_SUPPORT */
2078
2079/****************************************************************************/
2080/* Verifies that NVRAM is accessible and contains valid data.               */
2081/*                                                                          */
2082/* Reads the configuration data from NVRAM and verifies that the CRC is     */
2083/* correct.                                                                 */
2084/*                                                                          */
2085/* Returns:                                                                 */
2086/*   0 on success, positive value on failure.                               */
2087/****************************************************************************/
int
bnx_nvram_test(struct bnx_softc *sc)
{
	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
	u_int8_t		*data = (u_int8_t *) buf;
	int			rc = 0;
	u_int32_t		magic, csum;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
		goto bnx_nvram_test_done;

	magic = bnx_be32toh(buf[0]);
	if (magic != BNX_NVRAM_MAGIC) {
		rc = ENODEV;
		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
		goto bnx_nvram_test_done;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
		goto bnx_nvram_test_done;

	/* Manufacturing information: CRC over the first 0x100 bytes. */
	csum = ether_crc32_le(data, 0x100);
	if (csum != BNX_CRC32_RESIDUAL) {
		rc = ENODEV;
		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
		goto bnx_nvram_test_done;
	}

	/*
	 * Feature configuration: CRC over the next 0x100 bytes.
	 * (Fixed the found-value format specifier, which previously
	 * printed as "08%08X" instead of "0x%08X".)
	 */
	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BNX_CRC32_RESIDUAL) {
		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
		rc = ENODEV;
	}

bnx_nvram_test_done:
	return (rc);
}
2139
2140/****************************************************************************/
2141/* Identifies the current media type of the controller and sets the PHY     */
2142/* address.                                                                 */
2143/*                                                                          */
2144/* Returns:                                                                 */
2145/*   Nothing.                                                               */
2146/****************************************************************************/
void
bnx_get_media(struct bnx_softc *sc)
{
	u_int32_t val;

	/* Default PHY address used by most controllers. */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/*
		 * Use the function-scope 'val'; the previous code
		 * declared a second 'val' here, shadowing the outer one
		 * (flagged by -Wshadow).
		 */
		u_int32_t bond_id, strap;

		val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
		bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for copper.\n");
			goto bnx_get_media_exit;
		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for dual media.\n");
			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
			goto bnx_get_media_exit;
		}

		/* Read the media selection strap (or its override). */
		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else {
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
			    >> 8;
		}

		/* The strap-to-media mapping differs per PCI function. */
		if (sc->bnx_pa.pa_function == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				DBPRINT(sc, BNX_INFO_LOAD,
					"BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
					"BCM5709 s/w configured for Copper.\n");
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				DBPRINT(sc, BNX_INFO_LOAD,
					"BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
					"BCM5709 s/w configured for Copper.\n");
			}
		}

	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
		/* SerDes parts cannot do Wake-on-LAN. */
		sc->bnx_flags |= BNX_NO_WOL_FLAG;

		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;

		/*
		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
		 * separate PHY for SerDes.
		 */
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
				 BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BNX_INFO_LOAD,
				    "Found 2.5Gb capable adapter\n");
			}
		}
	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
		   (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;

bnx_get_media_exit:
	DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
		"Using PHY address %d.\n", sc->bnx_phy_addr);
}
2240
2241/****************************************************************************/
2242/* Performs PHY initialization required before MII drivers access the       */
2243/* device.                                                                  */
2244/*                                                                          */
2245/* Returns:                                                                 */
2246/*   Nothing.                                                               */
2247/****************************************************************************/
void
bnx_init_media(struct bnx_softc *sc)
{
	/* Nothing to do unless the PHY speaks IEEE Clause 45. */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) == 0)
		return;

	/*
	 * Switch the BCM5709S / BCM5716S PHYs into the traditional
	 * IEEE Clause 22 register layout; without this the PHY cannot
	 * be attached through the mii(4) framework, which performs all
	 * further PHY-specific setup.
	 */

	/* Point the block-address register at the address-extension page. */
	bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
	    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);

	/* Select the auto-negotiation MMD of the PHY. */
	bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
	    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

	/* Switch back to the combined IEEE register block. */
	bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
	    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
}
2270
2271/****************************************************************************/
2272/* Free any DMA memory owned by the driver.                                 */
2273/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
2275/* the memory if allocated.                                                 */
2276/*                                                                          */
2277/* Returns:                                                                 */
2278/*   Nothing.                                                               */
2279/****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int			i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Destroy the status block: sync for the CPU, then unload,
	 * unmap, free and destroy per the bus_dma(9) teardown order.
	 */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
		    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		/* Clear the pointers so a second call is a no-op. */
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/*
	 * Free, unmap and destroy all context memory pages.
	 * Only the BCM5709 caches context memory in host pages.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				bus_dmamem_unmap(sc->bnx_dmatag,
				    (caddr_t)sc->ctx_block[i],
				    BCM_PAGE_SIZE);
				bus_dmamem_free(sc->bnx_dmatag,
				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
				bus_dmamap_destroy(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX dmamaps. */
	/* This isn't necessary since we dont allocate them up front */

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
2379
2380/****************************************************************************/
2381/* Allocate any DMA memory needed by the driver.                            */
2382/*                                                                          */
2383/* Allocates DMA memory needed for the various global structures needed by  */
2384/* hardware.                                                                */
2385/*                                                                          */
2386/* Returns:                                                                 */
2387/*   0 for success, positive value for failure.                             */
2388/****************************************************************************/
2389int
2390bnx_dma_alloc(struct bnx_softc *sc)
2391{
2392	int			i, rc = 0;
2393
2394	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2395
2396	/*
2397	 * Allocate DMA memory for the status block, map the memory into DMA
2398	 * space, and fetch the physical address of the block.
2399	 */
2400	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2401	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2402		printf(": Could not create status block DMA map!\n");
2403		rc = ENOMEM;
2404		goto bnx_dma_alloc_exit;
2405	}
2406
2407	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2408	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2409	    &sc->status_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2410		printf(": Could not allocate status block DMA memory!\n");
2411		rc = ENOMEM;
2412		goto bnx_dma_alloc_exit;
2413	}
2414
2415	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2416	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2417		printf(": Could not map status block DMA memory!\n");
2418		rc = ENOMEM;
2419		goto bnx_dma_alloc_exit;
2420	}
2421
2422	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2423	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2424		printf(": Could not load status block DMA memory!\n");
2425		rc = ENOMEM;
2426		goto bnx_dma_alloc_exit;
2427	}
2428
2429	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2430	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2431
2432	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2433
2434	/* DRC - Fix for 64 bit addresses. */
2435	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2436		(u_int32_t) sc->status_block_paddr);
2437
2438	/* BCM5709 uses host memory as cache for context memory. */
2439	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2440		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2441		if (sc->ctx_pages == 0)
2442			sc->ctx_pages = 1;
2443		if (sc->ctx_pages > 4) /* XXX */
2444			sc->ctx_pages = 4;
2445
2446		DBRUNIF((sc->ctx_pages > 512),
2447			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
2448				__FILE__, __LINE__, sc->ctx_pages));
2449
2450
2451		for (i = 0; i < sc->ctx_pages; i++) {
2452			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2453			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2454			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2455			    &sc->ctx_map[i]) != 0) {
2456				rc = ENOMEM;
2457				goto bnx_dma_alloc_exit;
2458			}
2459
2460			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2461			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2462			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2463				rc = ENOMEM;
2464				goto bnx_dma_alloc_exit;
2465			}
2466
2467			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2468			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2469			    (caddr_t *)&sc->ctx_block[i],
2470			    BUS_DMA_NOWAIT) != 0) {
2471				rc = ENOMEM;
2472				goto bnx_dma_alloc_exit;
2473			}
2474
2475			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2476			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2477			    BUS_DMA_NOWAIT) != 0) {
2478				rc = ENOMEM;
2479				goto bnx_dma_alloc_exit;
2480			}
2481
2482			bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2483		}
2484	}
2485
2486	/*
2487	 * Allocate DMA memory for the statistics block, map the memory into
2488	 * DMA space, and fetch the physical address of the block.
2489	 */
2490	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2491	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2492		printf(": Could not create stats block DMA map!\n");
2493		rc = ENOMEM;
2494		goto bnx_dma_alloc_exit;
2495	}
2496
2497	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2498	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2499	    &sc->stats_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2500		printf(": Could not allocate stats block DMA memory!\n");
2501		rc = ENOMEM;
2502		goto bnx_dma_alloc_exit;
2503	}
2504
2505	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2506	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2507		printf(": Could not map stats block DMA memory!\n");
2508		rc = ENOMEM;
2509		goto bnx_dma_alloc_exit;
2510	}
2511
2512	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2513	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2514		printf(": Could not load status block DMA memory!\n");
2515		rc = ENOMEM;
2516		goto bnx_dma_alloc_exit;
2517	}
2518
2519	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2520
2521	/* DRC - Fix for 64 bit address. */
2522	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2523	    (u_int32_t) sc->stats_block_paddr);
2524
2525	/*
2526	 * Allocate DMA memory for the TX buffer descriptor chain,
2527	 * and fetch the physical address of the block.
2528	 */
2529	for (i = 0; i < TX_PAGES; i++) {
2530		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2531		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2532		    &sc->tx_bd_chain_map[i])) {
2533			printf(": Could not create Tx desc %d DMA map!\n", i);
2534			rc = ENOMEM;
2535			goto bnx_dma_alloc_exit;
2536		}
2537
2538		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2539		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2540		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2541			printf(": Could not allocate TX desc %d DMA memory!\n",
2542			    i);
2543			rc = ENOMEM;
2544			goto bnx_dma_alloc_exit;
2545		}
2546
2547		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2548		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2549		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2550			printf(": Could not map TX desc %d DMA memory!\n", i);
2551			rc = ENOMEM;
2552			goto bnx_dma_alloc_exit;
2553		}
2554
2555		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2556		    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2557		    BUS_DMA_NOWAIT)) {
2558			printf(": Could not load TX desc %d DMA memory!\n", i);
2559			rc = ENOMEM;
2560			goto bnx_dma_alloc_exit;
2561		}
2562
2563		sc->tx_bd_chain_paddr[i] =
2564		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2565
2566		/* DRC - Fix for 64 bit systems. */
2567		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2568		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2569	}
2570
2571	/*
2572	 * Create lists to hold TX mbufs.
2573	 */
2574	TAILQ_INIT(&sc->tx_free_pkts);
2575	TAILQ_INIT(&sc->tx_used_pkts);
2576	sc->tx_pkt_count = 0;
2577	mtx_init(&sc->tx_pkt_mtx, IPL_NET);
2578
2579	/*
2580	 * Allocate DMA memory for the Rx buffer descriptor chain,
2581	 * and fetch the physical address of the block.
2582	 */
2583	for (i = 0; i < RX_PAGES; i++) {
2584		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2585		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2586		    &sc->rx_bd_chain_map[i])) {
2587			printf(": Could not create Rx desc %d DMA map!\n", i);
2588			rc = ENOMEM;
2589			goto bnx_dma_alloc_exit;
2590		}
2591
2592		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2593		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2594		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2595			printf(": Could not allocate Rx desc %d DMA memory!\n",
2596			    i);
2597			rc = ENOMEM;
2598			goto bnx_dma_alloc_exit;
2599		}
2600
2601		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2602		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2603		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2604			printf(": Could not map Rx desc %d DMA memory!\n", i);
2605			rc = ENOMEM;
2606			goto bnx_dma_alloc_exit;
2607		}
2608
2609		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2610		    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2611		    BUS_DMA_NOWAIT)) {
2612			printf(": Could not load Rx desc %d DMA memory!\n", i);
2613			rc = ENOMEM;
2614			goto bnx_dma_alloc_exit;
2615		}
2616
2617		sc->rx_bd_chain_paddr[i] =
2618		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2619
2620		/* DRC - Fix for 64 bit systems. */
2621		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2622		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2623	}
2624
2625	/*
2626	 * Create DMA maps for the Rx buffer mbufs.
2627	 */
2628	for (i = 0; i < TOTAL_RX_BD; i++) {
2629		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2630		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2631		    &sc->rx_mbuf_map[i])) {
2632			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2633			rc = ENOMEM;
2634			goto bnx_dma_alloc_exit;
2635		}
2636	}
2637
2638 bnx_dma_alloc_exit:
2639	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2640
2641	return(rc);
2642}
2643
2644/****************************************************************************/
2645/* Release all resources used by the driver.                                */
2646/*                                                                          */
2647/* Releases all resources acquired by the driver including interrupts,      */
2648/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2649/*                                                                          */
2650/* Returns:                                                                 */
2651/*   Nothing.                                                               */
2652/****************************************************************************/
void
bnx_release_resources(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Release all DMA memory, maps and mbuf lists. */
	bnx_dma_free(sc);

	/* Disconnect the interrupt handler if one was established. */
	if (sc->bnx_intrhand != NULL)
		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);

	/* Unmap the device registers; bnx_size is nonzero only if mapped. */
	if (sc->bnx_size)
		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
2670
2671/****************************************************************************/
2672/* Firmware synchronization.                                                */
2673/*                                                                          */
2674/* Before performing certain events such as a chip reset, synchronize with  */
2675/* the firmware first.                                                      */
2676/*                                                                          */
2677/* Returns:                                                                 */
2678/*   0 for success, positive value for failure.                             */
2679/****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
{
	int			i, rc = 0;
	u_int32_t		val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/* Increment the message sequence number. */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/*
	 * Wait for the bootcode to acknowledge the message.  The bootcode
	 * acks by echoing our sequence number into the ack field of the
	 * firmware mailbox; poll up to FW_ACK_TIME_OUT_MS milliseconds.
	 */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/*
	 * If we've timed out, tell the bootcode that we've stopped waiting.
	 * WAIT0 messages are exempted from the timeout handling here.
	 */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		/* Replace the message code with a timeout notification. */
		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		/* Remember the timeout so later calls fail fast. */
		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return (rc);
}
2729
2730/****************************************************************************/
2731/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2732/*                                                                          */
2733/* Returns:                                                                 */
2734/*   Nothing.                                                               */
2735/****************************************************************************/
void
bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
    u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
{
	int			i;
	u_int32_t		val;

	/* Set the page size used by RV2P. */
	if (rv2p_proc == RV2P_PROC2) {
		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
		    USABLE_RX_BD_PER_PAGE);
	}

	/*
	 * Each 8-byte instruction is staged in the HIGH/LOW instruction
	 * registers and then committed to slot (i / 8) via the selected
	 * processor's address/command register.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
}
2770
2771/****************************************************************************/
2772/* Load RISC processor firmware.                                            */
2773/*                                                                          */
2774/* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2775/* associated with a particular processor.                                  */
2776/*                                                                          */
2777/* Returns:                                                                 */
2778/*   Nothing.                                                               */
2779/****************************************************************************/
2780void
2781bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2782    struct fw_info *fw)
2783{
2784	u_int32_t		offset;
2785	u_int32_t		val;
2786
2787	/* Halt the CPU. */
2788	val = REG_RD_IND(sc, cpu_reg->mode);
2789	val |= cpu_reg->mode_value_halt;
2790	REG_WR_IND(sc, cpu_reg->mode, val);
2791	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2792
2793	/* Load the Text area. */
2794	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2795	if (fw->text) {
2796		int j;
2797
2798		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2799			REG_WR_IND(sc, offset, fw->text[j]);
2800	}
2801
2802	/* Load the Data area. */
2803	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2804	if (fw->data) {
2805		int j;
2806
2807		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2808			REG_WR_IND(sc, offset, fw->data[j]);
2809	}
2810
2811	/* Load the SBSS area. */
2812	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2813	if (fw->sbss) {
2814		int j;
2815
2816		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2817			REG_WR_IND(sc, offset, fw->sbss[j]);
2818	}
2819
2820	/* Load the BSS area. */
2821	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2822	if (fw->bss) {
2823		int j;
2824
2825		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2826			REG_WR_IND(sc, offset, fw->bss[j]);
2827	}
2828
2829	/* Load the Read-Only area. */
2830	offset = cpu_reg->spad_base +
2831	    (fw->rodata_addr - cpu_reg->mips_view_base);
2832	if (fw->rodata) {
2833		int j;
2834
2835		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2836			REG_WR_IND(sc, offset, fw->rodata[j]);
2837	}
2838
2839	/* Clear the pre-fetch instruction. */
2840	REG_WR_IND(sc, cpu_reg->inst, 0);
2841	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2842
2843	/* Start the CPU. */
2844	val = REG_RD_IND(sc, cpu_reg->mode);
2845	val &= ~cpu_reg->mode_value_halt;
2846	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2847	REG_WR_IND(sc, cpu_reg->mode, val);
2848}
2849
2850/****************************************************************************/
2851/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2852/*                                                                          */
2853/* Loads the firmware for each CPU and starts the CPU.                      */
2854/*                                                                          */
2855/* Returns:                                                                 */
2856/*   Nothing.                                                               */
2857/****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	struct bnx_firmware *bfw = &bnx_firmwares[BNX_FW_B06];
	struct bnx_rv2p *rv2p = &bnx_rv2ps[BNX_RV2P];
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Select the firmware set: the B06 images are the default
	 * (5706/5708); the 5709 uses the B09 images, with a separate
	 * RV2P build for Ax revision parts.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		bfw = &bnx_firmwares[BNX_FW_B09];
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = &bnx_rv2ps[BNX_XI90_RV2P];
		else
			rv2p = &bnx_rv2ps[BNX_XI_RV2P];
	}

	/* Initialize the RV2P processor. */
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc1,
	    rv2p->fw->bnx_rv2p_proc1len, RV2P_PROC1);
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc2,
	    rv2p->fw->bnx_rv2p_proc2len, RV2P_PROC2);

	/*
	 * For each remaining on-chip CPU the same cpu_reg/fw structures
	 * are re-filled with that processor's register addresses and
	 * firmware sections, then handed to bnx_load_cpu_fw().
	 */

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_RXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_RXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_RXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_RXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_RXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_RXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_RXP_FwText;

	fw.data_addr = bfw->fw->bnx_RXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_RXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_RXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_RXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_RXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_RXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_RXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_RXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_RXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_RXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_RXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_RXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TXP_FwText;

	fw.data_addr = bfw->fw->bnx_TXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_TXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TPAT_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TPAT_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TPAT_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TPAT_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TPAT_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TPAT_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TPAT_FwText;

	fw.data_addr = bfw->fw->bnx_TPAT_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TPAT_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TPAT_FwData;

	fw.sbss_addr = bfw->fw->bnx_TPAT_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TPAT_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TPAT_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TPAT_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TPAT_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TPAT_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TPAT_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TPAT_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TPAT_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_COM_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_COM_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_COM_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_COM_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_COM_FwTextAddr;
	fw.text_len = bfw->fw->bnx_COM_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_COM_FwText;

	fw.data_addr = bfw->fw->bnx_COM_FwDataAddr;
	fw.data_len = bfw->fw->bnx_COM_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_COM_FwData;

	fw.sbss_addr = bfw->fw->bnx_COM_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_COM_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_COM_FwSbss;

	fw.bss_addr = bfw->fw->bnx_COM_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_COM_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_COM_FwBss;

	fw.rodata_addr = bfw->fw->bnx_COM_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_COM_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_COM_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
}
3068
3069/****************************************************************************/
3070/* Initialize context memory.                                               */
3071/*                                                                          */
3072/* Clears the memory associated with each Context ID (CID).                 */
3073/*                                                                          */
3074/* Returns:                                                                 */
3075/*   Nothing.                                                               */
3076/****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		u_int32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		/*
		 * Program one host page-table entry per allocated
		 * context page (sc->ctx_segs).
		 */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache. */
			/* Low 32 bits of the page address, plus valid bit. */
			val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
				BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			/* High 32 bits of the page address. */
			val = (u_int32_t)
			    ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
				BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		u_int32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk backwards from CID 96, zeroing each context. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {

			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
				CTX_WR(sc, 0x00, offset, 0);
			}

			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
 	}
}
3154
3155/****************************************************************************/
3156/* Fetch the permanent MAC address of the controller.                       */
3157/*                                                                          */
3158/* Returns:                                                                 */
3159/*   Nothing.                                                               */
3160/****************************************************************************/
3161void
3162bnx_get_mac_addr(struct bnx_softc *sc)
3163{
3164	u_int32_t		mac_lo = 0, mac_hi = 0;
3165
3166	/*
3167	 * The NetXtreme II bootcode populates various NIC
3168	 * power-on and runtime configuration items in a
3169	 * shared memory area.  The factory configured MAC
3170	 * address is available from both NVRAM and the
3171	 * shared memory area so we'll read the value from
3172	 * shared memory for speed.
3173	 */
3174
3175	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3176	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3177
3178	if ((mac_lo == 0) && (mac_hi == 0)) {
3179		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3180		    __FILE__, __LINE__);
3181	} else {
3182		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3183		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3184		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3185		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3186		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3187		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3188	}
3189
3190	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3191	    "%6D\n", sc->eaddr, ":");
3192}
3193
3194/****************************************************************************/
3195/* Program the MAC address.                                                 */
3196/*                                                                          */
3197/* Returns:                                                                 */
3198/*   Nothing.                                                               */
3199/****************************************************************************/
3200void
3201bnx_set_mac_addr(struct bnx_softc *sc)
3202{
3203	u_int32_t		val;
3204	u_int8_t		*mac_addr = sc->eaddr;
3205
3206	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3207	    "%6D\n", sc->eaddr, ":");
3208
3209	val = (mac_addr[0] << 8) | mac_addr[1];
3210
3211	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3212
3213	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3214		(mac_addr[4] << 8) | mac_addr[5];
3215
3216	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3217}
3218
3219/****************************************************************************/
3220/* Stop the controller.                                                     */
3221/*                                                                          */
3222/* Returns:                                                                 */
3223/*   Nothing.                                                               */
3224/****************************************************************************/
void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry	*ifm;
	struct mii_data		*mii;
	int			mtmp, itmp;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop the periodic tick timeout. */
	timeout_del(&sc->bnx_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	/* NOTE(review): read-back presumably flushes the posted write. */
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.  The current media and
	 * interface flags are saved, IFM_NONE is forced through
	 * mii_mediachg(), and then both are restored.
	 */
	mii = &sc->bnx_mii;
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	ifm = mii->mii_media.ifm_cur;
	mtmp = ifm->ifm_media;
	ifm->ifm_media = IFM_ETHER|IFM_NONE;
	mii_mediachg(mii);
	ifm->ifm_media = mtmp;
	ifp->if_flags = itmp;

	ifp->if_timer = 0;

	sc->bnx_link = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	bnx_mgmt_init(sc);
}
3278
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
		val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* 5709: reset via the MISC command register. */
		REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BNX_MISC_COMMAND);
		DELAY(5);

		/* Re-enable the register window after the reset. */
		val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
		    val);
	} else {
		/* 5706/5708: request a core reset via PCI config space. */
		val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
			if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bnx_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3379
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	/* 133MHz PCI-X needs the fast clock compensation bit. */
	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

#if 1
	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}
#endif

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
			val |= BNX_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Program the start/end of the kernel bypass window. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

#if 0
	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
#endif

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3484
3485/****************************************************************************/
3486/* Initialize the controller in preparation to send/receive traffic.        */
3487/*                                                                          */
3488/* Returns:                                                                 */
3489/*   0 for success, positive value for failure.                             */
3490/****************************************************************************/
3491int
3492bnx_blockinit(struct bnx_softc *sc)
3493{
3494	u_int32_t		reg, val;
3495	int 			rc = 0;
3496
3497	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3498
3499	/* Load the hardware default MAC address. */
3500	bnx_set_mac_addr(sc);
3501
3502	/* Set the Ethernet backoff seed value */
3503	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3504	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3505	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3506
3507	sc->last_status_idx = 0;
3508	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3509
3510	/* Set up link change interrupt generation. */
3511	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3512	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3513
3514	/* Program the physical address of the status block. */
3515	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3516	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3517	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3518
3519	/* Program the physical address of the statistics block. */
3520	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3521	    (u_int32_t)(sc->stats_block_paddr));
3522	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3523	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3524
3525	/* Program various host coalescing parameters. */
3526	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3527	    << 16) | sc->bnx_tx_quick_cons_trip);
3528	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3529	    << 16) | sc->bnx_rx_quick_cons_trip);
3530	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3531	    sc->bnx_comp_prod_trip);
3532	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3533	    sc->bnx_tx_ticks);
3534	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3535	    sc->bnx_rx_ticks);
3536	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3537	    sc->bnx_com_ticks);
3538	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3539	    sc->bnx_cmd_ticks);
3540	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3541	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3542	REG_WR(sc, BNX_HC_CONFIG,
3543	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3544	    BNX_HC_CONFIG_COLLECT_STATS));
3545
3546	/* Clear the internal statistics counters. */
3547	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3548
3549	/* Verify that bootcode is running. */
3550	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3551
3552	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3553	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3554	    __FILE__, __LINE__); reg = 0);
3555
3556	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3557	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3558		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3559		    "Expected: 08%08X\n", __FILE__, __LINE__,
3560		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3561		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3562		rc = ENODEV;
3563		goto bnx_blockinit_exit;
3564	}
3565
3566	/* Check if any management firmware is running. */
3567	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3568	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3569	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3570		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3571		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3572	}
3573
3574	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3575	    BNX_DEV_INFO_BC_REV);
3576
3577	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3578
3579	/* Enable DMA */
3580	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3581		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3582		val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3583		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3584	}
3585
3586	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3587	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3588
3589	/* Enable link state change interrupt generation. */
3590	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3591		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3592		    BNX_MISC_ENABLE_DEFAULT_XI);
3593	} else
3594		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3595
3596	/* Enable all remaining blocks in the MAC. */
3597	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3598	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3599	DELAY(20);
3600
3601bnx_blockinit_exit:
3602	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3603
3604	return (rc);
3605}
3606
3607/****************************************************************************/
3608/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3609/*                                                                          */
3610/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3611/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3612/* necessary.                                                               */
3613/*                                                                          */
3614/* Returns:                                                                 */
3615/*   0 for success, positive value for failure.                             */
3616/****************************************************************************/
3617int
3618bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3619    u_int16_t *chain_prod, u_int32_t *prod_bseq)
3620{
3621	bus_dmamap_t		map;
3622	struct mbuf 		*m;
3623	struct rx_bd		*rxbd;
3624	int			i;
3625	u_int32_t		addr;
3626#ifdef BNX_DEBUG
3627	u_int16_t		debug_chain_prod = *chain_prod;
3628#endif
3629	u_int16_t		first_chain_prod;
3630
3631	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3632	    __FUNCTION__);
3633
3634	/* Make sure the inputs are valid. */
3635	DBRUNIF((*chain_prod > MAX_RX_BD),
3636	    printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3637	    *chain_prod, (u_int16_t) MAX_RX_BD));
3638
3639	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3640	    "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3641	    *prod_bseq);
3642
3643	/* This is a new mbuf allocation. */
3644	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
3645	if (!m)
3646		return (ENOBUFS);
3647	m->m_len = m->m_pkthdr.len = MCLBYTES;
3648	/* the chip aligns the ip header for us, no need to m_adj */
3649
3650	/* Map the mbuf cluster into device memory. */
3651	map = sc->rx_mbuf_map[*chain_prod];
3652	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT)) {
3653		m_freem(m);
3654		return (ENOBUFS);
3655	}
3656	first_chain_prod = *chain_prod;
3657
3658	/* Make sure there is room in the receive chain. */
3659	if (map->dm_nsegs > sc->free_rx_bd) {
3660		bus_dmamap_unload(sc->bnx_dmatag, map);
3661		m_freem(m);
3662		return (EFBIG);
3663	}
3664
3665#ifdef BNX_DEBUG
3666	/* Track the distribution of buffer segments. */
3667	sc->rx_mbuf_segs[map->dm_nsegs]++;
3668#endif
3669
3670	/* Update some debug statistics counters */
3671	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3672	    sc->rx_low_watermark = sc->free_rx_bd);
3673	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3674
3675	/* Setup the rx_bd for the first segment. */
3676	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3677
3678	addr = (u_int32_t)map->dm_segs[0].ds_addr;
3679	rxbd->rx_bd_haddr_lo = addr;
3680	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3681	rxbd->rx_bd_haddr_hi = addr;
3682	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3683	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3684	*prod_bseq += map->dm_segs[0].ds_len;
3685
3686	for (i = 1; i < map->dm_nsegs; i++) {
3687		*prod = NEXT_RX_BD(*prod);
3688		*chain_prod = RX_CHAIN_IDX(*prod);
3689
3690		rxbd =
3691		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3692
3693		addr = (u_int32_t)map->dm_segs[i].ds_addr;
3694		rxbd->rx_bd_haddr_lo = addr;
3695		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3696		rxbd->rx_bd_haddr_hi = addr;
3697		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3698		rxbd->rx_bd_flags = 0;
3699		*prod_bseq += map->dm_segs[i].ds_len;
3700	}
3701
3702	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3703
3704	/*
3705	 * Save the mbuf, adjust the map pointer (swap map for first and
3706	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3707	 * and update our counter.
3708	 */
3709	sc->rx_mbuf_ptr[*chain_prod] = m;
3710	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3711	sc->rx_mbuf_map[*chain_prod] = map;
3712	sc->free_rx_bd -= map->dm_nsegs;
3713
3714	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3715	    map->dm_nsegs));
3716
3717	return (0);
3718}
3719
/*
 * Workqueue task: pre-allocate a batch of TX packet descriptors and their
 * DMA maps, then kick the transmit path.  Runs in process context so it
 * may sleep in pool_get()/bus_dmamap_create().
 */
void
bnx_alloc_pkts(void *xsc, void *arg)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_pkt *pkt;
	int i;
	int s;

	/* Allocate a small fixed batch per invocation. */
	for (i = 0; i < 4; i++) { /* magic! */
		pkt = pool_get(bnx_tx_pool, PR_WAITOK);
		if (pkt == NULL)
			break;

		/* One map per packet, sized for a maximally-fragmented frame. */
		if (bus_dmamap_create(sc->bnx_dmatag,
		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->pkt_dmamap) != 0)
			goto put;

		/* Interface went down while we slept; undo and bail. */
		if (!ISSET(ifp->if_flags, IFF_UP))
			goto stopping;

		mtx_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count++;
		mtx_leave(&sc->tx_pkt_mtx);
	}

	/* Allow a future refill task to be scheduled again. */
	mtx_enter(&sc->tx_pkt_mtx);
	CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
	mtx_leave(&sc->tx_pkt_mtx);

	/* Restart transmission with the newly available descriptors. */
	s = splnet();
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);
	splx(s);

	return;

	/*
	 * NOTE(review): these early-exit paths return without clearing
	 * BNX_ALLOC_PKTS_FLAG, which looks like it would prevent any
	 * further refill tasks from being queued — confirm whether the
	 * flag is reset elsewhere (e.g. on ifconfig down/up).
	 */
stopping:
	bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
put:
	pool_put(bnx_tx_pool, pkt);
}
3765
3766/****************************************************************************/
3767/* Initialize the TX context memory.                                        */
3768/*                                                                          */
3769/* Returns:                                                                 */
3770/*   Nothing                                                                */
3771/****************************************************************************/
3772void
3773bnx_init_tx_context(struct bnx_softc *sc)
3774{
3775	u_int32_t val;
3776
3777	/* Initialize the context ID for an L2 TX chain. */
3778	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3779		/* Set the CID type to support an L2 connection. */
3780		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3781		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
3782		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3783		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
3784
3785		/* Point the hardware to the first page in the chain. */
3786		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3787		CTX_WR(sc, GET_CID_ADDR(TX_CID),
3788		    BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
3789		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3790		CTX_WR(sc, GET_CID_ADDR(TX_CID),
3791		    BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
3792	} else {
3793		/* Set the CID type to support an L2 connection. */
3794		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3795		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3796		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3797		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3798
3799		/* Point the hardware to the first page in the chain. */
3800		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3801		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3802		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3803		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3804	}
3805}
3806
3807/****************************************************************************/
3808/* Allocate memory and initialize the TX data structures.                   */
3809/*                                                                          */
3810/* Returns:                                                                 */
3811/*   0 for success, positive value for failure.                             */
3812/****************************************************************************/
3813int
3814bnx_init_tx_chain(struct bnx_softc *sc)
3815{
3816	struct tx_bd		*txbd;
3817	u_int32_t		addr;
3818	int			i, rc = 0;
3819
3820	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3821
3822	/* Force an allocation of some dmamaps for tx up front */
3823	bnx_alloc_pkts(sc, NULL);
3824
3825	/* Set the initial TX producer/consumer indices. */
3826	sc->tx_prod = 0;
3827	sc->tx_cons = 0;
3828	sc->tx_prod_bseq = 0;
3829	sc->used_tx_bd = 0;
3830	sc->max_tx_bd =	USABLE_TX_BD;
3831	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3832	DBRUNIF(1, sc->tx_full_count = 0);
3833
3834	/*
3835	 * The NetXtreme II supports a linked-list structure called
3836	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3837	 * consists of a series of 1 or more chain pages, each of which
3838	 * consists of a fixed number of BD entries.
3839	 * The last BD entry on each page is a pointer to the next page
3840	 * in the chain, and the last pointer in the BD chain
3841	 * points back to the beginning of the chain.
3842	 */
3843
3844	/* Set the TX next pointer chain entries. */
3845	for (i = 0; i < TX_PAGES; i++) {
3846		int j;
3847
3848		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3849
3850		/* Check if we've reached the last page. */
3851		if (i == (TX_PAGES - 1))
3852			j = 0;
3853		else
3854			j = i + 1;
3855
3856		addr = (u_int32_t)sc->tx_bd_chain_paddr[j];
3857		txbd->tx_bd_haddr_lo = addr;
3858		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3859		txbd->tx_bd_haddr_hi = addr;
3860	}
3861
3862	/*
3863	 * Initialize the context ID for an L2 TX chain.
3864	 */
3865	bnx_init_tx_context(sc);
3866
3867	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3868
3869	return(rc);
3870}
3871
3872/****************************************************************************/
3873/* Free memory and clear the TX data structures.                            */
3874/*                                                                          */
3875/* Returns:                                                                 */
3876/*   Nothing.                                                               */
3877/****************************************************************************/
3878void
3879bnx_free_tx_chain(struct bnx_softc *sc)
3880{
3881	struct bnx_pkt		*pkt;
3882	int			i;
3883
3884	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3885
3886	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3887	mtx_enter(&sc->tx_pkt_mtx);
3888	while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
3889		TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
3890		mtx_leave(&sc->tx_pkt_mtx);
3891
3892		bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
3893		    pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3894		bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);
3895
3896		m_freem(pkt->pkt_mbuf);
3897
3898		mtx_enter(&sc->tx_pkt_mtx);
3899		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
3900	}
3901
3902	/* Destroy all the dmamaps we allocated for TX */
3903	while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
3904		TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
3905		sc->tx_pkt_count--;
3906		mtx_leave(&sc->tx_pkt_mtx);
3907
3908		bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
3909		pool_put(bnx_tx_pool, pkt);
3910
3911		mtx_enter(&sc->tx_pkt_mtx);
3912	}
3913	mtx_leave(&sc->tx_pkt_mtx);
3914
3915	/* Clear each TX chain page. */
3916	for (i = 0; i < TX_PAGES; i++)
3917		bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3918
3919	sc->used_tx_bd = 0;
3920
3921	/* Check if we lost any mbufs in the process. */
3922	DBRUNIF((sc->tx_mbuf_alloc),
3923	    printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3924	    sc->tx_mbuf_alloc));
3925
3926	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3927}
3928
3929/****************************************************************************/
3930/* Initialize the RX context memory.                                        */
3931/*                                                                          */
3932/* Returns:                                                                 */
3933/*   Nothing                                                                */
3934/****************************************************************************/
3935void
3936bnx_init_rx_context(struct bnx_softc *sc)
3937{
3938	u_int32_t val;
3939
3940	/* Initialize the context ID for an L2 RX chain. */
3941	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3942		BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3943
3944	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3945		u_int32_t lo_water, hi_water;
3946
3947		lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3948		hi_water = USABLE_RX_BD / 4;
3949
3950		lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
3951		hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;
3952
3953		if (hi_water > 0xf)
3954			hi_water = 0xf;
3955		else if (hi_water == 0)
3956			lo_water = 0;
3957		val |= lo_water |
3958		    (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
3959	}
3960
3961 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3962
3963	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
3964	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3965		val = REG_RD(sc, BNX_MQ_MAP_L2_5);
3966		REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
3967	}
3968
3969	/* Point the hardware to the first page in the chain. */
3970	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3971	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3972	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3973	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3974}
3975
3976/****************************************************************************/
3977/* Add mbufs to the RX chain until its full or an mbuf allocation error     */
3978/* occurs.                                                                  */
3979/*                                                                          */
3980/* Returns:                                                                 */
3981/*   Nothing                                                                */
3982/****************************************************************************/
3983void
3984bnx_fill_rx_chain(struct bnx_softc *sc)
3985{
3986	u_int16_t		prod, chain_prod;
3987	u_int32_t		prod_bseq;
3988#ifdef BNX_DEBUG
3989	int rx_mbuf_alloc_before, free_rx_bd_before;
3990#endif
3991
3992	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
3993
3994	prod = sc->rx_prod;
3995	prod_bseq = sc->rx_prod_bseq;
3996
3997#ifdef BNX_DEBUG
3998	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3999	free_rx_bd_before = sc->free_rx_bd;
4000#endif
4001
4002	/* Keep filling the RX chain until it's full. */
4003	while (sc->free_rx_bd > 0) {
4004		chain_prod = RX_CHAIN_IDX(prod);
4005		if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
4006			/* Bail out if we can't add an mbuf to the chain. */
4007			break;
4008		}
4009		prod = NEXT_RX_BD(prod);
4010	}
4011
4012#if 0
4013	DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
4014		BNX_PRINTF(sc, "%s(): Installed %d mbufs in %d rx_bd entries.\n",
4015		__FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
4016		(free_rx_bd_before - sc->free_rx_bd)));
4017#endif
4018
4019	/* Save the RX chain producer index. */
4020	sc->rx_prod = prod;
4021	sc->rx_prod_bseq = prod_bseq;
4022
4023	/* Tell the chip about the waiting rx_bd's. */
4024	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4025	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4026
4027	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
4028}
4029
4030/****************************************************************************/
4031/* Allocate memory and initialize the RX data structures.                   */
4032/*                                                                          */
4033/* Returns:                                                                 */
4034/*   0 for success, positive value for failure.                             */
4035/****************************************************************************/
4036int
4037bnx_init_rx_chain(struct bnx_softc *sc)
4038{
4039	struct rx_bd		*rxbd;
4040	int			i, rc = 0;
4041	u_int32_t		addr;
4042
4043	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4044
4045	/* Initialize the RX producer and consumer indices. */
4046	sc->rx_prod = 0;
4047	sc->rx_cons = 0;
4048	sc->rx_prod_bseq = 0;
4049	sc->free_rx_bd = USABLE_RX_BD;
4050	sc->max_rx_bd = USABLE_RX_BD;
4051	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4052	DBRUNIF(1, sc->rx_empty_count = 0);
4053
4054	/* Initialize the RX next pointer chain entries. */
4055	for (i = 0; i < RX_PAGES; i++) {
4056		int j;
4057
4058		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4059
4060		/* Check if we've reached the last page. */
4061		if (i == (RX_PAGES - 1))
4062			j = 0;
4063		else
4064			j = i + 1;
4065
4066		/* Setup the chain page pointers. */
4067		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
4068		rxbd->rx_bd_haddr_hi = addr;
4069		addr = (u_int32_t)sc->rx_bd_chain_paddr[j];
4070		rxbd->rx_bd_haddr_lo = addr;
4071	}
4072
4073	/* Fill up the RX chain. */
4074	bnx_fill_rx_chain(sc);
4075
4076	for (i = 0; i < RX_PAGES; i++)
4077		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4078		    sc->rx_bd_chain_map[i]->dm_mapsize,
4079		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4080
4081	bnx_init_rx_context(sc);
4082
4083	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4084
4085	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4086
4087	return(rc);
4088}
4089
4090/****************************************************************************/
4091/* Free memory and clear the RX data structures.                            */
4092/*                                                                          */
4093/* Returns:                                                                 */
4094/*   Nothing.                                                               */
4095/****************************************************************************/
4096void
4097bnx_free_rx_chain(struct bnx_softc *sc)
4098{
4099	int			i;
4100#ifdef BNX_DEBUG
4101	int			rx_mbuf_alloc_before;
4102#endif
4103
4104	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4105
4106#ifdef BNX_DEBUG
4107	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4108#endif
4109
4110	/* Free any mbufs still in the RX mbuf chain. */
4111	for (i = 0; i < TOTAL_RX_BD; i++) {
4112		if (sc->rx_mbuf_ptr[i] != NULL) {
4113			if (sc->rx_mbuf_map[i] != NULL) {
4114				bus_dmamap_sync(sc->bnx_dmatag,
4115				    sc->rx_mbuf_map[i],	0,
4116				    sc->rx_mbuf_map[i]->dm_mapsize,
4117				    BUS_DMASYNC_POSTREAD);
4118				bus_dmamap_unload(sc->bnx_dmatag,
4119				    sc->rx_mbuf_map[i]);
4120			}
4121			m_freem(sc->rx_mbuf_ptr[i]);
4122			sc->rx_mbuf_ptr[i] = NULL;
4123			DBRUNIF(1, sc->rx_mbuf_alloc--);
4124		}
4125	}
4126
4127	DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
4128		BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
4129		__FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
4130
4131	/* Clear each RX chain page. */
4132	for (i = 0; i < RX_PAGES; i++)
4133		bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
4134
4135	sc->free_rx_bd = sc->max_rx_bd;
4136
4137	/* Check if we lost any mbufs in the process. */
4138	DBRUNIF((sc->rx_mbuf_alloc),
4139	    printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
4140	    sc->rx_mbuf_alloc));
4141
4142	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4143}
4144
4145/****************************************************************************/
4146/* Set media options.                                                       */
4147/*                                                                          */
4148/* Returns:                                                                 */
4149/*   0 for success, positive value for failure.                             */
4150/****************************************************************************/
4151int
4152bnx_ifmedia_upd(struct ifnet *ifp)
4153{
4154	struct bnx_softc	*sc;
4155	struct mii_data		*mii;
4156	int			rc = 0;
4157
4158	sc = ifp->if_softc;
4159
4160	mii = &sc->bnx_mii;
4161	sc->bnx_link = 0;
4162	if (mii->mii_instance) {
4163		struct mii_softc *miisc;
4164		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4165			mii_phy_reset(miisc);
4166	}
4167	mii_mediachg(mii);
4168
4169	return(rc);
4170}
4171
4172/****************************************************************************/
4173/* Reports current media status.                                            */
4174/*                                                                          */
4175/* Returns:                                                                 */
4176/*   Nothing.                                                               */
4177/****************************************************************************/
4178void
4179bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4180{
4181	struct bnx_softc	*sc;
4182	struct mii_data		*mii;
4183	int			s;
4184
4185	sc = ifp->if_softc;
4186
4187	s = splnet();
4188
4189	mii = &sc->bnx_mii;
4190
4191	mii_pollstat(mii);
4192	ifmr->ifm_active = mii->mii_media_active;
4193	ifmr->ifm_status = mii->mii_media_status;
4194
4195	splx(s);
4196}
4197
4198/****************************************************************************/
4199/* Handles PHY generated interrupt events.                                  */
4200/*                                                                          */
4201/* Returns:                                                                 */
4202/*   Nothing.                                                               */
4203/****************************************************************************/
4204void
4205bnx_phy_intr(struct bnx_softc *sc)
4206{
4207	u_int32_t		new_link_state, old_link_state;
4208
4209	new_link_state = sc->status_block->status_attn_bits &
4210	    STATUS_ATTN_BITS_LINK_STATE;
4211	old_link_state = sc->status_block->status_attn_bits_ack &
4212	    STATUS_ATTN_BITS_LINK_STATE;
4213
4214	/* Handle any changes if the link state has changed. */
4215	if (new_link_state != old_link_state) {
4216		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
4217
4218		sc->bnx_link = 0;
4219		timeout_del(&sc->bnx_timeout);
4220		bnx_tick(sc);
4221
4222		/* Update the status_attn_bits_ack field in the status block. */
4223		if (new_link_state) {
4224			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
4225			    STATUS_ATTN_BITS_LINK_STATE);
4226			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
4227		} else {
4228			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
4229			    STATUS_ATTN_BITS_LINK_STATE);
4230			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
4231		}
4232	}
4233
4234	/* Acknowledge the link change interrupt. */
4235	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
4236}
4237
4238/****************************************************************************/
4239/* Handles received frame interrupt events.                                 */
4240/*                                                                          */
4241/* Returns:                                                                 */
4242/*   Nothing.                                                               */
4243/****************************************************************************/
4244void
4245bnx_rx_intr(struct bnx_softc *sc)
4246{
4247	struct status_block	*sblk = sc->status_block;
4248	struct ifnet		*ifp = &sc->arpcom.ac_if;
4249	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
4250	u_int16_t		sw_prod, sw_chain_prod;
4251	u_int32_t		sw_prod_bseq;
4252	struct l2_fhdr		*l2fhdr;
4253	int			i;
4254
4255	DBRUNIF(1, sc->rx_interrupts++);
4256
4257	/* Prepare the RX chain pages to be accessed by the host CPU. */
4258	for (i = 0; i < RX_PAGES; i++)
4259		bus_dmamap_sync(sc->bnx_dmatag,
4260		    sc->rx_bd_chain_map[i], 0,
4261		    sc->rx_bd_chain_map[i]->dm_mapsize,
4262		    BUS_DMASYNC_POSTWRITE);
4263
4264	/* Get the hardware's view of the RX consumer index. */
4265	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4266	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4267		hw_cons++;
4268
4269	/* Get working copies of the driver's view of the RX indices. */
4270	sw_cons = sc->rx_cons;
4271	sw_prod = sc->rx_prod;
4272	sw_prod_bseq = sc->rx_prod_bseq;
4273
4274	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4275	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4276	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
4277
4278	/* Prevent speculative reads from getting ahead of the status block. */
4279	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4280	    BUS_SPACE_BARRIER_READ);
4281
4282	/* Update some debug statistics counters */
4283	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4284	    sc->rx_low_watermark = sc->free_rx_bd);
4285	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
4286
4287	/*
4288	 * Scan through the receive chain as long
4289	 * as there is work to do.
4290	 */
4291	while (sw_cons != hw_cons) {
4292		struct mbuf *m;
4293		struct rx_bd *rxbd;
4294		unsigned int len;
4295		u_int32_t status;
4296
4297		/* Clear the mbuf pointer. */
4298		m = NULL;
4299
4300		/* Convert the producer/consumer indices to an actual
4301		 * rx_bd index.
4302		 */
4303		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4304		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4305
4306		/* Get the used rx_bd. */
4307		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4308		sc->free_rx_bd++;
4309
4310		DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
4311		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
4312
4313		/* The mbuf is stored with the last rx_bd entry of a packet. */
4314		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4315			/* Validate that this is the last rx_bd. */
4316			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4317			    printf("%s: Unexpected mbuf found in "
4318			        "rx_bd[0x%04X]!\n", sw_chain_cons);
4319				bnx_breakpoint(sc));
4320
4321			/* DRC - ToDo: If the received packet is small, say less
4322			 *             than 128 bytes, allocate a new mbuf here,
4323			 *             copy the data to that mbuf, and recycle
4324			 *             the mapped jumbo frame.
4325			 */
4326
4327			/* Unmap the mbuf from DMA space. */
4328			bus_dmamap_sync(sc->bnx_dmatag,
4329			    sc->rx_mbuf_map[sw_chain_cons], 0,
4330			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4331			    BUS_DMASYNC_POSTREAD);
4332			bus_dmamap_unload(sc->bnx_dmatag,
4333			    sc->rx_mbuf_map[sw_chain_cons]);
4334
4335			/* Remove the mbuf from RX chain. */
4336			m = sc->rx_mbuf_ptr[sw_chain_cons];
4337			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4338
4339			/*
4340			 * Frames received on the NetXteme II are prepended
4341			 * with the l2_fhdr structure which provides status
4342			 * information about the received frame (including
4343			 * VLAN tags and checksum info) and are also
4344			 * automatically adjusted to align the IP header
4345			 * (i.e. two null bytes are inserted before the
4346			 * Ethernet header).
4347			 */
4348			l2fhdr = mtod(m, struct l2_fhdr *);
4349
4350			len    = l2fhdr->l2_fhdr_pkt_len;
4351			status = l2fhdr->l2_fhdr_status;
4352
4353			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4354			    printf("Simulating l2_fhdr status error.\n");
4355			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
4356
4357			/* Watch for unusual sized frames. */
4358			DBRUNIF(((len < BNX_MIN_MTU) ||
4359			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4360			    printf("%s: Unusual frame size found. "
4361			    "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
4362			    len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4363
4364			bnx_dump_mbuf(sc, m);
4365			bnx_breakpoint(sc));
4366
4367			len -= ETHER_CRC_LEN;
4368
4369			/* Check the received frame for errors. */
4370			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
4371			    L2_FHDR_ERRORS_PHY_DECODE |
4372			    L2_FHDR_ERRORS_ALIGNMENT |
4373			    L2_FHDR_ERRORS_TOO_SHORT |
4374			    L2_FHDR_ERRORS_GIANT_FRAME)) {
4375				/* Log the error and release the mbuf. */
4376				ifp->if_ierrors++;
4377				DBRUNIF(1, sc->l2fhdr_status_errors++);
4378
4379				m_freem(m);
4380				m = NULL;
4381				goto bnx_rx_int_next_rx;
4382			}
4383
4384			/* Skip over the l2_fhdr when passing the data up
4385			 * the stack.
4386			 */
4387			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4388
4389			/* Adjust the pckt length to match the received data. */
4390			m->m_pkthdr.len = m->m_len = len;
4391
4392			/* Send the packet to the appropriate interface. */
4393			m->m_pkthdr.rcvif = ifp;
4394
4395			DBRUN(BNX_VERBOSE_RECV,
4396			    struct ether_header *eh;
4397			    eh = mtod(m, struct ether_header *);
4398			    printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
4399			    __FUNCTION__, eh->ether_dhost, ":",
4400			    eh->ether_shost, ":", htons(eh->ether_type)));
4401
4402			/* Validate the checksum. */
4403
4404			/* Check for an IP datagram. */
4405			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4406				/* Check if the IP checksum is valid. */
4407				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4408				    == 0)
4409					m->m_pkthdr.csum_flags |=
4410					    M_IPV4_CSUM_IN_OK;
4411				else
4412					DBPRINT(sc, BNX_WARN_SEND,
4413					    "%s(): Invalid IP checksum "
4414					        "= 0x%04X!\n",
4415						__FUNCTION__,
4416						l2fhdr->l2_fhdr_ip_xsum
4417						);
4418			}
4419
4420			/* Check for a valid TCP/UDP frame. */
4421			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4422			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
4423				/* Check for a good TCP/UDP checksum. */
4424				if ((status &
4425				    (L2_FHDR_ERRORS_TCP_XSUM |
4426				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4427					m->m_pkthdr.csum_flags |=
4428					    M_TCP_CSUM_IN_OK |
4429					    M_UDP_CSUM_IN_OK;
4430				} else {
4431					DBPRINT(sc, BNX_WARN_SEND,
4432					    "%s(): Invalid TCP/UDP "
4433					    "checksum = 0x%04X!\n",
4434					    __FUNCTION__,
4435					    l2fhdr->l2_fhdr_tcp_udp_xsum);
4436				}
4437			}
4438
4439			/*
4440			 * If we received a packet with a vlan tag,
4441			 * attach that information to the packet.
4442			 */
4443			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4444			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4445#if NVLAN > 0
4446				DBPRINT(sc, BNX_VERBOSE_SEND,
4447				    "%s(): VLAN tag = 0x%04X\n",
4448				    __FUNCTION__,
4449				    l2fhdr->l2_fhdr_vlan_tag);
4450
4451				m->m_pkthdr.ether_vtag =
4452				    l2fhdr->l2_fhdr_vlan_tag;
4453				m->m_flags |= M_VLANTAG;
4454#else
4455				m_freem(m);
4456				goto bnx_rx_int_next_rx;
4457#endif
4458			}
4459
4460			/* Pass the mbuf off to the upper layers. */
4461			ifp->if_ipackets++;
4462
4463bnx_rx_int_next_rx:
4464			sw_prod = NEXT_RX_BD(sw_prod);
4465		}
4466
4467		sw_cons = NEXT_RX_BD(sw_cons);
4468
4469		/* If we have a packet, pass it up the stack */
4470		if (m) {
4471			sc->rx_cons = sw_cons;
4472
4473#if NBPFILTER > 0
4474			/*
4475			 * Handle BPF listeners. Let the BPF
4476			 * user see the packet.
4477			 */
4478			if (ifp->if_bpf)
4479				bpf_mtap_ether(ifp->if_bpf, m,
4480				    BPF_DIRECTION_IN);
4481#endif
4482
4483			DBPRINT(sc, BNX_VERBOSE_RECV,
4484			    "%s(): Passing received frame up.\n", __FUNCTION__);
4485			ether_input_mbuf(ifp, m);
4486			DBRUNIF(1, sc->rx_mbuf_alloc--);
4487
4488			sw_cons = sc->rx_cons;
4489		}
4490
4491		/* Refresh hw_cons to see if there's new work */
4492		if (sw_cons == hw_cons) {
4493			hw_cons = sc->hw_rx_cons =
4494			    sblk->status_rx_quick_consumer_index0;
4495			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4496			    USABLE_RX_BD_PER_PAGE)
4497				hw_cons++;
4498		}
4499
4500		/* Prevent speculative reads from getting ahead of
4501		 * the status block.
4502		 */
4503		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4504		    BUS_SPACE_BARRIER_READ);
4505	}
4506
4507	/* No new packets to process.  Refill the RX chain and exit. */
4508	sc->rx_cons = sw_cons;
4509	bnx_fill_rx_chain(sc);
4510
4511	for (i = 0; i < RX_PAGES; i++)
4512		bus_dmamap_sync(sc->bnx_dmatag,
4513		    sc->rx_bd_chain_map[i], 0,
4514		    sc->rx_bd_chain_map[i]->dm_mapsize,
4515		    BUS_DMASYNC_PREWRITE);
4516
4517	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4518	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4519	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4520}
4521
4522/****************************************************************************/
4523/* Handles transmit completion interrupt events.                            */
4524/*                                                                          */
4525/* Returns:                                                                 */
4526/*   Nothing.                                                               */
4527/****************************************************************************/
void
bnx_tx_intr(struct bnx_softc *sc)
{
	struct status_block	*sblk = sc->status_block;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct bnx_pkt		*pkt;
	bus_dmamap_t		map;
	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BNX_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    printf("%s: TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
		    bnx_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain
		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
		    sw_tx_chain_cons);
		    bnx_breakpoint(sc));

		DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Reclaim the packet descriptor when this tx_bd is the
		 * last descriptor of its frame (pkt_end_desc matches).
		 * The mutex is dropped while the DMA map is synced and
		 * unloaded and the mbuf freed, then re-taken to return
		 * the descriptor to the free list.
		 */
		mtx_enter(&sc->tx_pkt_mtx);
		pkt = TAILQ_FIRST(&sc->tx_used_pkts);
		if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
			TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
			mtx_leave(&sc->tx_pkt_mtx);
			/*
			 * Free the associated mbuf. Remember
			 * that only the last tx_bd of a packet
			 * has an mbuf pointer and DMA map.
			 */
			map = pkt->pkt_dmamap;
			bus_dmamap_sync(sc->bnx_dmatag, map, 0,
			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bnx_dmatag, map);

			m_freem(pkt->pkt_mbuf);

			ifp->if_opackets++;

			mtx_enter(&sc->tx_pkt_mtx);
			TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		}
		mtx_leave(&sc->tx_pkt_mtx);

		/* One more tx_bd handed back to the driver. */
		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons =
		    sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
		    USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
		    printf("%s: Open TX chain! %d/%d (used/total)\n",
			sc->bnx_dev.dv_xname, sc->used_tx_bd,
			sc->max_tx_bd));
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}
4634
4635/****************************************************************************/
4636/* Disables interrupt generation.                                           */
4637/*                                                                          */
4638/* Returns:                                                                 */
4639/*   Nothing.                                                               */
4640/****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Mask the controller's interrupt output. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	/*
	 * Read the register back before returning — presumably to
	 * flush the posted PCI write so the mask takes effect
	 * immediately (standard read-after-write idiom).
	 */
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
4647
4648/****************************************************************************/
4649/* Enables interrupt generation.                                            */
4650/*                                                                          */
4651/* Returns:                                                                 */
4652/*   Nothing.                                                               */
4653/****************************************************************************/
4654void
4655bnx_enable_intr(struct bnx_softc *sc)
4656{
4657	u_int32_t		val;
4658
4659	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4660	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4661
4662	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4663	    sc->last_status_idx);
4664
4665	val = REG_RD(sc, BNX_HC_COMMAND);
4666	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4667}
4668
4669/****************************************************************************/
4670/* Handles controller initialization.                                       */
4671/*                                                                          */
4672/* Returns:                                                                 */
4673/*   Nothing.                                                               */
4674/****************************************************************************/
void
bnx_init(void *xsc)
{
	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		ether_mtu;
	int			txpl = 1;
	int			s;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Create the global TX packet pool on first use.  The pool is
	 * shared by all bnx instances; bnx_tx_pool_lk serializes the
	 * check-and-create.  rw_enter() with RW_INTR may fail if the
	 * sleep is interrupted, in which case we bail out.
	 */
	if (rw_enter(&bnx_tx_pool_lk, RW_WRITE | RW_INTR) != 0)
		return;
	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_WAITOK);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", &pool_allocator_nointr);
		} else
			txpl = 0;
	}
	rw_exit(&bnx_tx_pool_lk);

	/* Without the packet pool the interface cannot be brought up. */
	if (!txpl)
		return;

	/* Block network interrupts while reprogramming the chip. */
	s = splnet();

	bnx_stop(sc);

	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
		BNX_PRINTF(sc, "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_chipinit(sc)) {
		BNX_PRINTF(sc, "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_blockinit(sc)) {
		BNX_PRINTF(sc, "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Load our MAC address. */
	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
	bnx_set_mac_addr(sc);

	/* Calculate and program the Ethernet MRU size. */
	ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;

	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
	    __FUNCTION__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/* NOTE(review): l2_fhdr + 2-byte alignment pad + MRU + 8; the
	 * trailing 8 presumably leaves room for FCS/VLAN — confirm. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_iff(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	bnx_ifmedia_upd(ifp);

	/* Mark the interface up and ready to accept frames. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second periodic tick. */
	timeout_add_sec(&sc->bnx_timeout, 1);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	splx(s);

	return;
}
4770
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	u_int32_t	val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Program the RV2P processor configuration; the value encodes
	 * the host page size (BCM_PAGE_BITS) in the top byte. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Read back to flush the write, then give the blocks a moment
	 * to come up before touching the media. */
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bnx_ifmedia_upd(ifp);

bnx_mgmt_init_exit:
 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
4800
4801/****************************************************************************/
4802/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4803/* memory visible to the controller.                                        */
4804/*                                                                          */
4805/* Returns:                                                                 */
4806/*   0 for success, positive value for failure.                             */
4807/****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
{
	struct bnx_pkt		*pkt;
	bus_dmamap_t		map;
	struct tx_bd 		*txbd = NULL;
	u_int16_t		vlan_tag = 0, flags = 0;
	u_int16_t		chain_prod, prod;
#ifdef BNX_DEBUG
	u_int16_t		debug_prod;
#endif
	u_int32_t		addr, prod_bseq;
	int			i, error;

	/*
	 * Take a packet descriptor from the free list.  If none are
	 * available, schedule a task to allocate more (at most one
	 * outstanding task, tracked by BNX_ALLOC_PKTS_FLAG) and fail
	 * with ENOMEM so the caller backs off.
	 */
	mtx_enter(&sc->tx_pkt_mtx);
	pkt = TAILQ_FIRST(&sc->tx_free_pkts);
	if (pkt == NULL) {
		if (sc->tx_pkt_count <= TOTAL_TX_BD &&
		    !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG) &&
		    workq_add_task(NULL, 0, bnx_alloc_pkts, sc, NULL) == 0)
			SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);

		mtx_leave(&sc->tx_pkt_mtx);
		return (ENOMEM);
	}
	TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	/* Transfer any checksum offload flags to the bd. */
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#if NVLAN > 0
	/* Transfer any VLAN tags to the bd. */
	if (m->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = pkt->pkt_dmamap;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: Error mapping mbuf into TX chain!\n",
		    sc->bnx_dev.dv_xname);
		sc->tx_dma_map_failures++;
		goto maperr;
	}

	/* Make sure there's room in the chain */
	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
		goto nospace;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA address into the bd's lo/hi words. */
		addr = (u_int32_t)map->dm_segs[i].ds_addr;
		txbd->tx_bd_haddr_lo = addr;
		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = addr;
		txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
		txbd->tx_bd_vlan_tag = vlan_tag;
		txbd->tx_bd_flags = flags;
		/* Running byte-sequence count the chip uses for this ring. */
		prod_bseq += map->dm_segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= TX_BD_FLAGS_START;
		prod = NEXT_TX_BD(prod);
 	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= TX_BD_FLAGS_END;

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
	    map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/* Remember the mbuf and the last descriptor index so
	 * bnx_tx_intr() can reclaim this packet on completion. */
	pkt->pkt_mbuf = m;
	pkt->pkt_end_desc = chain_prod;

	mtx_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	sc->used_tx_bd += map->dm_nsegs;

	/* Update some debug statistics counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return (0);

	/* Error paths: unload the DMA map (if loaded) and return the
	 * packet descriptor to the free list.  The mbuf is NOT freed
	 * here; the caller still owns it. */
nospace:
	bus_dmamap_unload(sc->bnx_dmatag, map);
maperr:
	mtx_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	return (ENOMEM);
}
4953
4954/****************************************************************************/
4955/* Main transmit routine.                                                   */
4956/*                                                                          */
4957/* Returns:                                                                 */
4958/*   Nothing.                                                               */
4959/****************************************************************************/
4960void
4961bnx_start(struct ifnet *ifp)
4962{
4963	struct bnx_softc	*sc = ifp->if_softc;
4964	struct mbuf		*m_head = NULL;
4965	int			count = 0;
4966	u_int16_t		tx_prod, tx_chain_prod;
4967
4968	/* If there's no link or the transmit queue is empty then just exit. */
4969	if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
4970		DBPRINT(sc, BNX_INFO_SEND,
4971		    "%s(): No link or transmit queue empty.\n", __FUNCTION__);
4972		goto bnx_start_exit;
4973	}
4974
4975	/* prod points to the next free tx_bd. */
4976	tx_prod = sc->tx_prod;
4977	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4978
4979	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4980	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4981	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4982
4983	/*
4984	 * Keep adding entries while there is space in the ring.
4985	 */
4986	while (sc->used_tx_bd < sc->max_tx_bd) {
4987		/* Check for any frames to send. */
4988		IFQ_POLL(&ifp->if_snd, m_head);
4989		if (m_head == NULL)
4990			break;
4991
4992		/*
4993		 * Pack the data into the transmit ring. If we
4994		 * don't have room, set the OACTIVE flag to wait
4995		 * for the NIC to drain the chain.
4996		 */
4997		if (bnx_tx_encap(sc, m_head)) {
4998			ifp->if_flags |= IFF_OACTIVE;
4999			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
5000			    "business! Total tx_bd used = %d\n",
5001			    sc->used_tx_bd);
5002			break;
5003		}
5004
5005		IFQ_DEQUEUE(&ifp->if_snd, m_head);
5006		count++;
5007
5008#if NBPFILTER > 0
5009		/* Send a copy of the frame to any BPF listeners. */
5010		if (ifp->if_bpf)
5011			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
5012#endif
5013	}
5014
5015	if (count == 0) {
5016		/* no packets were dequeued */
5017		DBPRINT(sc, BNX_VERBOSE_SEND,
5018		    "%s(): No packets were dequeued\n", __FUNCTION__);
5019		goto bnx_start_exit;
5020	}
5021
5022	/* Update the driver's counters. */
5023	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5024
5025	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
5026	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
5027	    tx_chain_prod, sc->tx_prod_bseq);
5028
5029	/* Start the transmit. */
5030	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5031	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5032
5033	/* Set the tx timeout. */
5034	ifp->if_timer = BNX_TX_TIMEOUT;
5035
5036bnx_start_exit:
5037	return;
5038}
5039
5040/****************************************************************************/
5041/* Handles any IOCTL calls from the operating system.                       */
5042/*                                                                          */
5043/* Returns:                                                                 */
5044/*   0 for success, positive value for failure.                             */
5045/****************************************************************************/
5046int
5047bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5048{
5049	struct bnx_softc	*sc = ifp->if_softc;
5050	struct ifaddr		*ifa = (struct ifaddr *) data;
5051	struct ifreq		*ifr = (struct ifreq *) data;
5052	struct mii_data		*mii = &sc->bnx_mii;
5053	int			s, error = 0;
5054
5055	s = splnet();
5056
5057	switch (command) {
5058	case SIOCSIFADDR:
5059		ifp->if_flags |= IFF_UP;
5060		if (!(ifp->if_flags & IFF_RUNNING))
5061			bnx_init(sc);
5062#ifdef INET
5063		if (ifa->ifa_addr->sa_family == AF_INET)
5064			arp_ifinit(&sc->arpcom, ifa);
5065#endif /* INET */
5066		break;
5067
5068	case SIOCSIFFLAGS:
5069		if (ifp->if_flags & IFF_UP) {
5070			if (ifp->if_flags & IFF_RUNNING)
5071				error = ENETRESET;
5072			else
5073				bnx_init(sc);
5074		} else {
5075			if (ifp->if_flags & IFF_RUNNING)
5076				bnx_stop(sc);
5077		}
5078		break;
5079
5080	case SIOCSIFMEDIA:
5081	case SIOCGIFMEDIA:
5082		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5083		    sc->bnx_phy_flags);
5084
5085		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5086		break;
5087
5088	default:
5089		error = ether_ioctl(ifp, &sc->arpcom, command, data);
5090	}
5091
5092	if (error == ENETRESET) {
5093		if (ifp->if_flags & IFF_RUNNING)
5094			bnx_iff(sc);
5095		error = 0;
5096	}
5097
5098	splx(s);
5099	return (error);
5100}
5101
5102/****************************************************************************/
5103/* Transmit timeout handler.                                                */
5104/*                                                                          */
5105/* Returns:                                                                 */
5106/*   Nothing.                                                               */
5107/****************************************************************************/
5108void
5109bnx_watchdog(struct ifnet *ifp)
5110{
5111	struct bnx_softc	*sc = ifp->if_softc;
5112
5113	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5114	    bnx_dump_status_block(sc));
5115
5116	/*
5117	 * If we are in this routine because of pause frames, then
5118	 * don't reset the hardware.
5119	 */
5120	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5121		return;
5122
5123	printf("%s: Watchdog timeout occurred, resetting!\n",
5124	    ifp->if_xname);
5125
5126	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5127
5128	bnx_init(sc);
5129
5130	ifp->if_oerrors++;
5131}
5132
5133/*
5134 * Interrupt handler.
5135 */
5136/****************************************************************************/
5137/* Main interrupt entry point.  Verifies that the controller generated the  */
5138/* interrupt and then calls a separate routine for handle the various       */
5139/* interrupt causes (PHY, TX, RX).                                          */
5140/*                                                                          */
5141/* Returns:                                                                 */
5142/*   0 for success, positive value for failure.                             */
5143/****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status_attn_bits;

	sc = xsc;
	/* Ignore interrupts while the device is not active. */
	if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
		return (0);

	ifp = &sc->arpcom.ac_if;

	DBRUNIF(1, sc->interrupts_generated++);

	/* Make the DMA'd status block visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
	    (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE))
		return (0);

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {
		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    printf("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE))
			bnx_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			DBRUN(1, sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sc->status_block->status_attn_bits);

			DBRUN(BNX_FATAL,
			    if (bnx_debug_unexpected_attention == 0)
			    bnx_breakpoint(sc));

			/* Reinitialize the whole controller and bail. */
			bnx_init(sc);
			return (1);
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 !=
		    sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 !=
		    sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/* Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the
		 * status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the isr. */
		if ((sc->status_block->status_rx_quick_consumer_index0 ==
		    sc->hw_rx_cons) &&
		    (sc->status_block->status_tx_quick_consumer_index0 ==
		    sc->hw_tx_cons))
			break;
	}

	/* Hand the status block back to the device for the next DMA. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Re-enable interrupts: the same two-step masked/unmasked ack
	 * sequence used by bnx_enable_intr(). */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);

	return (1);
}
5255
5256/****************************************************************************/
5257/* Programs the various packet receive modes (broadcast and multicast).     */
5258/*                                                                          */
5259/* Returns:                                                                 */
5260/*   Nothing.                                                               */
5261/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u_int32_t		rx_mode, sort_mode;
	int			h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ac->ac_multirangecnt > 0) {
		/* Multicast address ranges present; accept all multicast. */
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * The hash is the low byte of the address CRC:
			 * bits 7-5 select one of the eight hash registers,
			 * bits 4-0 select the bit within it.
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;

			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
5342
5343/****************************************************************************/
5344/* Called periodically to updates statistics from the controllers           */
5345/* statistics block.                                                        */
5346/*                                                                          */
5347/* Returns:                                                                 */
5348/*   Nothing.                                                               */
5349/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	/* Input errors: runts, giants, mbuf shortages, alignment and FCS. */
	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	/* Output errors: internal MAC errors plus excessive/late collisions. */
	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  64-bit counters are kept in the stats
	 * block as separate hi/lo 32-bit halves and reassembled here.
	 */
	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5544
5545void
5546bnx_tick(void *xsc)
5547{
5548	struct bnx_softc	*sc = xsc;
5549	struct ifnet		*ifp = &sc->arpcom.ac_if;
5550	struct mii_data		*mii = NULL;
5551	u_int32_t		msg;
5552
5553	/* Tell the firmware that the driver is still running. */
5554#ifdef BNX_DEBUG
5555	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5556#else
5557	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5558#endif
5559	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5560
5561	/* Update the statistics from the hardware statistics block. */
5562	bnx_stats_update(sc);
5563
5564	/* Schedule the next tick. */
5565	timeout_add_sec(&sc->bnx_timeout, 1);
5566
5567	/* If link is up already up then we're done. */
5568	if (sc->bnx_link)
5569		goto bnx_tick_exit;
5570
5571	mii = &sc->bnx_mii;
5572	mii_tick(mii);
5573
5574	/* Check if the link has come up. */
5575	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5576	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5577		sc->bnx_link++;
5578		/* Now that link is up, handle any outstanding TX traffic. */
5579		if (!IFQ_IS_EMPTY(&ifp->if_snd))
5580			bnx_start(ifp);
5581	}
5582
5583bnx_tick_exit:
5584	return;
5585}
5586
5587/****************************************************************************/
5588/* BNX Debug Routines                                                       */
5589/****************************************************************************/
5590#ifdef BNX_DEBUG
5591
5592/****************************************************************************/
5593/* Prints out information about an mbuf.                                    */
5594/*                                                                          */
5595/* Returns:                                                                 */
5596/*   Nothing.                                                               */
5597/****************************************************************************/
5598void
5599bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5600{
5601	struct mbuf		*mp = m;
5602
5603	if (m == NULL) {
5604		/* Index out of range. */
5605		printf("mbuf ptr is null!\n");
5606		return;
5607	}
5608
5609	while (mp) {
5610		printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5611		    mp, mp->m_len);
5612
5613		if (mp->m_flags & M_EXT)
5614			printf("M_EXT ");
5615		if (mp->m_flags & M_PKTHDR)
5616			printf("M_PKTHDR ");
5617		printf("\n");
5618
5619		if (mp->m_flags & M_EXT)
5620			printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5621			    mp, mp->m_ext.ext_size);
5622
5623		mp = mp->m_next;
5624	}
5625}
5626
5627/****************************************************************************/
5628/* Prints out the mbufs in the TX mbuf chain.                               */
5629/*                                                                          */
5630/* Returns:                                                                 */
5631/*   Nothing.                                                               */
5632/****************************************************************************/
5633void
5634bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5635{
5636	struct mbuf		*m;
5637	int			i;
5638
5639	BNX_PRINTF(sc,
5640	    "----------------------------"
5641	    "  tx mbuf data  "
5642	    "----------------------------\n");
5643
5644	for (i = 0; i < count; i++) {
5645	 	m = sc->tx_mbuf_ptr[chain_prod];
5646		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5647		bnx_dump_mbuf(sc, m);
5648		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5649	}
5650
5651	BNX_PRINTF(sc,
5652	    "--------------------------------------------"
5653	    "----------------------------\n");
5654}
5655
5656/*
5657 * This routine prints the RX mbuf chain.
5658 */
5659void
5660bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5661{
5662	struct mbuf		*m;
5663	int			i;
5664
5665	BNX_PRINTF(sc,
5666	    "----------------------------"
5667	    "  rx mbuf data  "
5668	    "----------------------------\n");
5669
5670	for (i = 0; i < count; i++) {
5671	 	m = sc->rx_mbuf_ptr[chain_prod];
5672		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5673		bnx_dump_mbuf(sc, m);
5674		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5675	}
5676
5677
5678	BNX_PRINTF(sc,
5679	    "--------------------------------------------"
5680	    "----------------------------\n");
5681}
5682
5683void
5684bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5685{
5686	if (idx > MAX_TX_BD)
5687		/* Index out of range. */
5688		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5689	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5690		/* TX Chain page pointer. */
5691		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5692		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5693		    txbd->tx_bd_haddr_lo);
5694	else
5695		/* Normal tx_bd entry. */
5696		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5697		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5698		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5699		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5700		    txbd->tx_bd_flags);
5701}
5702
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
5721
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	/*
	 * Print the l2_fhdr fields for descriptor `idx`: status word,
	 * packet length, VLAN tag, and the IP / TCP-UDP checksum values.
	 */
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5732
5733/*
5734 * This routine prints the TX chain.
5735 */
5736void
5737bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5738{
5739	struct tx_bd		*txbd;
5740	int			i;
5741
5742	/* First some info about the tx_bd chain structure. */
5743	BNX_PRINTF(sc,
5744	    "----------------------------"
5745	    "  tx_bd  chain  "
5746	    "----------------------------\n");
5747
5748	BNX_PRINTF(sc,
5749	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
5750	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5751
5752	BNX_PRINTF(sc,
5753	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5754	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5755
5756	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5757
5758	BNX_PRINTF(sc, ""
5759	    "-----------------------------"
5760	    "   tx_bd data   "
5761	    "-----------------------------\n");
5762
5763	/* Now print out the tx_bd's themselves. */
5764	for (i = 0; i < count; i++) {
5765	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5766		bnx_dump_txbd(sc, tx_prod, txbd);
5767		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5768	}
5769
5770	BNX_PRINTF(sc,
5771	    "-----------------------------"
5772	    "--------------"
5773	    "-----------------------------\n");
5774}
5775
5776/*
5777 * This routine prints the RX chain.
5778 */
5779void
5780bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5781{
5782	struct rx_bd		*rxbd;
5783	int			i;
5784
5785	/* First some info about the tx_bd chain structure. */
5786	BNX_PRINTF(sc,
5787	    "----------------------------"
5788	    "  rx_bd  chain  "
5789	    "----------------------------\n");
5790
5791	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5792
5793	BNX_PRINTF(sc,
5794	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
5795	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5796
5797	BNX_PRINTF(sc,
5798	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5799	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5800
5801	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5802
5803	BNX_PRINTF(sc,
5804	    "----------------------------"
5805	    "   rx_bd data   "
5806	    "----------------------------\n");
5807
5808	/* Now print out the rx_bd's themselves. */
5809	for (i = 0; i < count; i++) {
5810		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5811		bnx_dump_rxbd(sc, rx_prod, rxbd);
5812		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5813	}
5814
5815	BNX_PRINTF(sc,
5816	    "----------------------------"
5817	    "--------------"
5818	    "----------------------------\n");
5819}
5820
5821/*
5822 * This routine prints the status block.
5823 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;

	sblk = sc->status_block;

   	BNX_PRINTF(sc, "----------------------------- Status Block "
	    "-----------------------------\n");

	/* Attention bits and the running status index. */
	BNX_PRINTF(sc,
	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	/* The quick consumer indices used by the L2 RX/TX paths. */
	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers; only print
	 * them when non-zero.
	 */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	BNX_PRINTF(sc, "-------------------------------------------"
	    "-----------------------------\n");
}
5909
5910/*
5911 * This routine prints the statistics block.
5912 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block	*sblk;

	sblk = sc->stats_block;

	BNX_PRINTF(sc, ""
	    "-----------------------------"
	    " Stats  Block "
	    "-----------------------------\n");

	/* The 64-bit octet/packet counters are always printed... */
	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
	    "IfHcInBadOctets      = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	/* ...while the remaining counters are printed only when non-zero. */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}
6147
6148void
6149bnx_dump_driver_state(struct bnx_softc *sc)
6150{
6151	BNX_PRINTF(sc,
6152	    "-----------------------------"
6153	    " Driver State "
6154	    "-----------------------------\n");
6155
6156	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6157	    "address\n", sc);
6158
6159	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6160	    sc->status_block);
6161
6162	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6163	    "address\n", sc->stats_block);
6164
6165	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6166	    "adddress\n", sc->tx_bd_chain);
6167
6168	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6169	    sc->rx_bd_chain);
6170
6171	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6172	    sc->tx_mbuf_ptr);
6173
6174	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6175	    sc->rx_mbuf_ptr);
6176
6177	BNX_PRINTF(sc,
6178	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6179	    sc->interrupts_generated);
6180
6181	BNX_PRINTF(sc,
6182	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6183	    sc->rx_interrupts);
6184
6185	BNX_PRINTF(sc,
6186	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6187	    sc->tx_interrupts);
6188
6189	BNX_PRINTF(sc,
6190	    "         0x%08X - (sc->last_status_idx) status block index\n",
6191	    sc->last_status_idx);
6192
6193	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6194	    sc->tx_prod);
6195
6196	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6197	    sc->tx_cons);
6198
6199	BNX_PRINTF(sc,
6200	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6201	    sc->tx_prod_bseq);
6202
6203	BNX_PRINTF(sc,
6204	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6205	    sc->tx_mbuf_alloc);
6206
6207	BNX_PRINTF(sc,
6208	    "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6209	    sc->used_tx_bd);
6210
6211	BNX_PRINTF(sc,
6212	    "         0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6213	    sc->tx_hi_watermark, sc->max_tx_bd);
6214
6215	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6216	    sc->rx_prod);
6217
6218	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6219	    sc->rx_cons);
6220
6221	BNX_PRINTF(sc,
6222	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6223	    sc->rx_prod_bseq);
6224
6225	BNX_PRINTF(sc,
6226	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6227	    sc->rx_mbuf_alloc);
6228
6229	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6230	    sc->free_rx_bd);
6231
6232	BNX_PRINTF(sc,
6233	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6234	    sc->rx_low_watermark, sc->max_rx_bd);
6235
6236	BNX_PRINTF(sc,
6237	    "         0x%08X - (sc->mbuf_alloc_failed) "
6238	    "mbuf alloc failures\n",
6239	    sc->mbuf_alloc_failed);
6240
6241	BNX_PRINTF(sc,
6242	    "         0x%0X - (sc->mbuf_sim_allocated_failed) "
6243	    "simulated mbuf alloc failures\n",
6244	    sc->mbuf_sim_alloc_failed);
6245
6246	BNX_PRINTF(sc, "-------------------------------------------"
6247	    "-----------------------------\n");
6248}
6249
6250void
6251bnx_dump_hw_state(struct bnx_softc *sc)
6252{
6253	u_int32_t		val1;
6254	int			i;
6255
6256	BNX_PRINTF(sc,
6257	    "----------------------------"
6258	    " Hardware State "
6259	    "----------------------------\n");
6260
6261	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
6262
6263	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6264	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6265	    val1, BNX_MISC_ENABLE_STATUS_BITS);
6266
6267	val1 = REG_RD(sc, BNX_DMA_STATUS);
6268	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6269
6270	val1 = REG_RD(sc, BNX_CTX_STATUS);
6271	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6272
6273	val1 = REG_RD(sc, BNX_EMAC_STATUS);
6274	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6275	    BNX_EMAC_STATUS);
6276
6277	val1 = REG_RD(sc, BNX_RPM_STATUS);
6278	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6279
6280	val1 = REG_RD(sc, BNX_TBDR_STATUS);
6281	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6282	    BNX_TBDR_STATUS);
6283
6284	val1 = REG_RD(sc, BNX_TDMA_STATUS);
6285	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6286	    BNX_TDMA_STATUS);
6287
6288	val1 = REG_RD(sc, BNX_HC_STATUS);
6289	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6290
6291	BNX_PRINTF(sc,
6292	    "----------------------------"
6293	    "----------------"
6294	    "----------------------------\n");
6295
6296	BNX_PRINTF(sc,
6297	    "----------------------------"
6298	    " Register  Dump "
6299	    "----------------------------\n");
6300
6301	for (i = 0x400; i < 0x8000; i += 0x10)
6302		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6303		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6304		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6305
6306	BNX_PRINTF(sc,
6307	    "----------------------------"
6308	    "----------------"
6309	    "----------------------------\n");
6310}
6311
6312void
6313bnx_breakpoint(struct bnx_softc *sc)
6314{
6315	/* Unreachable code to shut the compiler up about unused functions. */
6316	if (0) {
6317   		bnx_dump_txbd(sc, 0, NULL);
6318		bnx_dump_rxbd(sc, 0, NULL);
6319		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6320		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6321		bnx_dump_l2fhdr(sc, 0, NULL);
6322		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6323		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6324		bnx_dump_status_block(sc);
6325		bnx_dump_stats_block(sc);
6326		bnx_dump_driver_state(sc);
6327		bnx_dump_hw_state(sc);
6328	}
6329
6330	bnx_dump_driver_state(sc);
6331	/* Print the important status block fields. */
6332	bnx_dump_status_block(sc);
6333
6334#if 0
6335	/* Call the debugger. */
6336	breakpoint();
6337#endif
6338
6339	return;
6340}
6341#endif
6342