1/*	$OpenBSD: if_bnx.c,v 1.133 2023/11/10 15:51:20 bluhm Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Broadcom Corporation
5 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written consent.
19 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * The following controllers are supported by this driver:
35 *   BCM5706C A2, A3
36 *   BCM5706S A2, A3
37 *   BCM5708C B1, B2
38 *   BCM5708S B1, B2
39 *   BCM5709C A1, C0
40 *   BCM5709S A1, C0
41 *   BCM5716  C0
42 *
43 * The following controllers are not supported by this driver:
44 *   BCM5706C A0, A1
45 *   BCM5706S A0, A1
46 *   BCM5708C A0, B0
47 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1, B2 (pre-production)
49 *   BCM5709S A0, B0, B1, B2 (pre-production)
50 */
51
52#include <dev/pci/if_bnxreg.h>
53
/*
 * State for one loadfirmware(9) image.  "fw" is NULL until
 * bnx_read_firmware() has loaded and byte-swapped the image; the
 * remaining pointers then reference the text/data/rodata/bss/sbss
 * sections for each on-chip processor (COM, RXP, TPAT, TXP) inside
 * that same allocation.
 */
struct bnx_firmware {
	char *filename;			/* image name for loadfirmware(9) */
	struct bnx_firmware_header *fw;	/* swapped header; NULL if unloaded */

	u_int32_t *bnx_COM_FwText;
	u_int32_t *bnx_COM_FwData;
	u_int32_t *bnx_COM_FwRodata;
	u_int32_t *bnx_COM_FwBss;
	u_int32_t *bnx_COM_FwSbss;

	u_int32_t *bnx_RXP_FwText;
	u_int32_t *bnx_RXP_FwData;
	u_int32_t *bnx_RXP_FwRodata;
	u_int32_t *bnx_RXP_FwBss;
	u_int32_t *bnx_RXP_FwSbss;

	u_int32_t *bnx_TPAT_FwText;
	u_int32_t *bnx_TPAT_FwData;
	u_int32_t *bnx_TPAT_FwRodata;
	u_int32_t *bnx_TPAT_FwBss;
	u_int32_t *bnx_TPAT_FwSbss;

	u_int32_t *bnx_TXP_FwText;
	u_int32_t *bnx_TXP_FwData;
	u_int32_t *bnx_TXP_FwRodata;
	u_int32_t *bnx_TXP_FwBss;
	u_int32_t *bnx_TXP_FwSbss;
};
82
/*
 * Firmware images, loaded on demand by bnx_read_firmware() and cached
 * for the lifetime of the driver.  Index with the BNX_FW_* constants:
 * bnx_attachhook() selects "bnx-b09" for BCM5709 chips and "bnx-b06"
 * for everything else.
 */
struct bnx_firmware bnx_firmwares[] = {
	{ "bnx-b06",		NULL },
	{ "bnx-b09",		NULL }
};
#define	BNX_FW_B06	0
#define	BNX_FW_B09	1
89
/*
 * State for one RV2P (receive-path) processor image.  As with struct
 * bnx_firmware, "fw" is NULL until bnx_read_rv2p() has loaded the
 * image; proc1/proc2 then point at the two processors' code within it.
 */
struct bnx_rv2p {
	char *filename;			/* image name for loadfirmware(9) */
	struct bnx_rv2p_header *fw;	/* swapped header; NULL if unloaded */

	u_int32_t *bnx_rv2p_proc1;
	u_int32_t *bnx_rv2p_proc2;
};
97
/*
 * RV2P images, loaded on demand by bnx_read_rv2p().  Index with the
 * constants below: bnx_attachhook() uses BNX_XI90_RV2P for BCM5709 Ax
 * parts, BNX_XI_RV2P for other BCM5709s, and BNX_RV2P otherwise.
 */
struct bnx_rv2p bnx_rv2ps[] = {
	{ "bnx-rv2p",		NULL },
	{ "bnx-xi-rv2p",	NULL },
	{ "bnx-xi90-rv2p",	NULL }
};
#define BNX_RV2P	0
#define BNX_XI_RV2P	1
#define BNX_XI90_RV2P	2
106
107void	nswaph(u_int32_t *p, int wcount);
108
109/****************************************************************************/
110/* BNX Driver Version                                                       */
111/****************************************************************************/
112
113#define BNX_DRIVER_VERSION	"v0.9.6"
114
115/****************************************************************************/
116/* BNX Debug Options                                                        */
117/****************************************************************************/
#ifdef BNX_DEBUG
	/* Default debug message level: warnings and above. */
	u_int32_t bnx_debug = BNX_WARN;

	/*
	 * The bnx_debug_* knobs below inject simulated failures.  The
	 * value selects a probability out of 2^31:
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 =	1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bnx_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bnx_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bnx_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bnx_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bnx_debug_bootcode_running_failure = 0;
#endif
146
147/****************************************************************************/
148/* PCI Device ID Table                                                      */
149/*                                                                          */
150/* Used by bnx_probe() to identify the devices supported by this driver.    */
151/****************************************************************************/
/* NetXtreme II device IDs; the "S" entries are the SerDes variants. */
const struct pci_matchid bnx_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S }
};
162
163/****************************************************************************/
164/* Supported Flash NVRAM device data.                                       */
165/****************************************************************************/
/*
 * One entry per NVRAM strapping value; bnx_init_nvram() matches the
 * device against the "strap" field of each entry.  The expansion
 * entries are placeholders for strappings with no known part.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
255
256/*
257 * The BCM5709 controllers transparently handle the
258 * differences between Atmel 264 byte pages and all
259 * flash devices which use 256 byte pages, so no
260 * logical-to-physical mapping is required in the
261 * driver.
262 */
/* Single NVRAM description used for all BCM5709/5716 parts. */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};
271
272/****************************************************************************/
273/* OpenBSD device entry points.                                             */
274/****************************************************************************/
275int	bnx_probe(struct device *, void *, void *);
276void	bnx_attach(struct device *, struct device *, void *);
277void	bnx_attachhook(struct device *);
278int	bnx_read_firmware(struct bnx_softc *sc, int);
279int	bnx_read_rv2p(struct bnx_softc *sc, int);
280#if 0
281void	bnx_detach(void *);
282#endif
283
284/****************************************************************************/
285/* BNX Debug Data Structure Dump Routines                                   */
286/****************************************************************************/
287#ifdef BNX_DEBUG
288void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
289void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
290void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
291void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
292void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
293void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
294void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
295void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
296void	bnx_dump_status_block(struct bnx_softc *);
297void	bnx_dump_stats_block(struct bnx_softc *);
298void	bnx_dump_driver_state(struct bnx_softc *);
299void	bnx_dump_hw_state(struct bnx_softc *);
300void	bnx_breakpoint(struct bnx_softc *);
301#endif
302
303/****************************************************************************/
304/* BNX Register/Memory Access Routines                                      */
305/****************************************************************************/
306u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
307void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
308void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
309int	bnx_miibus_read_reg(struct device *, int, int);
310void	bnx_miibus_write_reg(struct device *, int, int, int);
311void	bnx_miibus_statchg(struct device *);
312
313/****************************************************************************/
314/* BNX NVRAM Access Routines                                                */
315/****************************************************************************/
316int	bnx_acquire_nvram_lock(struct bnx_softc *);
317int	bnx_release_nvram_lock(struct bnx_softc *);
318void	bnx_enable_nvram_access(struct bnx_softc *);
319void	bnx_disable_nvram_access(struct bnx_softc *);
320int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
321	    u_int32_t);
322int	bnx_init_nvram(struct bnx_softc *);
323int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
324int	bnx_nvram_test(struct bnx_softc *);
325#ifdef BNX_NVRAM_WRITE_SUPPORT
326int	bnx_enable_nvram_write(struct bnx_softc *);
327void	bnx_disable_nvram_write(struct bnx_softc *);
328int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
329int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
330	    u_int32_t);
331int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
332#endif
333
334/****************************************************************************/
335/*                                                                          */
336/****************************************************************************/
337void	bnx_get_media(struct bnx_softc *);
338void	bnx_init_media(struct bnx_softc *);
339int	bnx_dma_alloc(struct bnx_softc *);
340void	bnx_dma_free(struct bnx_softc *);
341void	bnx_release_resources(struct bnx_softc *);
342
343/****************************************************************************/
344/* BNX Firmware Synchronization and Load                                    */
345/****************************************************************************/
346int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
347void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
348	    u_int32_t);
349void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
350	    struct fw_info *);
351void	bnx_init_cpus(struct bnx_softc *);
352
353void	bnx_stop(struct bnx_softc *);
354int	bnx_reset(struct bnx_softc *, u_int32_t);
355int	bnx_chipinit(struct bnx_softc *);
356int	bnx_blockinit(struct bnx_softc *);
357int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
358
359int	bnx_init_tx_chain(struct bnx_softc *);
360void	bnx_init_tx_context(struct bnx_softc *);
361int	bnx_fill_rx_chain(struct bnx_softc *);
362void	bnx_init_rx_context(struct bnx_softc *);
363int	bnx_init_rx_chain(struct bnx_softc *);
364void	bnx_free_rx_chain(struct bnx_softc *);
365void	bnx_free_tx_chain(struct bnx_softc *);
366void	bnx_rxrefill(void *);
367
368int	bnx_tx_encap(struct bnx_softc *, struct mbuf *, int *);
369void	bnx_start(struct ifqueue *);
370int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
371void	bnx_watchdog(struct ifnet *);
372int	bnx_ifmedia_upd(struct ifnet *);
373void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
374void	bnx_init(void *);
375void	bnx_mgmt_init(struct bnx_softc *sc);
376
377void	bnx_init_context(struct bnx_softc *);
378void	bnx_get_mac_addr(struct bnx_softc *);
379void	bnx_set_mac_addr(struct bnx_softc *);
380void	bnx_phy_intr(struct bnx_softc *);
381void	bnx_rx_intr(struct bnx_softc *);
382void	bnx_tx_intr(struct bnx_softc *);
383void	bnx_disable_intr(struct bnx_softc *);
384void	bnx_enable_intr(struct bnx_softc *);
385
386int	bnx_intr(void *);
387void	bnx_iff(struct bnx_softc *);
388void	bnx_stats_update(struct bnx_softc *);
389void	bnx_tick(void *);
390
391/****************************************************************************/
392/* OpenBSD device dispatch table.                                           */
393/****************************************************************************/
/* autoconf(9) glue: softc size plus match/attach entry points. */
const struct cfattach bnx_ca = {
	sizeof(struct bnx_softc), bnx_probe, bnx_attach
};
397
/* Driver definition: instances are network interfaces named "bnx". */
struct cfdriver bnx_cd = {
	NULL, "bnx", DV_IFNET
};
401
402/****************************************************************************/
403/* Device probe function.                                                   */
404/*                                                                          */
405/* Compares the device to the driver's list of supported devices and        */
406/* reports back to the OS whether this is the right driver for the device.  */
407/*                                                                          */
408/* Returns:                                                                 */
409/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
410/****************************************************************************/
411int
412bnx_probe(struct device *parent, void *match, void *aux)
413{
414	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
415	    nitems(bnx_devices)));
416}
417
/*
 * Byte-swap a buffer of 32-bit words from network to host order in
 * place.  "wcount" is the buffer length in bytes (the firmware headers
 * store section lengths in bytes); one word is converted per iteration.
 * Compare with "> 0" so that a length that is not a multiple of four
 * (e.g. from a corrupt firmware header) terminates instead of looping
 * forever once wcount goes negative.
 */
void
nswaph(u_int32_t *p, int wcount)
{
	for (; wcount > 0; wcount -= 4) {
		*p = ntohl(*p);
		p++;
	}
}
426
427int
428bnx_read_firmware(struct bnx_softc *sc, int idx)
429{
430	struct bnx_firmware *bfw = &bnx_firmwares[idx];
431	struct bnx_firmware_header *hdr = bfw->fw;
432	u_char *p, *q;
433	size_t size;
434	int error;
435
436	if (hdr != NULL)
437		return (0);
438
439	if ((error = loadfirmware(bfw->filename, &p, &size)) != 0)
440		return (error);
441
442	if (size < sizeof(struct bnx_firmware_header)) {
443		free(p, M_DEVBUF, size);
444		return (EINVAL);
445	}
446
447	hdr = (struct bnx_firmware_header *)p;
448
449	hdr->bnx_COM_FwReleaseMajor = ntohl(hdr->bnx_COM_FwReleaseMajor);
450	hdr->bnx_COM_FwReleaseMinor = ntohl(hdr->bnx_COM_FwReleaseMinor);
451	hdr->bnx_COM_FwReleaseFix = ntohl(hdr->bnx_COM_FwReleaseFix);
452	hdr->bnx_COM_FwStartAddr = ntohl(hdr->bnx_COM_FwStartAddr);
453	hdr->bnx_COM_FwTextAddr = ntohl(hdr->bnx_COM_FwTextAddr);
454	hdr->bnx_COM_FwTextLen = ntohl(hdr->bnx_COM_FwTextLen);
455	hdr->bnx_COM_FwDataAddr = ntohl(hdr->bnx_COM_FwDataAddr);
456	hdr->bnx_COM_FwDataLen = ntohl(hdr->bnx_COM_FwDataLen);
457	hdr->bnx_COM_FwRodataAddr = ntohl(hdr->bnx_COM_FwRodataAddr);
458	hdr->bnx_COM_FwRodataLen = ntohl(hdr->bnx_COM_FwRodataLen);
459	hdr->bnx_COM_FwBssAddr = ntohl(hdr->bnx_COM_FwBssAddr);
460	hdr->bnx_COM_FwBssLen = ntohl(hdr->bnx_COM_FwBssLen);
461	hdr->bnx_COM_FwSbssAddr = ntohl(hdr->bnx_COM_FwSbssAddr);
462	hdr->bnx_COM_FwSbssLen = ntohl(hdr->bnx_COM_FwSbssLen);
463
464	hdr->bnx_RXP_FwReleaseMajor = ntohl(hdr->bnx_RXP_FwReleaseMajor);
465	hdr->bnx_RXP_FwReleaseMinor = ntohl(hdr->bnx_RXP_FwReleaseMinor);
466	hdr->bnx_RXP_FwReleaseFix = ntohl(hdr->bnx_RXP_FwReleaseFix);
467	hdr->bnx_RXP_FwStartAddr = ntohl(hdr->bnx_RXP_FwStartAddr);
468	hdr->bnx_RXP_FwTextAddr = ntohl(hdr->bnx_RXP_FwTextAddr);
469	hdr->bnx_RXP_FwTextLen = ntohl(hdr->bnx_RXP_FwTextLen);
470	hdr->bnx_RXP_FwDataAddr = ntohl(hdr->bnx_RXP_FwDataAddr);
471	hdr->bnx_RXP_FwDataLen = ntohl(hdr->bnx_RXP_FwDataLen);
472	hdr->bnx_RXP_FwRodataAddr = ntohl(hdr->bnx_RXP_FwRodataAddr);
473	hdr->bnx_RXP_FwRodataLen = ntohl(hdr->bnx_RXP_FwRodataLen);
474	hdr->bnx_RXP_FwBssAddr = ntohl(hdr->bnx_RXP_FwBssAddr);
475	hdr->bnx_RXP_FwBssLen = ntohl(hdr->bnx_RXP_FwBssLen);
476	hdr->bnx_RXP_FwSbssAddr = ntohl(hdr->bnx_RXP_FwSbssAddr);
477	hdr->bnx_RXP_FwSbssLen = ntohl(hdr->bnx_RXP_FwSbssLen);
478
479	hdr->bnx_TPAT_FwReleaseMajor = ntohl(hdr->bnx_TPAT_FwReleaseMajor);
480	hdr->bnx_TPAT_FwReleaseMinor = ntohl(hdr->bnx_TPAT_FwReleaseMinor);
481	hdr->bnx_TPAT_FwReleaseFix = ntohl(hdr->bnx_TPAT_FwReleaseFix);
482	hdr->bnx_TPAT_FwStartAddr = ntohl(hdr->bnx_TPAT_FwStartAddr);
483	hdr->bnx_TPAT_FwTextAddr = ntohl(hdr->bnx_TPAT_FwTextAddr);
484	hdr->bnx_TPAT_FwTextLen = ntohl(hdr->bnx_TPAT_FwTextLen);
485	hdr->bnx_TPAT_FwDataAddr = ntohl(hdr->bnx_TPAT_FwDataAddr);
486	hdr->bnx_TPAT_FwDataLen = ntohl(hdr->bnx_TPAT_FwDataLen);
487	hdr->bnx_TPAT_FwRodataAddr = ntohl(hdr->bnx_TPAT_FwRodataAddr);
488	hdr->bnx_TPAT_FwRodataLen = ntohl(hdr->bnx_TPAT_FwRodataLen);
489	hdr->bnx_TPAT_FwBssAddr = ntohl(hdr->bnx_TPAT_FwBssAddr);
490	hdr->bnx_TPAT_FwBssLen = ntohl(hdr->bnx_TPAT_FwBssLen);
491	hdr->bnx_TPAT_FwSbssAddr = ntohl(hdr->bnx_TPAT_FwSbssAddr);
492	hdr->bnx_TPAT_FwSbssLen = ntohl(hdr->bnx_TPAT_FwSbssLen);
493
494	hdr->bnx_TXP_FwReleaseMajor = ntohl(hdr->bnx_TXP_FwReleaseMajor);
495	hdr->bnx_TXP_FwReleaseMinor = ntohl(hdr->bnx_TXP_FwReleaseMinor);
496	hdr->bnx_TXP_FwReleaseFix = ntohl(hdr->bnx_TXP_FwReleaseFix);
497	hdr->bnx_TXP_FwStartAddr = ntohl(hdr->bnx_TXP_FwStartAddr);
498	hdr->bnx_TXP_FwTextAddr = ntohl(hdr->bnx_TXP_FwTextAddr);
499	hdr->bnx_TXP_FwTextLen = ntohl(hdr->bnx_TXP_FwTextLen);
500	hdr->bnx_TXP_FwDataAddr = ntohl(hdr->bnx_TXP_FwDataAddr);
501	hdr->bnx_TXP_FwDataLen = ntohl(hdr->bnx_TXP_FwDataLen);
502	hdr->bnx_TXP_FwRodataAddr = ntohl(hdr->bnx_TXP_FwRodataAddr);
503	hdr->bnx_TXP_FwRodataLen = ntohl(hdr->bnx_TXP_FwRodataLen);
504	hdr->bnx_TXP_FwBssAddr = ntohl(hdr->bnx_TXP_FwBssAddr);
505	hdr->bnx_TXP_FwBssLen = ntohl(hdr->bnx_TXP_FwBssLen);
506	hdr->bnx_TXP_FwSbssAddr = ntohl(hdr->bnx_TXP_FwSbssAddr);
507	hdr->bnx_TXP_FwSbssLen = ntohl(hdr->bnx_TXP_FwSbssLen);
508
509	q = p + sizeof(*hdr);
510
511	bfw->bnx_COM_FwText = (u_int32_t *)q;
512	q += hdr->bnx_COM_FwTextLen;
513	nswaph(bfw->bnx_COM_FwText, hdr->bnx_COM_FwTextLen);
514	bfw->bnx_COM_FwData = (u_int32_t *)q;
515	q += hdr->bnx_COM_FwDataLen;
516	nswaph(bfw->bnx_COM_FwData, hdr->bnx_COM_FwDataLen);
517	bfw->bnx_COM_FwRodata = (u_int32_t *)q;
518	q += hdr->bnx_COM_FwRodataLen;
519	nswaph(bfw->bnx_COM_FwRodata, hdr->bnx_COM_FwRodataLen);
520	bfw->bnx_COM_FwBss = (u_int32_t *)q;
521	q += hdr->bnx_COM_FwBssLen;
522	nswaph(bfw->bnx_COM_FwBss, hdr->bnx_COM_FwBssLen);
523	bfw->bnx_COM_FwSbss = (u_int32_t *)q;
524	q += hdr->bnx_COM_FwSbssLen;
525	nswaph(bfw->bnx_COM_FwSbss, hdr->bnx_COM_FwSbssLen);
526
527	bfw->bnx_RXP_FwText = (u_int32_t *)q;
528	q += hdr->bnx_RXP_FwTextLen;
529	nswaph(bfw->bnx_RXP_FwText, hdr->bnx_RXP_FwTextLen);
530	bfw->bnx_RXP_FwData = (u_int32_t *)q;
531	q += hdr->bnx_RXP_FwDataLen;
532	nswaph(bfw->bnx_RXP_FwData, hdr->bnx_RXP_FwDataLen);
533	bfw->bnx_RXP_FwRodata = (u_int32_t *)q;
534	q += hdr->bnx_RXP_FwRodataLen;
535	nswaph(bfw->bnx_RXP_FwRodata, hdr->bnx_RXP_FwRodataLen);
536	bfw->bnx_RXP_FwBss = (u_int32_t *)q;
537	q += hdr->bnx_RXP_FwBssLen;
538	nswaph(bfw->bnx_RXP_FwBss, hdr->bnx_RXP_FwBssLen);
539	bfw->bnx_RXP_FwSbss = (u_int32_t *)q;
540	q += hdr->bnx_RXP_FwSbssLen;
541	nswaph(bfw->bnx_RXP_FwSbss, hdr->bnx_RXP_FwSbssLen);
542
543	bfw->bnx_TPAT_FwText = (u_int32_t *)q;
544	q += hdr->bnx_TPAT_FwTextLen;
545	nswaph(bfw->bnx_TPAT_FwText, hdr->bnx_TPAT_FwTextLen);
546	bfw->bnx_TPAT_FwData = (u_int32_t *)q;
547	q += hdr->bnx_TPAT_FwDataLen;
548	nswaph(bfw->bnx_TPAT_FwData, hdr->bnx_TPAT_FwDataLen);
549	bfw->bnx_TPAT_FwRodata = (u_int32_t *)q;
550	q += hdr->bnx_TPAT_FwRodataLen;
551	nswaph(bfw->bnx_TPAT_FwRodata, hdr->bnx_TPAT_FwRodataLen);
552	bfw->bnx_TPAT_FwBss = (u_int32_t *)q;
553	q += hdr->bnx_TPAT_FwBssLen;
554	nswaph(bfw->bnx_TPAT_FwBss, hdr->bnx_TPAT_FwBssLen);
555	bfw->bnx_TPAT_FwSbss = (u_int32_t *)q;
556	q += hdr->bnx_TPAT_FwSbssLen;
557	nswaph(bfw->bnx_TPAT_FwSbss, hdr->bnx_TPAT_FwSbssLen);
558
559	bfw->bnx_TXP_FwText = (u_int32_t *)q;
560	q += hdr->bnx_TXP_FwTextLen;
561	nswaph(bfw->bnx_TXP_FwText, hdr->bnx_TXP_FwTextLen);
562	bfw->bnx_TXP_FwData = (u_int32_t *)q;
563	q += hdr->bnx_TXP_FwDataLen;
564	nswaph(bfw->bnx_TXP_FwData, hdr->bnx_TXP_FwDataLen);
565	bfw->bnx_TXP_FwRodata = (u_int32_t *)q;
566	q += hdr->bnx_TXP_FwRodataLen;
567	nswaph(bfw->bnx_TXP_FwRodata, hdr->bnx_TXP_FwRodataLen);
568	bfw->bnx_TXP_FwBss = (u_int32_t *)q;
569	q += hdr->bnx_TXP_FwBssLen;
570	nswaph(bfw->bnx_TXP_FwBss, hdr->bnx_TXP_FwBssLen);
571	bfw->bnx_TXP_FwSbss = (u_int32_t *)q;
572	q += hdr->bnx_TXP_FwSbssLen;
573	nswaph(bfw->bnx_TXP_FwSbss, hdr->bnx_TXP_FwSbssLen);
574
575	if (q - p != size) {
576		free(p, M_DEVBUF, size);
577		hdr = NULL;
578		return EINVAL;
579	}
580
581	bfw->fw = hdr;
582
583	return (0);
584}
585
586int
587bnx_read_rv2p(struct bnx_softc *sc, int idx)
588{
589	struct bnx_rv2p *rv2p = &bnx_rv2ps[idx];
590	struct bnx_rv2p_header *hdr = rv2p->fw;
591	u_char *p, *q;
592	size_t size;
593	int error;
594
595	if (hdr != NULL)
596		return (0);
597
598	if ((error = loadfirmware(rv2p->filename, &p, &size)) != 0)
599		return (error);
600
601	if (size < sizeof(struct bnx_rv2p_header)) {
602		free(p, M_DEVBUF, size);
603		return (EINVAL);
604	}
605
606	hdr = (struct bnx_rv2p_header *)p;
607
608	hdr->bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
609	hdr->bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
610
611	q = p + sizeof(*hdr);
612
613	rv2p->bnx_rv2p_proc1 = (u_int32_t *)q;
614	q += hdr->bnx_rv2p_proc1len;
615	nswaph(rv2p->bnx_rv2p_proc1, hdr->bnx_rv2p_proc1len);
616	rv2p->bnx_rv2p_proc2 = (u_int32_t *)q;
617	q += hdr->bnx_rv2p_proc2len;
618	nswaph(rv2p->bnx_rv2p_proc2, hdr->bnx_rv2p_proc2len);
619
620	if (q - p != size) {
621		free(p, M_DEVBUF, size);
622		return EINVAL;
623	}
624
625	rv2p->fw = hdr;
626
627	return (0);
628}
629
630
631/****************************************************************************/
632/* Device attach function.                                                  */
633/*                                                                          */
634/* Allocates device resources, performs secondary chip identification,      */
635/* resets and initializes the hardware, and initializes driver instance     */
636/* variables.                                                               */
637/*                                                                          */
638/* Returns:                                                                 */
639/*   0 on success, positive value on failure.                               */
640/****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc	*sc = (struct bnx_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	u_int32_t		val;
	pcireg_t		memtype;
	const char 		*intrstr = NULL;

	/* Keep a copy of the attach args for bnx_attachhook(). */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &sc->bnx_ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* On PCI-X, derive the bus speed from the clock register. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih,
	    IPL_NET | IPL_MPSAFE, bnx_intr, sc, sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bnx_attach_fail;
	}

	printf(": %s\n", intrstr);

	/*
	 * Defer the rest of the attach until the root filesystem is
	 * mounted, since the firmware must be loaded from disk.
	 */
	config_mountroot(self, bnx_attachhook);
	return;

bnx_attach_fail:
	bnx_release_resources(sc);
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
763
/****************************************************************************/
/* Second-stage attach, scheduled via config_mountroot() so the firmware    */
/* and RV2P images can be loaded from the mounted root file system.         */
/* Loads firmware, resets and initializes the controller, allocates DMA     */
/* resources, and attaches the ifnet and MII interfaces.                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attachhook(struct device *self)
{
	struct bnx_softc *sc = (struct bnx_softc *)self;
	struct pci_attach_args *pa = &sc->bnx_pa;
	struct ifnet		*ifp;
	int			error, mii_flags = 0;
	int			fw = BNX_FW_B06;
	int			rv2p = BNX_RV2P;

	/* 5709-family controllers use different firmware and RV2P images. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		fw = BNX_FW_B09;
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = BNX_XI90_RV2P;
		else
			rv2p = BNX_XI_RV2P;
	}

	/* Load the main firmware image; attach cannot proceed without it. */
	if ((error = bnx_read_firmware(sc, fw)) != 0) {
		printf("%s: error %d, could not read firmware\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Load the RV2P (receive processor) image. */
	if ((error = bnx_read_rv2p(sc, rv2p)) != 0) {
		printf("%s: error %d, could not read rv2p\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf("%s: Controller initialization failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf("%s: NVRAM test failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/* Update statistics once every second. */
	/* 1000000us, rounded down so the low byte is clear — presumably a
	 * hardware requirement of the stats ticks register; confirm against
	 * the chip documentation. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_qstart = bnx_start;
	ifp->if_watchdog = bnx_watchdog;
	/* Advertise jumbo support; subtract the Ethernet header size. */
	ifp->if_hardmtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN -
	    sizeof(struct ether_header);
	ifq_init_maxlen(&ifp->if_snd, USABLE_TX_BD - 1);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	sc->mbuf_alloc_size = BNX_MAX_MRU;

	printf("%s: address %s\n", sc->bnx_dev.dv_xname,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Hook up the MII bus callbacks. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	mii_flags |= MIIF_DOPAUSE;
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    sc->bnx_phy_addr, MII_OFFSET_ANY, mii_flags);

	/* Fall back to manual media if no PHY probed. */
	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bnx_timeout, bnx_tick, sc);
	timeout_set(&sc->bnx_rxrefill, bnx_rxrefill, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	/* Handle interrupts */
	sc->bnx_flags |= BNX_ACTIVE_FLAG;

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
947
948/****************************************************************************/
949/* Device detach function.                                                  */
950/*                                                                          */
951/* Stops the controller, resets the controller, and releases resources.     */
952/*                                                                          */
953/* Returns:                                                                 */
954/*   0 on success, positive value on failure.                               */
955/****************************************************************************/
956#if 0
957void
958bnx_detach(void *xsc)
959{
960	struct bnx_softc *sc;
961	struct ifnet *ifp = &sc->arpcom.ac_if;
962
963	sc = device_get_softc(dev);
964
965	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
966
967	/* Stop and reset the controller. */
968	bnx_stop(sc);
969	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
970
971	ether_ifdetach(ifp);
972
973	/* If we have a child device on the MII bus remove it too. */
974	bus_generic_detach(dev);
975	device_delete_child(dev, sc->bnx_mii);
976
977	/* Release all remaining resources. */
978	bnx_release_resources(sc);
979
980	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
981
982	return(0);
983}
984#endif
985
986/****************************************************************************/
987/* Indirect register read.                                                  */
988/*                                                                          */
989/* Reads NetXtreme II registers using an index/data register pair in PCI    */
990/* configuration space.  Using this mechanism avoids issues with posted     */
991/* reads but is much slower than memory-mapped I/O.                         */
992/*                                                                          */
993/* Returns:                                                                 */
994/*   The value of the register.                                             */
995/****************************************************************************/
996u_int32_t
997bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
998{
999	struct pci_attach_args	*pa = &(sc->bnx_pa);
1000
1001	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1002	    offset);
1003#ifdef BNX_DEBUG
1004	{
1005		u_int32_t val;
1006		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
1007		    BNX_PCICFG_REG_WINDOW);
1008		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
1009		    "val = 0x%08X\n", __FUNCTION__, offset, val);
1010		return (val);
1011	}
1012#else
1013	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
1014#endif
1015}
1016
1017/****************************************************************************/
1018/* Indirect register write.                                                 */
1019/*                                                                          */
1020/* Writes NetXtreme II registers using an index/data register pair in PCI   */
1021/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1023/*                                                                          */
1024/* Returns:                                                                 */
1025/*   Nothing.                                                               */
1026/****************************************************************************/
1027void
1028bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1029{
1030	struct pci_attach_args  *pa = &(sc->bnx_pa);
1031
1032	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1033		__FUNCTION__, offset, val);
1034
1035	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1036	    offset);
1037	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1038}
1039
1040/****************************************************************************/
1041/* Context memory write.                                                    */
1042/*                                                                          */
1043/* The NetXtreme II controller uses context memory to track connection      */
1044/* information for L2 and higher network protocols.                         */
1045/*                                                                          */
1046/* Returns:                                                                 */
1047/*   Nothing.                                                               */
1048/****************************************************************************/
1049void
1050bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
1051    u_int32_t ctx_val)
1052{
1053	u_int32_t idx, offset = ctx_offset + cid_addr;
1054	u_int32_t val, retry_cnt = 5;
1055
1056	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1057		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
1058		REG_WR(sc, BNX_CTX_CTX_CTRL,
1059		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));
1060
1061		for (idx = 0; idx < retry_cnt; idx++) {
1062			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
1063			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
1064				break;
1065			DELAY(5);
1066		}
1067
1068#if 0
1069		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
1070			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
1071				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1072				__FILE__, __LINE__, cid_addr, ctx_offset);
1073#endif
1074
1075	} else {
1076		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
1077		REG_WR(sc, BNX_CTX_DATA, ctx_val);
1078	}
1079}
1080
1081/****************************************************************************/
1082/* PHY register read.                                                       */
1083/*                                                                          */
1084/* Implements register reads on the MII bus.                                */
1085/*                                                                          */
1086/* Returns:                                                                 */
1087/*   The value of the register.                                             */
1088/****************************************************************************/
int
bnx_miibus_read_reg(struct device *dev, int phy, int reg)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val;
	int			i;

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/* Pause hardware auto-polling while we drive the MDIO bus. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back to flush, then let the bus settle. */
		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the read command for this PHY/register pair. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/* Poll until the controller clears the BUSY bit. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* On timeout report the failure and return 0 instead of stale data;
	 * on success re-read the comm register for the data word. */
	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Restore hardware auto-polling if we disabled it above. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Only the low 16 bits carry the register value. */
	return (val & 0xffff);
}
1158
1159/****************************************************************************/
1160/* PHY register write.                                                      */
1161/*                                                                          */
1162/* Implements register writes on the MII bus.                               */
1163/*                                                                          */
1164/* Returns:                                                                 */
1165/*   The value of the register.                                             */
1166/****************************************************************************/
1167void
1168bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
1169{
1170	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1171	u_int32_t		val1;
1172	int			i;
1173
1174	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
1175	    "val = 0x%04X\n", __FUNCTION__,
1176	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1177
1178	/*
1179	 * The BCM5709S PHY is an IEEE Clause 45 PHY
1180	 * with special mappings to work with IEEE
1181	 * Clause 22 register accesses.
1182	 */
1183	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1184		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1185			reg += 0x10;
1186	}
1187
1188	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1189		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1190		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1191
1192		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1193		REG_RD(sc, BNX_EMAC_MDIO_MODE);
1194
1195		DELAY(40);
1196	}
1197
1198	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
1199	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
1200	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
1201	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);
1202
1203	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1204		DELAY(10);
1205
1206		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1207		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1208			DELAY(5);
1209			break;
1210		}
1211	}
1212
1213	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
1214		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
1215		    __LINE__);
1216	}
1217
1218	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1219		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1220		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1221
1222		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1223		REG_RD(sc, BNX_EMAC_MDIO_MODE);
1224
1225		DELAY(40);
1226	}
1227}
1228
1229/****************************************************************************/
1230/* MII bus status change.                                                   */
1231/*                                                                          */
1232/* Called by the MII bus driver when the PHY establishes link to set the    */
1233/* MAC interface registers.                                                 */
1234/*                                                                          */
1235/* Returns:                                                                 */
1236/*   Nothing.                                                               */
1237/****************************************************************************/
1238void
1239bnx_miibus_statchg(struct device *dev)
1240{
1241	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1242	struct mii_data		*mii = &sc->bnx_mii;
1243	u_int32_t		rx_mode = sc->rx_mode;
1244	int			val;
1245
1246	val = REG_RD(sc, BNX_EMAC_MODE);
1247	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1248		BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1249		BNX_EMAC_MODE_25G);
1250
1251	/*
1252	 * Get flow control negotiation result.
1253	 */
1254	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1255	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
1256		sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1257		mii->mii_media_active &= ~IFM_ETH_FMASK;
1258	}
1259
1260	/* Set MII or GMII interface based on the speed
1261	 * negotiated by the PHY.
1262	 */
1263	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1264	case IFM_10_T:
1265		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1266			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1267			val |= BNX_EMAC_MODE_PORT_MII_10;
1268			break;
1269		}
1270		/* FALLTHROUGH */
1271	case IFM_100_TX:
1272		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1273		val |= BNX_EMAC_MODE_PORT_MII;
1274		break;
1275	case IFM_2500_SX:
1276		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1277		val |= BNX_EMAC_MODE_25G;
1278		/* FALLTHROUGH */
1279	case IFM_1000_T:
1280	case IFM_1000_SX:
1281		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
1282		val |= BNX_EMAC_MODE_PORT_GMII;
1283		break;
1284	default:
1285		val |= BNX_EMAC_MODE_PORT_GMII;
1286		break;
1287	}
1288
1289	/* Set half or full duplex based on the duplicity
1290	 * negotiated by the PHY.
1291	 */
1292	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1293		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1294		val |= BNX_EMAC_MODE_HALF_DUPLEX;
1295	} else
1296		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1297
1298	REG_WR(sc, BNX_EMAC_MODE, val);
1299
1300	/*
1301	 * 802.3x flow control
1302	 */
1303	if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
1304		DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
1305		rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
1306	} else {
1307		DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
1308		rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
1309	}
1310
1311	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
1312		DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
1313		BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1314	} else {
1315		DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
1316		BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1317	}
1318
1319	/* Only make changes if the receive mode has actually changed. */
1320	if (rx_mode != sc->rx_mode) {
1321		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
1322		    rx_mode);
1323
1324		sc->rx_mode = rx_mode;
1325		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
1326	}
1327}
1328
1329/****************************************************************************/
1330/* Acquire NVRAM lock.                                                      */
1331/*                                                                          */
1332/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
/* for use by the driver.                                                   */
1335/*                                                                          */
1336/* Returns:                                                                 */
1337/*   0 on success, positive value on failure.                               */
1338/****************************************************************************/
1339int
1340bnx_acquire_nvram_lock(struct bnx_softc *sc)
1341{
1342	u_int32_t		val;
1343	int			j;
1344
1345	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1346
1347	/* Request access to the flash interface. */
1348	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1349	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1350		val = REG_RD(sc, BNX_NVM_SW_ARB);
1351		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1352			break;
1353
1354		DELAY(5);
1355	}
1356
1357	if (j >= NVRAM_TIMEOUT_COUNT) {
1358		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1359		return (EBUSY);
1360	}
1361
1362	return (0);
1363}
1364
1365/****************************************************************************/
1366/* Release NVRAM lock.                                                      */
1367/*                                                                          */
1368/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
/* for use by the driver.                                                   */
1371/*                                                                          */
1372/* Returns:                                                                 */
1373/*   0 on success, positive value on failure.                               */
1374/****************************************************************************/
1375int
1376bnx_release_nvram_lock(struct bnx_softc *sc)
1377{
1378	int			j;
1379	u_int32_t		val;
1380
1381	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1382
1383	/* Relinquish nvram interface. */
1384	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1385
1386	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1387		val = REG_RD(sc, BNX_NVM_SW_ARB);
1388		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1389			break;
1390
1391		DELAY(5);
1392	}
1393
1394	if (j >= NVRAM_TIMEOUT_COUNT) {
1395		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
1396		return (EBUSY);
1397	}
1398
1399	return (0);
1400}
1401
1402#ifdef BNX_NVRAM_WRITE_SUPPORT
1403/****************************************************************************/
1404/* Enable NVRAM write access.                                               */
1405/*                                                                          */
1406/* Before writing to NVRAM the caller must enable NVRAM writes.             */
1407/*                                                                          */
1408/* Returns:                                                                 */
1409/*   0 on success, positive value on failure.                               */
1410/****************************************************************************/
1411int
1412bnx_enable_nvram_write(struct bnx_softc *sc)
1413{
1414	u_int32_t		val;
1415
1416	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1417
1418	val = REG_RD(sc, BNX_MISC_CFG);
1419	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1420
1421	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1422		int j;
1423
1424		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1425		REG_WR(sc, BNX_NVM_COMMAND,
1426		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1427
1428		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1429			DELAY(5);
1430
1431			val = REG_RD(sc, BNX_NVM_COMMAND);
1432			if (val & BNX_NVM_COMMAND_DONE)
1433				break;
1434		}
1435
1436		if (j >= NVRAM_TIMEOUT_COUNT) {
1437			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1438			return (EBUSY);
1439		}
1440	}
1441
1442	return (0);
1443}
1444
1445/****************************************************************************/
1446/* Disable NVRAM write access.                                              */
1447/*                                                                          */
1448/* When the caller is finished writing to NVRAM write access must be        */
1449/* disabled.                                                                */
1450/*                                                                          */
1451/* Returns:                                                                 */
1452/*   Nothing.                                                               */
1453/****************************************************************************/
1454void
1455bnx_disable_nvram_write(struct bnx_softc *sc)
1456{
1457	u_int32_t		val;
1458
1459	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1460
1461	val = REG_RD(sc, BNX_MISC_CFG);
1462	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1463}
1464#endif
1465
1466/****************************************************************************/
1467/* Enable NVRAM access.                                                     */
1468/*                                                                          */
1469/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1471/*                                                                          */
1472/* Returns:                                                                 */
1473/*   Nothing.                                                               */
1474/****************************************************************************/
1475void
1476bnx_enable_nvram_access(struct bnx_softc *sc)
1477{
1478	u_int32_t		val;
1479
1480	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1481
1482	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1483	/* Enable both bits, even on read. */
1484	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1485	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1486}
1487
1488/****************************************************************************/
1489/* Disable NVRAM access.                                                    */
1490/*                                                                          */
1491/* When the caller is finished accessing NVRAM access must be disabled.     */
1492/*                                                                          */
1493/* Returns:                                                                 */
1494/*   Nothing.                                                               */
1495/****************************************************************************/
1496void
1497bnx_disable_nvram_access(struct bnx_softc *sc)
1498{
1499	u_int32_t		val;
1500
1501	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1502
1503	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1504
1505	/* Disable both bits, even after read. */
1506	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1507	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1508}
1509
1510#ifdef BNX_NVRAM_WRITE_SUPPORT
1511/****************************************************************************/
1512/* Erase NVRAM page before writing.                                         */
1513/*                                                                          */
1514/* Non-buffered flash parts require that a page be erased before it is      */
1515/* written.                                                                 */
1516/*                                                                          */
1517/* Returns:                                                                 */
1518/*   0 on success, positive value on failure.                               */
1519/****************************************************************************/
1520int
1521bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
1522{
1523	u_int32_t		cmd;
1524	int			j;
1525
1526	/* Buffered flash doesn't require an erase. */
1527	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
1528		return (0);
1529
1530	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");
1531
1532	/* Build an erase command. */
1533	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
1534	    BNX_NVM_COMMAND_DOIT;
1535
1536	/*
1537	 * Clear the DONE bit separately, set the NVRAM address to erase,
1538	 * and issue the erase command.
1539	 */
1540	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1541	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1542	REG_WR(sc, BNX_NVM_COMMAND, cmd);
1543
1544	/* Wait for completion. */
1545	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1546		u_int32_t val;
1547
1548		DELAY(5);
1549
1550		val = REG_RD(sc, BNX_NVM_COMMAND);
1551		if (val & BNX_NVM_COMMAND_DONE)
1552			break;
1553	}
1554
1555	if (j >= NVRAM_TIMEOUT_COUNT) {
1556		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
1557		return (EBUSY);
1558	}
1559
1560	return (0);
1561}
1562#endif /* BNX_NVRAM_WRITE_SUPPORT */
1563
1564/****************************************************************************/
1565/* Read a dword (32 bits) from NVRAM.                                       */
1566/*                                                                          */
1567/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1568/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1569/*                                                                          */
1570/* Returns:                                                                 */
1571/*   0 on success and the 32 bit value read, positive value on failure.     */
1572/****************************************************************************/
1573int
1574bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
1575    u_int8_t *ret_val, u_int32_t cmd_flags)
1576{
1577	u_int32_t		cmd;
1578	int			i, rc = 0;
1579
1580	/* Build the command word. */
1581	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;
1582
1583	/* Calculate the offset for buffered flash if translation is used. */
1584	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1585		offset = ((offset / sc->bnx_flash_info->page_size) <<
1586		    sc->bnx_flash_info->page_bits) +
1587		    (offset % sc->bnx_flash_info->page_size);
1588	}
1589
1590	/*
1591	 * Clear the DONE bit separately, set the address to read,
1592	 * and issue the read.
1593	 */
1594	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1595	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1596	REG_WR(sc, BNX_NVM_COMMAND, cmd);
1597
1598	/* Wait for completion. */
1599	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1600		u_int32_t val;
1601
1602		DELAY(5);
1603
1604		val = REG_RD(sc, BNX_NVM_COMMAND);
1605		if (val & BNX_NVM_COMMAND_DONE) {
1606			val = REG_RD(sc, BNX_NVM_READ);
1607
1608			val = bnx_be32toh(val);
1609			memcpy(ret_val, &val, 4);
1610			break;
1611		}
1612	}
1613
1614	/* Check for errors. */
1615	if (i >= NVRAM_TIMEOUT_COUNT) {
1616		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
1617		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
1618		rc = EBUSY;
1619	}
1620
1621	return(rc);
1622}
1623
1624#ifdef BNX_NVRAM_WRITE_SUPPORT
1625/****************************************************************************/
1626/* Write a dword (32 bits) to NVRAM.                                        */
1627/*                                                                          */
1628/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1629/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1630/* enabled NVRAM write access.                                              */
1631/*                                                                          */
1632/* Returns:                                                                 */
1633/*   0 on success, positive value on failure.                               */
1634/****************************************************************************/
1635int
1636bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
1637    u_int32_t cmd_flags)
1638{
1639	u_int32_t		cmd, val32;
1640	int			j;
1641
1642	/* Build the command word. */
1643	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1644
1645	/* Calculate the offset for buffered flash if translation is used. */
1646	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1647		offset = ((offset / sc->bnx_flash_info->page_size) <<
1648		    sc->bnx_flash_info->page_bits) +
1649		    (offset % sc->bnx_flash_info->page_size);
1650	}
1651
1652	/*
1653	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1654	 * set the NVRAM address to write, and issue the write command
1655	 */
1656	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1657	memcpy(&val32, val, 4);
1658	val32 = htobe32(val32);
1659	REG_WR(sc, BNX_NVM_WRITE, val32);
1660	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1661	REG_WR(sc, BNX_NVM_COMMAND, cmd);
1662
1663	/* Wait for completion. */
1664	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1665		DELAY(5);
1666
1667		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1668			break;
1669	}
1670	if (j >= NVRAM_TIMEOUT_COUNT) {
1671		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1672		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
1673		return (EBUSY);
1674	}
1675
1676	return (0);
1677}
1678#endif /* BNX_NVRAM_WRITE_SUPPORT */
1679
1680/****************************************************************************/
1681/* Initialize NVRAM access.                                                 */
1682/*                                                                          */
1683/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1684/* access that device.                                                      */
1685/*                                                                          */
1686/* Returns:                                                                 */
1687/*   0 on success, positive value on failure.                               */
1688/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	u_int32_t		val;
	int			j, entry_count, rc = 0;
	struct flash_spec	*flash;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * The 5709 always uses a fixed flash description; skip the
	 * strap-based lookup (and the j == entry_count check below)
	 * entirely and just pick up the size from shared memory.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc,BNX_INFO_LOAD,
			"bnx_init_nvram(): Flash WAS reconfigured.\n");

		/* Match against the backup strap bits only. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u_int32_t mask;

		DBPRINT(sc,BNX_INFO_LOAD,
			"bnx_init_nvram(): Flash was NOT reconfigured.\n");

		/* Bit 23 selects which strap field identifies the device. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return (rc);

				/*
				 * Reconfigure the flash interface with the
				 * parameters for the matched device.  The
				 * CFG1-3/WRITE1 writes must happen with
				 * NVRAM access enabled and under the lock.
				 */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found (loop ran to completion). */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
	}

bnx_init_nvram_get_flash_size:
	/* Write the flash config data to the shared memory interface. */
	/*
	 * NOTE(review): a non-zero NVM size field in shared memory
	 * overrides the table's total_size — presumably set by bootcode;
	 * confirm against the shared-memory layout definition.
	 */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
1791
1792/****************************************************************************/
1793/* Read an arbitrary range of data from NVRAM.                              */
1794/*                                                                          */
1795/* Prepares the NVRAM interface for access and reads the requested data     */
1796/* into the supplied buffer.                                                */
1797/*                                                                          */
1798/* Returns:                                                                 */
1799/*   0 on success and the data read, positive value on failure.             */
1800/****************************************************************************/
1801int
1802bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1803    int buf_size)
1804{
1805	int			rc = 0;
1806	u_int32_t		cmd_flags, offset32, len32, extra;
1807
1808	if (buf_size == 0)
1809		return (0);
1810
1811	/* Request access to the flash interface. */
1812	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1813		return (rc);
1814
1815	/* Enable access to flash interface */
1816	bnx_enable_nvram_access(sc);
1817
1818	len32 = buf_size;
1819	offset32 = offset;
1820	extra = 0;
1821
1822	cmd_flags = 0;
1823
1824	if (offset32 & 3) {
1825		u_int8_t buf[4];
1826		u_int32_t pre_len;
1827
1828		offset32 &= ~3;
1829		pre_len = 4 - (offset & 3);
1830
1831		if (pre_len >= len32) {
1832			pre_len = len32;
1833			cmd_flags =
1834			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1835		} else
1836			cmd_flags = BNX_NVM_COMMAND_FIRST;
1837
1838		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1839
1840		if (rc)
1841			return (rc);
1842
1843		memcpy(ret_buf, buf + (offset & 3), pre_len);
1844
1845		offset32 += 4;
1846		ret_buf += pre_len;
1847		len32 -= pre_len;
1848	}
1849
1850	if (len32 & 3) {
1851		extra = 4 - (len32 & 3);
1852		len32 = (len32 + 4) & ~3;
1853	}
1854
1855	if (len32 == 4) {
1856		u_int8_t buf[4];
1857
1858		if (cmd_flags)
1859			cmd_flags = BNX_NVM_COMMAND_LAST;
1860		else
1861			cmd_flags =
1862			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1863
1864		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1865
1866		memcpy(ret_buf, buf, 4 - extra);
1867	} else if (len32 > 0) {
1868		u_int8_t buf[4];
1869
1870		/* Read the first word. */
1871		if (cmd_flags)
1872			cmd_flags = 0;
1873		else
1874			cmd_flags = BNX_NVM_COMMAND_FIRST;
1875
1876		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1877
1878		/* Advance to the next dword. */
1879		offset32 += 4;
1880		ret_buf += 4;
1881		len32 -= 4;
1882
1883		while (len32 > 4 && rc == 0) {
1884			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1885
1886			/* Advance to the next dword. */
1887			offset32 += 4;
1888			ret_buf += 4;
1889			len32 -= 4;
1890		}
1891
1892		if (rc)
1893			return (rc);
1894
1895		cmd_flags = BNX_NVM_COMMAND_LAST;
1896		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1897
1898		memcpy(ret_buf, buf, 4 - extra);
1899	}
1900
1901	/* Disable access to flash interface and release the lock. */
1902	bnx_disable_nvram_access(sc);
1903	bnx_release_nvram_lock(sc);
1904
1905	return (rc);
1906}
1907
1908#ifdef BNX_NVRAM_WRITE_SUPPORT
1909/****************************************************************************/
1910/* Write an arbitrary range of data from NVRAM.                             */
1911/*                                                                          */
1912/* Prepares the NVRAM interface for write access and writes the requested   */
1913/* data from the supplied buffer.  The caller is responsible for            */
1914/* calculating any appropriate CRCs.                                        */
1915/*                                                                          */
1916/* Returns:                                                                 */
1917/*   0 on success, positive value on failure.                               */
1918/****************************************************************************/
1919int
1920bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1921    int buf_size)
1922{
1923	u_int32_t		written, offset32, len32;
1924	u_int8_t		*buf, start[4], end[4];
1925	int			rc = 0;
1926	int			align_start, align_end;
1927
1928	buf = data_buf;
1929	offset32 = offset;
1930	len32 = buf_size;
1931	align_start = align_end = 0;
1932
1933	if ((align_start = (offset32 & 3))) {
1934		offset32 &= ~3;
1935		len32 += align_start;
1936		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1937			return (rc);
1938	}
1939
1940	if (len32 & 3) {
1941		if ((len32 > 4) || !align_start) {
1942			align_end = 4 - (len32 & 3);
1943			len32 += align_end;
1944			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1945			    end, 4))) {
1946				return (rc);
1947			}
1948		}
1949	}
1950
1951	if (align_start || align_end) {
1952		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1953		if (buf == 0)
1954			return (ENOMEM);
1955
1956		if (align_start)
1957			memcpy(buf, start, 4);
1958
1959		if (align_end)
1960			memcpy(buf + len32 - 4, end, 4);
1961
1962		memcpy(buf + align_start, data_buf, buf_size);
1963	}
1964
1965	written = 0;
1966	while ((written < len32) && (rc == 0)) {
1967		u_int32_t page_start, page_end, data_start, data_end;
1968		u_int32_t addr, cmd_flags;
1969		int i;
1970		u_int8_t flash_buffer[264];
1971
1972	    /* Find the page_start addr */
1973		page_start = offset32 + written;
1974		page_start -= (page_start % sc->bnx_flash_info->page_size);
1975		/* Find the page_end addr */
1976		page_end = page_start + sc->bnx_flash_info->page_size;
1977		/* Find the data_start addr */
1978		data_start = (written == 0) ? offset32 : page_start;
1979		/* Find the data_end addr */
1980		data_end = (page_end > offset32 + len32) ?
1981		    (offset32 + len32) : page_end;
1982
1983		/* Request access to the flash interface. */
1984		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1985			goto nvram_write_end;
1986
1987		/* Enable access to flash interface */
1988		bnx_enable_nvram_access(sc);
1989
1990		cmd_flags = BNX_NVM_COMMAND_FIRST;
1991		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1992			int j;
1993
1994			/* Read the whole page into the buffer
1995			 * (non-buffer flash only) */
1996			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1997				if (j == (sc->bnx_flash_info->page_size - 4))
1998					cmd_flags |= BNX_NVM_COMMAND_LAST;
1999
2000				rc = bnx_nvram_read_dword(sc,
2001					page_start + j,
2002					&flash_buffer[j],
2003					cmd_flags);
2004
2005				if (rc)
2006					goto nvram_write_end;
2007
2008				cmd_flags = 0;
2009			}
2010		}
2011
2012		/* Enable writes to flash interface (unlock write-protect) */
2013		if ((rc = bnx_enable_nvram_write(sc)) != 0)
2014			goto nvram_write_end;
2015
2016		/* Erase the page */
2017		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
2018			goto nvram_write_end;
2019
2020		/* Re-enable the write again for the actual write */
2021		bnx_enable_nvram_write(sc);
2022
2023		/* Loop to write back the buffer data from page_start to
2024		 * data_start */
2025		i = 0;
2026		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2027			for (addr = page_start; addr < data_start;
2028				addr += 4, i += 4) {
2029
2030				rc = bnx_nvram_write_dword(sc, addr,
2031				    &flash_buffer[i], cmd_flags);
2032
2033				if (rc != 0)
2034					goto nvram_write_end;
2035
2036				cmd_flags = 0;
2037			}
2038		}
2039
2040		/* Loop to write the new data from data_start to data_end */
2041		for (addr = data_start; addr < data_end; addr += 4, i++) {
2042			if ((addr == page_end - 4) ||
2043			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
2044			    && (addr == data_end - 4))) {
2045
2046				cmd_flags |= BNX_NVM_COMMAND_LAST;
2047			}
2048
2049			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
2050
2051			if (rc != 0)
2052				goto nvram_write_end;
2053
2054			cmd_flags = 0;
2055			buf += 4;
2056		}
2057
2058		/* Loop to write back the buffer data from data_end
2059		 * to page_end */
2060		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2061			for (addr = data_end; addr < page_end;
2062			    addr += 4, i += 4) {
2063
2064				if (addr == page_end-4)
2065					cmd_flags = BNX_NVM_COMMAND_LAST;
2066
2067				rc = bnx_nvram_write_dword(sc, addr,
2068				    &flash_buffer[i], cmd_flags);
2069
2070				if (rc != 0)
2071					goto nvram_write_end;
2072
2073				cmd_flags = 0;
2074			}
2075		}
2076
2077		/* Disable writes to flash interface (lock write-protect) */
2078		bnx_disable_nvram_write(sc);
2079
2080		/* Disable access to flash interface */
2081		bnx_disable_nvram_access(sc);
2082		bnx_release_nvram_lock(sc);
2083
2084		/* Increment written */
2085		written += data_end - data_start;
2086	}
2087
2088nvram_write_end:
2089	if (align_start || align_end)
2090		free(buf, M_DEVBUF, len32);
2091
2092	return (rc);
2093}
2094#endif /* BNX_NVRAM_WRITE_SUPPORT */
2095
2096/****************************************************************************/
2097/* Verifies that NVRAM is accessible and contains valid data.               */
2098/*                                                                          */
2099/* Reads the configuration data from NVRAM and verifies that the CRC is     */
2100/* correct.                                                                 */
2101/*                                                                          */
2102/* Returns:                                                                 */
2103/*   0 on success, positive value on failure.                               */
2104/****************************************************************************/
2105int
2106bnx_nvram_test(struct bnx_softc *sc)
2107{
2108	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
2109	u_int8_t		*data = (u_int8_t *) buf;
2110	int			rc = 0;
2111	u_int32_t		magic, csum;
2112
2113	/*
2114	 * Check that the device NVRAM is valid by reading
2115	 * the magic value at offset 0.
2116	 */
2117	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2118		goto bnx_nvram_test_done;
2119
2120	magic = bnx_be32toh(buf[0]);
2121	if (magic != BNX_NVRAM_MAGIC) {
2122		rc = ENODEV;
2123		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2124		    "Expected: 0x%08X, Found: 0x%08X\n",
2125		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2126		goto bnx_nvram_test_done;
2127	}
2128
2129	/*
2130	 * Verify that the device NVRAM includes valid
2131	 * configuration data.
2132	 */
2133	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2134		goto bnx_nvram_test_done;
2135
2136	csum = ether_crc32_le(data, 0x100);
2137	if (csum != BNX_CRC32_RESIDUAL) {
2138		rc = ENODEV;
2139		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2140		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2141		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2142		goto bnx_nvram_test_done;
2143	}
2144
2145	csum = ether_crc32_le(data + 0x100, 0x100);
2146	if (csum != BNX_CRC32_RESIDUAL) {
2147		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2148		    "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2149		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2150		rc = ENODEV;
2151	}
2152
2153bnx_nvram_test_done:
2154	return (rc);
2155}
2156
2157/****************************************************************************/
2158/* Identifies the current media type of the controller and sets the PHY     */
2159/* address.                                                                 */
2160/*                                                                          */
2161/* Returns:                                                                 */
2162/*   Nothing.                                                               */
2163/****************************************************************************/
2164void
2165bnx_get_media(struct bnx_softc *sc)
2166{
2167	u_int32_t val;
2168
2169	sc->bnx_phy_addr = 1;
2170
2171	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2172		u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2173		u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2174		u_int32_t strap;
2175
2176		/*
2177		 * The BCM5709S is software configurable
2178		 * for Copper or SerDes operation.
2179		 */
2180		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2181			DBPRINT(sc, BNX_INFO_LOAD,
2182			    "5709 bonded for copper.\n");
2183			goto bnx_get_media_exit;
2184		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2185			DBPRINT(sc, BNX_INFO_LOAD,
2186			    "5709 bonded for dual media.\n");
2187			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2188			goto bnx_get_media_exit;
2189		}
2190
2191		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2192			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2193		else {
2194			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
2195			    >> 8;
2196		}
2197
2198		if (sc->bnx_pa.pa_function == 0) {
2199			switch (strap) {
2200			case 0x4:
2201			case 0x5:
2202			case 0x6:
2203				DBPRINT(sc, BNX_INFO_LOAD,
2204					"BCM5709 s/w configured for SerDes.\n");
2205				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2206				break;
2207			default:
2208				DBPRINT(sc, BNX_INFO_LOAD,
2209					"BCM5709 s/w configured for Copper.\n");
2210			}
2211		} else {
2212			switch (strap) {
2213			case 0x1:
2214			case 0x2:
2215			case 0x4:
2216				DBPRINT(sc, BNX_INFO_LOAD,
2217					"BCM5709 s/w configured for SerDes.\n");
2218				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2219				break;
2220			default:
2221				DBPRINT(sc, BNX_INFO_LOAD,
2222					"BCM5709 s/w configured for Copper.\n");
2223			}
2224		}
2225
2226	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
2227		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2228
2229	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
2230		sc->bnx_flags |= BNX_NO_WOL_FLAG;
2231
2232		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
2233			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;
2234
2235		/*
2236		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
2237		 * separate PHY for SerDes.
2238		 */
2239		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
2240			sc->bnx_phy_addr = 2;
2241			val = REG_RD_IND(sc, sc->bnx_shmem_base +
2242				 BNX_SHARED_HW_CFG_CONFIG);
2243			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
2244				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
2245				DBPRINT(sc, BNX_INFO_LOAD,
2246				    "Found 2.5Gb capable adapter\n");
2247			}
2248		}
2249	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
2250		   (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
2251		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;
2252
2253bnx_get_media_exit:
2254	DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
2255		"Using PHY address %d.\n", sc->bnx_phy_addr);
2256}
2257
2258/****************************************************************************/
2259/* Performs PHY initialization required before MII drivers access the       */
2260/* device.                                                                  */
2261/*                                                                          */
2262/* Returns:                                                                 */
2263/*   Nothing.                                                               */
2264/****************************************************************************/
2265void
2266bnx_init_media(struct bnx_softc *sc)
2267{
2268	if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
2269		/*
2270		 * Configure the BCM5709S / BCM5716S PHYs to use traditional
2271		 * IEEE Clause 22 method. Otherwise we have no way to attach
2272		 * the PHY to the mii(4) layer. PHY specific configuration
2273		 * is done by the mii(4) layer.
2274		 */
2275
2276		/* Select auto-negotiation MMD of the PHY. */
2277		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2278		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2279
2280		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2281		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2282
2283		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2284		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2285	}
2286}
2287
2288/****************************************************************************/
2289/* Free any DMA memory owned by the driver.                                 */
2290/*                                                                          */
2291/* Scans through each data structure that requires DMA memory and frees     */
2292/* the memory if allocated.                                                 */
2293/*                                                                          */
2294/* Returns:                                                                 */
2295/*   Nothing.                                                               */
2296/****************************************************************************/
2297void
2298bnx_dma_free(struct bnx_softc *sc)
2299{
2300	int			i;
2301
2302	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2303
2304	/* Destroy the status block. */
2305	if (sc->status_block != NULL && sc->status_map != NULL) {
2306		bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2307		    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2308		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2309		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
2310		    BNX_STATUS_BLK_SZ);
2311		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2312		    sc->status_rseg);
2313		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2314		sc->status_block = NULL;
2315		sc->status_map = NULL;
2316	}
2317
2318	/* Destroy the statistics block. */
2319	if (sc->stats_block != NULL && sc->stats_map != NULL) {
2320		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2321		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
2322		    BNX_STATS_BLK_SZ);
2323		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2324		    sc->stats_rseg);
2325		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2326		sc->stats_block = NULL;
2327		sc->stats_map = NULL;
2328	}
2329
2330	/* Free, unmap and destroy all context memory pages. */
2331	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2332		for (i = 0; i < sc->ctx_pages; i++) {
2333			if (sc->ctx_block[i] != NULL) {
2334				bus_dmamap_unload(sc->bnx_dmatag,
2335				    sc->ctx_map[i]);
2336				bus_dmamem_unmap(sc->bnx_dmatag,
2337				    (caddr_t)sc->ctx_block[i],
2338				    BCM_PAGE_SIZE);
2339				bus_dmamem_free(sc->bnx_dmatag,
2340				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2341				bus_dmamap_destroy(sc->bnx_dmatag,
2342				    sc->ctx_map[i]);
2343				sc->ctx_block[i] = NULL;
2344			}
2345		}
2346	}
2347
2348	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2349	for (i = 0; i < TX_PAGES; i++ ) {
2350		if (sc->tx_bd_chain[i] != NULL &&
2351		    sc->tx_bd_chain_map[i] != NULL) {
2352			bus_dmamap_unload(sc->bnx_dmatag,
2353			    sc->tx_bd_chain_map[i]);
2354			bus_dmamem_unmap(sc->bnx_dmatag,
2355			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2356			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2357			    sc->tx_bd_chain_rseg[i]);
2358			bus_dmamap_destroy(sc->bnx_dmatag,
2359			    sc->tx_bd_chain_map[i]);
2360			sc->tx_bd_chain[i] = NULL;
2361			sc->tx_bd_chain_map[i] = NULL;
2362		}
2363	}
2364
2365	/* Unload and destroy the TX mbuf maps. */
2366	for (i = 0; i < TOTAL_TX_BD; i++) {
2367		bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
2368		bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
2369	}
2370
2371	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2372	for (i = 0; i < RX_PAGES; i++ ) {
2373		if (sc->rx_bd_chain[i] != NULL &&
2374		    sc->rx_bd_chain_map[i] != NULL) {
2375			bus_dmamap_unload(sc->bnx_dmatag,
2376			    sc->rx_bd_chain_map[i]);
2377			bus_dmamem_unmap(sc->bnx_dmatag,
2378			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2379			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2380			    sc->rx_bd_chain_rseg[i]);
2381
2382			bus_dmamap_destroy(sc->bnx_dmatag,
2383			    sc->rx_bd_chain_map[i]);
2384			sc->rx_bd_chain[i] = NULL;
2385			sc->rx_bd_chain_map[i] = NULL;
2386		}
2387	}
2388
2389	/* Unload and destroy the RX mbuf maps. */
2390	for (i = 0; i < TOTAL_RX_BD; i++) {
2391		if (sc->rx_mbuf_map[i] != NULL) {
2392			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2393			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2394		}
2395	}
2396
2397	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2398}
2399
2400/****************************************************************************/
2401/* Allocate any DMA memory needed by the driver.                            */
2402/*                                                                          */
2403/* Allocates DMA memory needed for the various global structures needed by  */
2404/* hardware.                                                                */
2405/*                                                                          */
2406/* Returns:                                                                 */
2407/*   0 for success, positive value for failure.                             */
2408/****************************************************************************/
2409int
2410bnx_dma_alloc(struct bnx_softc *sc)
2411{
2412	int			i, rc = 0;
2413
2414	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2415
2416	/*
2417	 * Create DMA maps for the TX buffer mbufs.
2418	 */
2419	for (i = 0; i < TOTAL_TX_BD; i++) {
2420		if (bus_dmamap_create(sc->bnx_dmatag,
2421		    BNX_MAX_JUMBO_ETHER_MTU_VLAN, BNX_MAX_SEGMENTS,
2422		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->tx_mbuf_map[i])) {
2423			printf(": Could not create Tx mbuf %d DMA map!\n", 1);
2424			rc = ENOMEM;
2425			goto bnx_dma_alloc_exit;
2426		}
2427	}
2428
2429	/*
2430	 * Allocate DMA memory for the status block, map the memory into DMA
2431	 * space, and fetch the physical address of the block.
2432	 */
2433	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2434	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2435		printf(": Could not create status block DMA map!\n");
2436		rc = ENOMEM;
2437		goto bnx_dma_alloc_exit;
2438	}
2439
2440	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2441	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2442	    &sc->status_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2443		printf(": Could not allocate status block DMA memory!\n");
2444		rc = ENOMEM;
2445		goto bnx_dma_alloc_exit;
2446	}
2447
2448	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2449	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2450		printf(": Could not map status block DMA memory!\n");
2451		rc = ENOMEM;
2452		goto bnx_dma_alloc_exit;
2453	}
2454
2455	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2456	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2457		printf(": Could not load status block DMA memory!\n");
2458		rc = ENOMEM;
2459		goto bnx_dma_alloc_exit;
2460	}
2461
2462	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2463	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2464
2465	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2466
2467	/* DRC - Fix for 64 bit addresses. */
2468	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2469		(u_int32_t) sc->status_block_paddr);
2470
2471	/* BCM5709 uses host memory as cache for context memory. */
2472	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2473		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2474		if (sc->ctx_pages == 0)
2475			sc->ctx_pages = 1;
2476		if (sc->ctx_pages > 4) /* XXX */
2477			sc->ctx_pages = 4;
2478
2479		DBRUNIF((sc->ctx_pages > 512),
2480			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
2481				__FILE__, __LINE__, sc->ctx_pages));
2482
2483
2484		for (i = 0; i < sc->ctx_pages; i++) {
2485			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2486			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2487			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2488			    &sc->ctx_map[i]) != 0) {
2489				rc = ENOMEM;
2490				goto bnx_dma_alloc_exit;
2491			}
2492
2493			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2494			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2495			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2496				rc = ENOMEM;
2497				goto bnx_dma_alloc_exit;
2498			}
2499
2500			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2501			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2502			    (caddr_t *)&sc->ctx_block[i],
2503			    BUS_DMA_NOWAIT) != 0) {
2504				rc = ENOMEM;
2505				goto bnx_dma_alloc_exit;
2506			}
2507
2508			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2509			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2510			    BUS_DMA_NOWAIT) != 0) {
2511				rc = ENOMEM;
2512				goto bnx_dma_alloc_exit;
2513			}
2514
2515			bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2516		}
2517	}
2518
2519	/*
2520	 * Allocate DMA memory for the statistics block, map the memory into
2521	 * DMA space, and fetch the physical address of the block.
2522	 */
2523	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2524	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2525		printf(": Could not create stats block DMA map!\n");
2526		rc = ENOMEM;
2527		goto bnx_dma_alloc_exit;
2528	}
2529
2530	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2531	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2532	    &sc->stats_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2533		printf(": Could not allocate stats block DMA memory!\n");
2534		rc = ENOMEM;
2535		goto bnx_dma_alloc_exit;
2536	}
2537
2538	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2539	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2540		printf(": Could not map stats block DMA memory!\n");
2541		rc = ENOMEM;
2542		goto bnx_dma_alloc_exit;
2543	}
2544
2545	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2546	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2547		printf(": Could not load status block DMA memory!\n");
2548		rc = ENOMEM;
2549		goto bnx_dma_alloc_exit;
2550	}
2551
2552	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2553
2554	/* DRC - Fix for 64 bit address. */
2555	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2556	    (u_int32_t) sc->stats_block_paddr);
2557
2558	/*
2559	 * Allocate DMA memory for the TX buffer descriptor chain,
2560	 * and fetch the physical address of the block.
2561	 */
2562	for (i = 0; i < TX_PAGES; i++) {
2563		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2564		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2565		    &sc->tx_bd_chain_map[i])) {
2566			printf(": Could not create Tx desc %d DMA map!\n", i);
2567			rc = ENOMEM;
2568			goto bnx_dma_alloc_exit;
2569		}
2570
2571		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2572		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2573		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2574			printf(": Could not allocate TX desc %d DMA memory!\n",
2575			    i);
2576			rc = ENOMEM;
2577			goto bnx_dma_alloc_exit;
2578		}
2579
2580		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2581		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2582		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2583			printf(": Could not map TX desc %d DMA memory!\n", i);
2584			rc = ENOMEM;
2585			goto bnx_dma_alloc_exit;
2586		}
2587
2588		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2589		    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2590		    BUS_DMA_NOWAIT)) {
2591			printf(": Could not load TX desc %d DMA memory!\n", i);
2592			rc = ENOMEM;
2593			goto bnx_dma_alloc_exit;
2594		}
2595
2596		sc->tx_bd_chain_paddr[i] =
2597		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2598
2599		/* DRC - Fix for 64 bit systems. */
2600		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2601		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2602	}
2603
2604	/*
2605	 * Allocate DMA memory for the Rx buffer descriptor chain,
2606	 * and fetch the physical address of the block.
2607	 */
2608	for (i = 0; i < RX_PAGES; i++) {
2609		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2610		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2611		    &sc->rx_bd_chain_map[i])) {
2612			printf(": Could not create Rx desc %d DMA map!\n", i);
2613			rc = ENOMEM;
2614			goto bnx_dma_alloc_exit;
2615		}
2616
2617		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2618		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2619		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2620			printf(": Could not allocate Rx desc %d DMA memory!\n",
2621			    i);
2622			rc = ENOMEM;
2623			goto bnx_dma_alloc_exit;
2624		}
2625
2626		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2627		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2628		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2629			printf(": Could not map Rx desc %d DMA memory!\n", i);
2630			rc = ENOMEM;
2631			goto bnx_dma_alloc_exit;
2632		}
2633
2634		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2635		    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2636		    BUS_DMA_NOWAIT)) {
2637			printf(": Could not load Rx desc %d DMA memory!\n", i);
2638			rc = ENOMEM;
2639			goto bnx_dma_alloc_exit;
2640		}
2641
2642		sc->rx_bd_chain_paddr[i] =
2643		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2644
2645		/* DRC - Fix for 64 bit systems. */
2646		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2647		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2648	}
2649
2650	/*
2651	 * Create DMA maps for the Rx buffer mbufs.
2652	 */
2653	for (i = 0; i < TOTAL_RX_BD; i++) {
2654		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
2655		    1, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
2656		    &sc->rx_mbuf_map[i])) {
2657			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2658			rc = ENOMEM;
2659			goto bnx_dma_alloc_exit;
2660		}
2661	}
2662
2663 bnx_dma_alloc_exit:
2664	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2665
2666	return(rc);
2667}
2668
2669/****************************************************************************/
2670/* Release all resources used by the driver.                                */
2671/*                                                                          */
2672/* Releases all resources acquired by the driver including interrupts,      */
2673/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2674/*                                                                          */
2675/* Returns:                                                                 */
2676/*   Nothing.                                                               */
2677/****************************************************************************/
2678void
2679bnx_release_resources(struct bnx_softc *sc)
2680{
2681	struct pci_attach_args	*pa = &(sc->bnx_pa);
2682
2683	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2684
2685	bnx_dma_free(sc);
2686
2687	if (sc->bnx_intrhand != NULL)
2688		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2689
2690	if (sc->bnx_size)
2691		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2692
2693	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2694}
2695
2696/****************************************************************************/
2697/* Firmware synchronization.                                                */
2698/*                                                                          */
2699/* Before performing certain events such as a chip reset, synchronize with  */
2700/* the firmware first.                                                      */
2701/*                                                                          */
2702/* Returns:                                                                 */
2703/*   0 for success, positive value for failure.                             */
2704/****************************************************************************/
2705int
2706bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2707{
2708	int			i, rc = 0;
2709	u_int32_t		val;
2710
2711	/* Don't waste any time if we've timed out before. */
2712	if (sc->bnx_fw_timed_out) {
2713		rc = EBUSY;
2714		goto bnx_fw_sync_exit;
2715	}
2716
2717	/* Increment the message sequence number. */
2718	sc->bnx_fw_wr_seq++;
2719	msg_data |= sc->bnx_fw_wr_seq;
2720
2721 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2722	    msg_data);
2723
2724	/* Send the message to the bootcode driver mailbox. */
2725	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2726
2727	/* Wait for the bootcode to acknowledge the message. */
2728	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2729		/* Check for a response in the bootcode firmware mailbox. */
2730		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2731		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2732			break;
2733		DELAY(1000);
2734	}
2735
2736	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2737	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2738		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2739		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2740		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2741
2742		msg_data &= ~BNX_DRV_MSG_CODE;
2743		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2744
2745		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2746
2747		sc->bnx_fw_timed_out = 1;
2748		rc = EBUSY;
2749	}
2750
2751bnx_fw_sync_exit:
2752	return (rc);
2753}
2754
2755/****************************************************************************/
2756/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2757/*                                                                          */
2758/* Returns:                                                                 */
2759/*   Nothing.                                                               */
2760/****************************************************************************/
2761void
2762bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2763    u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2764{
2765	int			i;
2766	u_int32_t		val;
2767
2768	/* Set the page size used by RV2P. */
2769	if (rv2p_proc == RV2P_PROC2) {
2770		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2771		    USABLE_RX_BD_PER_PAGE);
2772	}
2773
2774	for (i = 0; i < rv2p_code_len; i += 8) {
2775		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2776		rv2p_code++;
2777		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2778		rv2p_code++;
2779
2780		if (rv2p_proc == RV2P_PROC1) {
2781			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2782			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2783		} else {
2784			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2785			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2786		}
2787	}
2788
2789	/* Reset the processor, un-stall is done later. */
2790	if (rv2p_proc == RV2P_PROC1)
2791		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2792	else
2793		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2794}
2795
2796/****************************************************************************/
2797/* Load RISC processor firmware.                                            */
2798/*                                                                          */
2799/* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2800/* associated with a particular processor.                                  */
2801/*                                                                          */
2802/* Returns:                                                                 */
2803/*   Nothing.                                                               */
2804/****************************************************************************/
2805void
2806bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2807    struct fw_info *fw)
2808{
2809	u_int32_t		offset;
2810	u_int32_t		val;
2811
2812	/* Halt the CPU. */
2813	val = REG_RD_IND(sc, cpu_reg->mode);
2814	val |= cpu_reg->mode_value_halt;
2815	REG_WR_IND(sc, cpu_reg->mode, val);
2816	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2817
2818	/* Load the Text area. */
2819	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2820	if (fw->text) {
2821		int j;
2822
2823		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2824			REG_WR_IND(sc, offset, fw->text[j]);
2825	}
2826
2827	/* Load the Data area. */
2828	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2829	if (fw->data) {
2830		int j;
2831
2832		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2833			REG_WR_IND(sc, offset, fw->data[j]);
2834	}
2835
2836	/* Load the SBSS area. */
2837	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2838	if (fw->sbss) {
2839		int j;
2840
2841		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2842			REG_WR_IND(sc, offset, fw->sbss[j]);
2843	}
2844
2845	/* Load the BSS area. */
2846	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2847	if (fw->bss) {
2848		int j;
2849
2850		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2851			REG_WR_IND(sc, offset, fw->bss[j]);
2852	}
2853
2854	/* Load the Read-Only area. */
2855	offset = cpu_reg->spad_base +
2856	    (fw->rodata_addr - cpu_reg->mips_view_base);
2857	if (fw->rodata) {
2858		int j;
2859
2860		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2861			REG_WR_IND(sc, offset, fw->rodata[j]);
2862	}
2863
2864	/* Clear the pre-fetch instruction. */
2865	REG_WR_IND(sc, cpu_reg->inst, 0);
2866	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2867
2868	/* Start the CPU. */
2869	val = REG_RD_IND(sc, cpu_reg->mode);
2870	val &= ~cpu_reg->mode_value_halt;
2871	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2872	REG_WR_IND(sc, cpu_reg->mode, val);
2873}
2874
2875/****************************************************************************/
2876/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2877/*                                                                          */
2878/* Loads the firmware for each CPU and starts the CPU.                      */
2879/*                                                                          */
2880/* Returns:                                                                 */
2881/*   Nothing.                                                               */
2882/****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	struct bnx_firmware *bfw = &bnx_firmwares[BNX_FW_B06];
	struct bnx_rv2p *rv2p = &bnx_rv2ps[BNX_RV2P];
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Default to the 5706/5708 (B06) firmware.  The 5709 family uses
	 * the B09 firmware, and its Ax revisions need a dedicated RV2P
	 * image (XI90) distinct from the later-revision one (XI).
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		bfw = &bnx_firmwares[BNX_FW_B09];
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = &bnx_rv2ps[BNX_XI90_RV2P];
		else
			rv2p = &bnx_rv2ps[BNX_XI_RV2P];
	}

	/* Initialize the RV2P processor. */
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc1,
	    rv2p->fw->bnx_rv2p_proc1len, RV2P_PROC1);
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc2,
	    rv2p->fw->bnx_rv2p_proc2len, RV2P_PROC2);

	/*
	 * Each RISC CPU below is described by a cpu_reg (its control
	 * registers) and a fw_info (the firmware image sections), then
	 * loaded and started by bnx_load_cpu_fw().
	 */

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_RXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_RXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_RXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_RXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_RXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_RXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_RXP_FwText;

	fw.data_addr = bfw->fw->bnx_RXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_RXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_RXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_RXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_RXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_RXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_RXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_RXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_RXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_RXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_RXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_RXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TXP_FwText;

	fw.data_addr = bfw->fw->bnx_TXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_TXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TPAT_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TPAT_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TPAT_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TPAT_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TPAT_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TPAT_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TPAT_FwText;

	fw.data_addr = bfw->fw->bnx_TPAT_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TPAT_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TPAT_FwData;

	fw.sbss_addr = bfw->fw->bnx_TPAT_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TPAT_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TPAT_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TPAT_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TPAT_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TPAT_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TPAT_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TPAT_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TPAT_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_COM_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_COM_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_COM_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_COM_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_COM_FwTextAddr;
	fw.text_len = bfw->fw->bnx_COM_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_COM_FwText;

	fw.data_addr = bfw->fw->bnx_COM_FwDataAddr;
	fw.data_len = bfw->fw->bnx_COM_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_COM_FwData;

	fw.sbss_addr = bfw->fw->bnx_COM_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_COM_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_COM_FwSbss;

	fw.bss_addr = bfw->fw->bnx_COM_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_COM_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_COM_FwBss;

	fw.rodata_addr = bfw->fw->bnx_COM_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_COM_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_COM_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
}
3093
3094/****************************************************************************/
3095/* Initialize context memory.                                               */
3096/*                                                                          */
3097/* Clears the memory associated with each Context ID (CID).                 */
3098/*                                                                          */
3099/* Returns:                                                                 */
3100/*   Nothing.                                                               */
3101/****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		u_int32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		/* NOTE(review): purpose of bit 12 is not named here —
		 * confirm against the register documentation. */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		/* Point each page-table entry at one host context page. */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache. */
			val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
				BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			val = (u_int32_t)
			    ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
				BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was successful. */
			/* The WRITE_REQ bit self-clears when the write lands. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		u_int32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk all 96 contexts from the top down, zeroing each. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {

			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
				CTX_WR(sc, 0x00, offset, 0);
			}

			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
 	}
}
3179
3180/****************************************************************************/
3181/* Fetch the permanent MAC address of the controller.                       */
3182/*                                                                          */
3183/* Returns:                                                                 */
3184/*   Nothing.                                                               */
3185/****************************************************************************/
3186void
3187bnx_get_mac_addr(struct bnx_softc *sc)
3188{
3189	u_int32_t		mac_lo = 0, mac_hi = 0;
3190
3191	/*
3192	 * The NetXtreme II bootcode populates various NIC
3193	 * power-on and runtime configuration items in a
3194	 * shared memory area.  The factory configured MAC
3195	 * address is available from both NVRAM and the
3196	 * shared memory area so we'll read the value from
3197	 * shared memory for speed.
3198	 */
3199
3200	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3201	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3202
3203	if ((mac_lo == 0) && (mac_hi == 0)) {
3204		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3205		    __FILE__, __LINE__);
3206	} else {
3207		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3208		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3209		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3210		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3211		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3212		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3213	}
3214
3215	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3216	    "%6D\n", sc->eaddr, ":");
3217}
3218
3219/****************************************************************************/
3220/* Program the MAC address.                                                 */
3221/*                                                                          */
3222/* Returns:                                                                 */
3223/*   Nothing.                                                               */
3224/****************************************************************************/
3225void
3226bnx_set_mac_addr(struct bnx_softc *sc)
3227{
3228	u_int32_t		val;
3229	u_int8_t		*mac_addr = sc->eaddr;
3230
3231	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3232	    "%6D\n", sc->eaddr, ":");
3233
3234	val = (mac_addr[0] << 8) | mac_addr[1];
3235
3236	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3237
3238	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3239		(mac_addr[4] << 8) | mac_addr[5];
3240
3241	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3242}
3243
3244/****************************************************************************/
3245/* Stop the controller.                                                     */
3246/*                                                                          */
3247/* Returns:                                                                 */
3248/*   Nothing.                                                               */
3249/****************************************************************************/
void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry	*ifm;
	struct mii_data		*mii;
	int			mtmp, itmp;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop the watchdog and RX refill timers. */
	timeout_del(&sc->bnx_timeout);
	timeout_del(&sc->bnx_rxrefill);

	/* Mark the interface as down before touching the hardware. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Wait for any interrupt handler still in flight to finish. */
	intr_barrier(sc->bnx_intrhand);
	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);

	/* Tell firmware that the driver is going away. */
	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	/* IFF_UP is forced temporarily so mii_mediachg() acts on the PHY. */
	mii = &sc->bnx_mii;
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	ifm = mii->mii_media.ifm_cur;
	mtmp = ifm->ifm_media;
	ifm->ifm_media = IFM_ETHER|IFM_NONE;
	mii_mediachg(mii);
	ifm->ifm_media = mtmp;
	ifp->if_flags = itmp;

	ifp->if_timer = 0;

	sc->bnx_link = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* presumably keeps management (IPMI/ASF) traffic alive while the
	 * interface is down — see bnx_mgmt_init() for details. */
	bnx_mgmt_init(sc);
}
3308
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Synchronizes with the bootcode, resets the chip (method depends on the   */
/* controller generation), and waits for the firmware to reinitialize.      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
		val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	/* The 5709 has a dedicated reset command; older chips are reset    */
	/* through PCI config space and must be polled for completion.      */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BNX_MISC_COMMAND);
		DELAY(5);

		val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
		    val);
	} else {
		val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
			if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bnx_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured. */
	/* SWAP_DIAG0 reads back 0x01020304 only when the swap setup is sane. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3409
/****************************************************************************/
/* Initialize the controller hardware after reset but before firmware       */
/* handshake: masks interrupts, programs DMA byte/word swapping, applies    */
/* chipset errata workarounds, starts the internal state machines and       */
/* CPUs, prepares NVRAM, and configures the MQ/RV2P/TBDR blocks.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value (ENODEV) for failure.                    */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/* Set the PCI clock compensation delay (bits 20-21 = 0x2). */
	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	/* Enable fast clock compensation only on 133 MHz PCI-X busses. */
	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

#if 1
	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}
#endif

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
		/* 5709 A1 errata: disable the MQ halt feature. */
		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
			val |= BNX_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Place the kernel bypass window just past the kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell the RV2P engine the host page size (encoded as log2 - 8). */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

#if 0
	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
#endif

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3514
3515/****************************************************************************/
3516/* Initialize the controller in preparation to send/receive traffic.        */
3517/*                                                                          */
3518/* Returns:                                                                 */
3519/*   0 for success, positive value for failure.                             */
3520/****************************************************************************/
3521int
3522bnx_blockinit(struct bnx_softc *sc)
3523{
3524	u_int32_t		reg, val;
3525	int 			rc = 0;
3526
3527	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3528
3529	/* Load the hardware default MAC address. */
3530	bnx_set_mac_addr(sc);
3531
3532	/* Set the Ethernet backoff seed value */
3533	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3534	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3535	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3536
3537	sc->last_status_idx = 0;
3538	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3539
3540	/* Set up link change interrupt generation. */
3541	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3542	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3543
3544	/* Program the physical address of the status block. */
3545	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3546	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3547	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3548
3549	/* Program the physical address of the statistics block. */
3550	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3551	    (u_int32_t)(sc->stats_block_paddr));
3552	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3553	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3554
3555	/* Program various host coalescing parameters. */
3556	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3557	    << 16) | sc->bnx_tx_quick_cons_trip);
3558	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3559	    << 16) | sc->bnx_rx_quick_cons_trip);
3560	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3561	    sc->bnx_comp_prod_trip);
3562	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3563	    sc->bnx_tx_ticks);
3564	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3565	    sc->bnx_rx_ticks);
3566	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3567	    sc->bnx_com_ticks);
3568	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3569	    sc->bnx_cmd_ticks);
3570	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3571	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3572	REG_WR(sc, BNX_HC_CONFIG,
3573	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3574	    BNX_HC_CONFIG_COLLECT_STATS));
3575
3576	/* Clear the internal statistics counters. */
3577	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3578
3579	/* Verify that bootcode is running. */
3580	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3581
3582	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3583	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3584	    __FILE__, __LINE__); reg = 0);
3585
3586	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3587	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3588		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3589		    "Expected: 08%08X\n", __FILE__, __LINE__,
3590		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3591		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3592		rc = ENODEV;
3593		goto bnx_blockinit_exit;
3594	}
3595
3596	/* Check if any management firmware is running. */
3597	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3598	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3599	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3600		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3601		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3602	}
3603
3604	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3605	    BNX_DEV_INFO_BC_REV);
3606
3607	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3608
3609	/* Enable DMA */
3610	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3611		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3612		val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3613		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3614	}
3615
3616	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3617	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3618
3619	/* Enable link state change interrupt generation. */
3620	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3621		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3622		    BNX_MISC_ENABLE_DEFAULT_XI);
3623	} else
3624		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3625
3626	/* Enable all remaining blocks in the MAC. */
3627	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3628	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3629	DELAY(20);
3630
3631bnx_blockinit_exit:
3632	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3633
3634	return (rc);
3635}
3636
3637/****************************************************************************/
3638/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3639/*                                                                          */
3640/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3641/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3642/* necessary.                                                               */
3643/*                                                                          */
3644/* Returns:                                                                 */
3645/*   0 for success, positive value for failure.                             */
3646/****************************************************************************/
3647int
3648bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3649    u_int16_t *chain_prod, u_int32_t *prod_bseq)
3650{
3651	bus_dmamap_t		map;
3652	struct mbuf 		*m;
3653	struct rx_bd		*rxbd;
3654	int			i;
3655	u_int32_t		addr;
3656#ifdef BNX_DEBUG
3657	u_int16_t		debug_chain_prod = *chain_prod;
3658#endif
3659	u_int16_t		first_chain_prod;
3660
3661	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3662	    __FUNCTION__);
3663
3664	/* Make sure the inputs are valid. */
3665	DBRUNIF((*chain_prod > MAX_RX_BD),
3666	    printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3667	    *chain_prod, (u_int16_t) MAX_RX_BD));
3668
3669	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3670	    "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3671	    *prod_bseq);
3672
3673	/* This is a new mbuf allocation. */
3674	m = MCLGETL(NULL, M_DONTWAIT, BNX_MAX_JUMBO_MRU);
3675	if (!m)
3676		return (0);
3677	m->m_len = m->m_pkthdr.len = BNX_MAX_JUMBO_MRU;
3678	/* the chip aligns the ip header for us, no need to m_adj */
3679
3680	/* Map the mbuf cluster into device memory. */
3681	map = sc->rx_mbuf_map[*chain_prod];
3682	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT)) {
3683		m_freem(m);
3684		return (0);
3685	}
3686	first_chain_prod = *chain_prod;
3687
3688#ifdef BNX_DEBUG
3689	/* Track the distribution of buffer segments. */
3690	sc->rx_mbuf_segs[map->dm_nsegs]++;
3691#endif
3692
3693	/* Setup the rx_bd for the first segment. */
3694	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3695
3696	addr = (u_int32_t)map->dm_segs[0].ds_addr;
3697	rxbd->rx_bd_haddr_lo = addr;
3698	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3699	rxbd->rx_bd_haddr_hi = addr;
3700	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3701	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3702	*prod_bseq += map->dm_segs[0].ds_len;
3703
3704	for (i = 1; i < map->dm_nsegs; i++) {
3705		*prod = NEXT_RX_BD(*prod);
3706		*chain_prod = RX_CHAIN_IDX(*prod);
3707
3708		rxbd =
3709		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3710
3711		addr = (u_int32_t)map->dm_segs[i].ds_addr;
3712		rxbd->rx_bd_haddr_lo = addr;
3713		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3714		rxbd->rx_bd_haddr_hi = addr;
3715		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3716		rxbd->rx_bd_flags = 0;
3717		*prod_bseq += map->dm_segs[i].ds_len;
3718	}
3719
3720	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3721
3722	/*
3723	 * Save the mbuf, adjust the map pointer (swap map for first and
3724	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3725	 * and update our counter.
3726	 */
3727	sc->rx_mbuf_ptr[*chain_prod] = m;
3728	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3729	sc->rx_mbuf_map[*chain_prod] = map;
3730
3731	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3732	    map->dm_nsegs));
3733
3734	return (map->dm_nsegs);
3735}
3736
3737
3738/****************************************************************************/
3739/* Initialize the TX context memory.                                        */
3740/*                                                                          */
3741/* Returns:                                                                 */
3742/*   Nothing                                                                */
3743/****************************************************************************/
3744void
3745bnx_init_tx_context(struct bnx_softc *sc)
3746{
3747	u_int32_t val;
3748
3749	/* Initialize the context ID for an L2 TX chain. */
3750	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3751		/* Set the CID type to support an L2 connection. */
3752		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3753		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
3754		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3755		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
3756
3757		/* Point the hardware to the first page in the chain. */
3758		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3759		CTX_WR(sc, GET_CID_ADDR(TX_CID),
3760		    BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
3761		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3762		CTX_WR(sc, GET_CID_ADDR(TX_CID),
3763		    BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
3764	} else {
3765		/* Set the CID type to support an L2 connection. */
3766		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3767		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3768		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3769		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3770
3771		/* Point the hardware to the first page in the chain. */
3772		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3773		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3774		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3775		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3776	}
3777}
3778
3779/****************************************************************************/
3780/* Allocate memory and initialize the TX data structures.                   */
3781/*                                                                          */
3782/* Returns:                                                                 */
3783/*   0 for success, positive value for failure.                             */
3784/****************************************************************************/
3785int
3786bnx_init_tx_chain(struct bnx_softc *sc)
3787{
3788	struct tx_bd		*txbd;
3789	u_int32_t		addr;
3790	int			i, rc = 0;
3791
3792	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3793
3794	/* Set the initial TX producer/consumer indices. */
3795	sc->tx_prod = 0;
3796	sc->tx_cons = 0;
3797	sc->tx_prod_bseq = 0;
3798	sc->used_tx_bd = 0;
3799	sc->max_tx_bd =	USABLE_TX_BD;
3800	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3801	DBRUNIF(1, sc->tx_full_count = 0);
3802
3803	/*
3804	 * The NetXtreme II supports a linked-list structure called
3805	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3806	 * consists of a series of 1 or more chain pages, each of which
3807	 * consists of a fixed number of BD entries.
3808	 * The last BD entry on each page is a pointer to the next page
3809	 * in the chain, and the last pointer in the BD chain
3810	 * points back to the beginning of the chain.
3811	 */
3812
3813	/* Set the TX next pointer chain entries. */
3814	for (i = 0; i < TX_PAGES; i++) {
3815		int j;
3816
3817		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3818
3819		/* Check if we've reached the last page. */
3820		if (i == (TX_PAGES - 1))
3821			j = 0;
3822		else
3823			j = i + 1;
3824
3825		addr = (u_int32_t)sc->tx_bd_chain_paddr[j];
3826		txbd->tx_bd_haddr_lo = addr;
3827		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3828		txbd->tx_bd_haddr_hi = addr;
3829	}
3830
3831	/*
3832	 * Initialize the context ID for an L2 TX chain.
3833	 */
3834	bnx_init_tx_context(sc);
3835
3836	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3837
3838	return(rc);
3839}
3840
3841/****************************************************************************/
3842/* Free memory and clear the TX data structures.                            */
3843/*                                                                          */
3844/* Returns:                                                                 */
3845/*   Nothing.                                                               */
3846/****************************************************************************/
3847void
3848bnx_free_tx_chain(struct bnx_softc *sc)
3849{
3850	int			i;
3851
3852	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3853
3854	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3855	for (i = 0; i < TOTAL_TX_BD; i++) {
3856		if (sc->tx_mbuf_ptr[i] != NULL) {
3857			if (sc->tx_mbuf_map[i] != NULL) {
3858				bus_dmamap_sync(sc->bnx_dmatag,
3859				    sc->tx_mbuf_map[i],	0,
3860				    sc->tx_mbuf_map[i]->dm_mapsize,
3861				    BUS_DMASYNC_POSTWRITE);
3862				bus_dmamap_unload(sc->bnx_dmatag,
3863				    sc->tx_mbuf_map[i]);
3864			}
3865			m_freem(sc->tx_mbuf_ptr[i]);
3866			sc->tx_mbuf_ptr[i] = NULL;
3867			DBRUNIF(1, sc->tx_mbuf_alloc--);
3868		}
3869	}
3870
3871	/* Clear each TX chain page. */
3872	for (i = 0; i < TX_PAGES; i++)
3873		bzero(sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3874
3875	sc->used_tx_bd = 0;
3876
3877	/* Check if we lost any mbufs in the process. */
3878	DBRUNIF((sc->tx_mbuf_alloc),
3879	    printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3880	    sc->tx_mbuf_alloc));
3881
3882	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3883}
3884
3885/****************************************************************************/
3886/* Initialize the RX context memory.                                        */
3887/*                                                                          */
3888/* Returns:                                                                 */
3889/*   Nothing                                                                */
3890/****************************************************************************/
3891void
3892bnx_init_rx_context(struct bnx_softc *sc)
3893{
3894	u_int32_t val;
3895
3896	/* Initialize the context ID for an L2 RX chain. */
3897	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3898		BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3899
3900	/*
3901	 * Set the level for generating pause frames
3902	 * when the number of available rx_bd's gets
3903	 * too low (the low watermark) and the level
3904	 * when pause frames can be stopped (the high
3905	 * watermark).
3906	 */
3907	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3908		u_int32_t lo_water, hi_water;
3909
3910		lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3911		hi_water = USABLE_RX_BD / 4;
3912
3913		lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
3914		hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;
3915
3916		if (hi_water > 0xf)
3917			hi_water = 0xf;
3918		else if (hi_water == 0)
3919			lo_water = 0;
3920
3921		val |= (lo_water << BNX_L2CTX_RX_LO_WATER_MARK_SHIFT) |
3922		    (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
3923	}
3924
3925 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3926
3927	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
3928	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3929		val = REG_RD(sc, BNX_MQ_MAP_L2_5);
3930		REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
3931	}
3932
3933	/* Point the hardware to the first page in the chain. */
3934	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3935	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3936	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3937	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3938}
3939
3940/****************************************************************************/
3941/* Add mbufs to the RX chain until its full or an mbuf allocation error     */
3942/* occurs.                                                                  */
3943/*                                                                          */
3944/* Returns:                                                                 */
3945/*   Nothing                                                                */
3946/****************************************************************************/
3947int
3948bnx_fill_rx_chain(struct bnx_softc *sc)
3949{
3950	u_int16_t		prod, chain_prod;
3951	u_int32_t		prod_bseq;
3952	u_int			slots, used;
3953	int			ndesc = 0;
3954#ifdef BNX_DEBUG
3955	int rx_mbuf_alloc_before;
3956#endif
3957
3958	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
3959
3960	prod = sc->rx_prod;
3961	prod_bseq = sc->rx_prod_bseq;
3962
3963#ifdef BNX_DEBUG
3964	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3965#endif
3966
3967	/* Keep filling the RX chain until it's full. */
3968	slots = if_rxr_get(&sc->rx_ring, sc->max_rx_bd);
3969	while (slots > 0) {
3970		chain_prod = RX_CHAIN_IDX(prod);
3971
3972		used = bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
3973		if (used == 0) {
3974			/* Bail out if we can't add an mbuf to the chain. */
3975			break;
3976		}
3977		slots -= used;
3978
3979		prod = NEXT_RX_BD(prod);
3980		ndesc++;
3981	}
3982	if_rxr_put(&sc->rx_ring, slots);
3983
3984	/* Save the RX chain producer index. */
3985	sc->rx_prod = prod;
3986	sc->rx_prod_bseq = prod_bseq;
3987
3988	/* Tell the chip about the waiting rx_bd's. */
3989	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3990	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3991
3992	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
3993
3994	return (ndesc);
3995}
3996
3997/****************************************************************************/
3998/* Allocate memory and initialize the RX data structures.                   */
3999/*                                                                          */
4000/* Returns:                                                                 */
4001/*   0 for success, positive value for failure.                             */
4002/****************************************************************************/
4003int
4004bnx_init_rx_chain(struct bnx_softc *sc)
4005{
4006	struct rx_bd		*rxbd;
4007	int			i, rc = 0;
4008	u_int32_t		addr;
4009
4010	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4011
4012	/* Initialize the RX producer and consumer indices. */
4013	sc->rx_prod = 0;
4014	sc->rx_cons = 0;
4015	sc->rx_prod_bseq = 0;
4016	sc->max_rx_bd = USABLE_RX_BD;
4017	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4018	DBRUNIF(1, sc->rx_empty_count = 0);
4019
4020	/* Initialize the RX next pointer chain entries. */
4021	for (i = 0; i < RX_PAGES; i++) {
4022		int j;
4023
4024		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4025
4026		/* Check if we've reached the last page. */
4027		if (i == (RX_PAGES - 1))
4028			j = 0;
4029		else
4030			j = i + 1;
4031
4032		/* Setup the chain page pointers. */
4033		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
4034		rxbd->rx_bd_haddr_hi = addr;
4035		addr = (u_int32_t)sc->rx_bd_chain_paddr[j];
4036		rxbd->rx_bd_haddr_lo = addr;
4037	}
4038
4039	if_rxr_init(&sc->rx_ring, 16, sc->max_rx_bd);
4040
4041	/* Fill up the RX chain. */
4042	bnx_fill_rx_chain(sc);
4043
4044	for (i = 0; i < RX_PAGES; i++)
4045		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4046		    sc->rx_bd_chain_map[i]->dm_mapsize,
4047		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4048
4049	bnx_init_rx_context(sc);
4050
4051	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4052
4053	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4054
4055	return(rc);
4056}
4057
4058/****************************************************************************/
4059/* Free memory and clear the RX data structures.                            */
4060/*                                                                          */
4061/* Returns:                                                                 */
4062/*   Nothing.                                                               */
4063/****************************************************************************/
4064void
4065bnx_free_rx_chain(struct bnx_softc *sc)
4066{
4067	int			i;
4068#ifdef BNX_DEBUG
4069	int			rx_mbuf_alloc_before;
4070#endif
4071
4072	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4073
4074#ifdef BNX_DEBUG
4075	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4076#endif
4077
4078	/* Free any mbufs still in the RX mbuf chain. */
4079	for (i = 0; i < TOTAL_RX_BD; i++) {
4080		if (sc->rx_mbuf_ptr[i] != NULL) {
4081			if (sc->rx_mbuf_map[i] != NULL) {
4082				bus_dmamap_sync(sc->bnx_dmatag,
4083				    sc->rx_mbuf_map[i],	0,
4084				    sc->rx_mbuf_map[i]->dm_mapsize,
4085				    BUS_DMASYNC_POSTREAD);
4086				bus_dmamap_unload(sc->bnx_dmatag,
4087				    sc->rx_mbuf_map[i]);
4088			}
4089			m_freem(sc->rx_mbuf_ptr[i]);
4090			sc->rx_mbuf_ptr[i] = NULL;
4091			DBRUNIF(1, sc->rx_mbuf_alloc--);
4092		}
4093	}
4094
4095	DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
4096		BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
4097		__FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
4098
4099	/* Clear each RX chain page. */
4100	for (i = 0; i < RX_PAGES; i++)
4101		bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
4102
4103	/* Check if we lost any mbufs in the process. */
4104	DBRUNIF((sc->rx_mbuf_alloc),
4105	    printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
4106	    sc->rx_mbuf_alloc));
4107
4108	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4109}
4110
4111void
4112bnx_rxrefill(void *xsc)
4113{
4114	struct bnx_softc	*sc = xsc;
4115	int			s;
4116
4117	s = splnet();
4118	if (!bnx_fill_rx_chain(sc))
4119		timeout_add(&sc->bnx_rxrefill, 1);
4120	splx(s);
4121}
4122
4123/****************************************************************************/
4124/* Set media options.                                                       */
4125/*                                                                          */
4126/* Returns:                                                                 */
4127/*   0 for success, positive value for failure.                             */
4128/****************************************************************************/
4129int
4130bnx_ifmedia_upd(struct ifnet *ifp)
4131{
4132	struct bnx_softc	*sc;
4133	struct mii_data		*mii;
4134	int			rc = 0;
4135
4136	sc = ifp->if_softc;
4137
4138	mii = &sc->bnx_mii;
4139	sc->bnx_link = 0;
4140	if (mii->mii_instance) {
4141		struct mii_softc *miisc;
4142		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4143			mii_phy_reset(miisc);
4144	}
4145	mii_mediachg(mii);
4146
4147	return(rc);
4148}
4149
4150/****************************************************************************/
4151/* Reports current media status.                                            */
4152/*                                                                          */
4153/* Returns:                                                                 */
4154/*   Nothing.                                                               */
4155/****************************************************************************/
4156void
4157bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4158{
4159	struct bnx_softc	*sc;
4160	struct mii_data		*mii;
4161	int			s;
4162
4163	sc = ifp->if_softc;
4164
4165	s = splnet();
4166
4167	mii = &sc->bnx_mii;
4168
4169	mii_pollstat(mii);
4170	ifmr->ifm_status = mii->mii_media_status;
4171	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4172	    sc->bnx_flowflags;
4173
4174	splx(s);
4175}
4176
4177/****************************************************************************/
4178/* Handles PHY generated interrupt events.                                  */
4179/*                                                                          */
4180/* Returns:                                                                 */
4181/*   Nothing.                                                               */
4182/****************************************************************************/
4183void
4184bnx_phy_intr(struct bnx_softc *sc)
4185{
4186	u_int32_t		new_link_state, old_link_state;
4187
4188	new_link_state = sc->status_block->status_attn_bits &
4189	    STATUS_ATTN_BITS_LINK_STATE;
4190	old_link_state = sc->status_block->status_attn_bits_ack &
4191	    STATUS_ATTN_BITS_LINK_STATE;
4192
4193	/* Handle any changes if the link state has changed. */
4194	if (new_link_state != old_link_state) {
4195		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
4196
4197		sc->bnx_link = 0;
4198		timeout_del(&sc->bnx_timeout);
4199		bnx_tick(sc);
4200
4201		/* Update the status_attn_bits_ack field in the status block. */
4202		if (new_link_state) {
4203			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
4204			    STATUS_ATTN_BITS_LINK_STATE);
4205			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
4206		} else {
4207			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
4208			    STATUS_ATTN_BITS_LINK_STATE);
4209			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
4210		}
4211	}
4212
4213	/* Acknowledge the link change interrupt. */
4214	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
4215}
4216
4217/****************************************************************************/
4218/* Handles received frame interrupt events.                                 */
4219/*                                                                          */
4220/* Returns:                                                                 */
4221/*   Nothing.                                                               */
4222/****************************************************************************/
4223void
4224bnx_rx_intr(struct bnx_softc *sc)
4225{
4226	struct status_block	*sblk = sc->status_block;
4227	struct ifnet		*ifp = &sc->arpcom.ac_if;
4228	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
4229	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
4230	u_int16_t		sw_prod, sw_chain_prod;
4231	u_int32_t		sw_prod_bseq;
4232	struct l2_fhdr		*l2fhdr;
4233	int			i;
4234
4235	DBRUNIF(1, sc->rx_interrupts++);
4236
4237	if (if_rxr_inuse(&sc->rx_ring) == 0)
4238		return;
4239
4240	/* Prepare the RX chain pages to be accessed by the host CPU. */
4241	for (i = 0; i < RX_PAGES; i++)
4242		bus_dmamap_sync(sc->bnx_dmatag,
4243		    sc->rx_bd_chain_map[i], 0,
4244		    sc->rx_bd_chain_map[i]->dm_mapsize,
4245		    BUS_DMASYNC_POSTWRITE);
4246
4247	/* Get the hardware's view of the RX consumer index. */
4248	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4249	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4250		hw_cons++;
4251
4252	/* Get working copies of the driver's view of the RX indices. */
4253	sw_cons = sc->rx_cons;
4254	sw_prod = sc->rx_prod;
4255	sw_prod_bseq = sc->rx_prod_bseq;
4256
4257	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4258	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4259	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
4260
4261	/* Prevent speculative reads from getting ahead of the status block. */
4262	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4263	    BUS_SPACE_BARRIER_READ);
4264
4265	/*
4266	 * Scan through the receive chain as long
4267	 * as there is work to do.
4268	 */
4269	while (sw_cons != hw_cons) {
4270		struct mbuf *m;
4271		struct rx_bd *rxbd;
4272		unsigned int len;
4273		u_int32_t status;
4274
4275		/* Clear the mbuf pointer. */
4276		m = NULL;
4277
4278		/* Convert the producer/consumer indices to an actual
4279		 * rx_bd index.
4280		 */
4281		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4282		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4283
4284		/* Get the used rx_bd. */
4285		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4286		if_rxr_put(&sc->rx_ring, 1);
4287
4288		DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
4289		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
4290
4291		/* The mbuf is stored with the last rx_bd entry of a packet. */
4292		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4293			/* Validate that this is the last rx_bd. */
4294			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4295			    printf("%s: Unexpected mbuf found in "
4296			        "rx_bd[0x%04X]!\n", sw_chain_cons);
4297				bnx_breakpoint(sc));
4298
4299			/* DRC - ToDo: If the received packet is small, say less
4300			 *             than 128 bytes, allocate a new mbuf here,
4301			 *             copy the data to that mbuf, and recycle
4302			 *             the mapped jumbo frame.
4303			 */
4304
4305			/* Unmap the mbuf from DMA space. */
4306			bus_dmamap_sync(sc->bnx_dmatag,
4307			    sc->rx_mbuf_map[sw_chain_cons], 0,
4308			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4309			    BUS_DMASYNC_POSTREAD);
4310			bus_dmamap_unload(sc->bnx_dmatag,
4311			    sc->rx_mbuf_map[sw_chain_cons]);
4312
4313			/* Remove the mbuf from RX chain. */
4314			m = sc->rx_mbuf_ptr[sw_chain_cons];
4315			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4316
4317			/*
4318			 * Frames received on the NetXteme II are prepended
4319			 * with the l2_fhdr structure which provides status
4320			 * information about the received frame (including
4321			 * VLAN tags and checksum info) and are also
4322			 * automatically adjusted to align the IP header
4323			 * (i.e. two null bytes are inserted before the
4324			 * Ethernet header).
4325			 */
4326			l2fhdr = mtod(m, struct l2_fhdr *);
4327
4328			len    = l2fhdr->l2_fhdr_pkt_len;
4329			status = l2fhdr->l2_fhdr_status;
4330
4331			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4332			    printf("Simulating l2_fhdr status error.\n");
4333			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
4334
4335			/* Watch for unusual sized frames. */
4336			DBRUNIF(((len < BNX_MIN_MTU) ||
4337			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4338			    printf("%s: Unusual frame size found. "
4339			    "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
4340			    len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4341
4342			bnx_dump_mbuf(sc, m);
4343			bnx_breakpoint(sc));
4344
4345			len -= ETHER_CRC_LEN;
4346
4347			/* Check the received frame for errors. */
4348			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
4349			    L2_FHDR_ERRORS_PHY_DECODE |
4350			    L2_FHDR_ERRORS_ALIGNMENT |
4351			    L2_FHDR_ERRORS_TOO_SHORT |
4352			    L2_FHDR_ERRORS_GIANT_FRAME)) {
4353				/* Log the error and release the mbuf. */
4354				ifp->if_ierrors++;
4355				DBRUNIF(1, sc->l2fhdr_status_errors++);
4356
4357				m_freem(m);
4358				m = NULL;
4359				goto bnx_rx_int_next_rx;
4360			}
4361
4362			/* Skip over the l2_fhdr when passing the data up
4363			 * the stack.
4364			 */
4365			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4366
4367			/* Adjust the pckt length to match the received data. */
4368			m->m_pkthdr.len = m->m_len = len;
4369
4370			DBRUN(BNX_VERBOSE_RECV,
4371			    struct ether_header *eh;
4372			    eh = mtod(m, struct ether_header *);
4373			    printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
4374			    __FUNCTION__, eh->ether_dhost, ":",
4375			    eh->ether_shost, ":", htons(eh->ether_type)));
4376
4377			/* Validate the checksum. */
4378
4379			/* Check for an IP datagram. */
4380			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4381				/* Check if the IP checksum is valid. */
4382				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4383				    == 0)
4384					m->m_pkthdr.csum_flags |=
4385					    M_IPV4_CSUM_IN_OK;
4386				else
4387					DBPRINT(sc, BNX_WARN_SEND,
4388					    "%s(): Invalid IP checksum "
4389					        "= 0x%04X!\n",
4390						__FUNCTION__,
4391						l2fhdr->l2_fhdr_ip_xsum
4392						);
4393			}
4394
4395			/* Check for a valid TCP/UDP frame. */
4396			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4397			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
4398				/* Check for a good TCP/UDP checksum. */
4399				if ((status &
4400				    (L2_FHDR_ERRORS_TCP_XSUM |
4401				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4402					m->m_pkthdr.csum_flags |=
4403					    M_TCP_CSUM_IN_OK |
4404					    M_UDP_CSUM_IN_OK;
4405				} else {
4406					DBPRINT(sc, BNX_WARN_SEND,
4407					    "%s(): Invalid TCP/UDP "
4408					    "checksum = 0x%04X!\n",
4409					    __FUNCTION__,
4410					    l2fhdr->l2_fhdr_tcp_udp_xsum);
4411				}
4412			}
4413
4414			/*
4415			 * If we received a packet with a vlan tag,
4416			 * attach that information to the packet.
4417			 */
4418			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4419			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4420#if NVLAN > 0
4421				DBPRINT(sc, BNX_VERBOSE_SEND,
4422				    "%s(): VLAN tag = 0x%04X\n",
4423				    __FUNCTION__,
4424				    l2fhdr->l2_fhdr_vlan_tag);
4425
4426				m->m_pkthdr.ether_vtag =
4427				    l2fhdr->l2_fhdr_vlan_tag;
4428				m->m_flags |= M_VLANTAG;
4429#else
4430				m_freem(m);
4431				m = NULL;
4432				goto bnx_rx_int_next_rx;
4433#endif
4434			}
4435
4436bnx_rx_int_next_rx:
4437			sw_prod = NEXT_RX_BD(sw_prod);
4438		}
4439
4440		sw_cons = NEXT_RX_BD(sw_cons);
4441
4442		/* If we have a packet, pass it up the stack */
4443		if (m) {
4444			sc->rx_cons = sw_cons;
4445
4446			DBPRINT(sc, BNX_VERBOSE_RECV,
4447			    "%s(): Passing received frame up.\n", __FUNCTION__);
4448			ml_enqueue(&ml, m);
4449			DBRUNIF(1, sc->rx_mbuf_alloc--);
4450
4451			sw_cons = sc->rx_cons;
4452		}
4453
4454		/* Refresh hw_cons to see if there's new work */
4455		if (sw_cons == hw_cons) {
4456			hw_cons = sc->hw_rx_cons =
4457			    sblk->status_rx_quick_consumer_index0;
4458			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4459			    USABLE_RX_BD_PER_PAGE)
4460				hw_cons++;
4461		}
4462
4463		/* Prevent speculative reads from getting ahead of
4464		 * the status block.
4465		 */
4466		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4467		    BUS_SPACE_BARRIER_READ);
4468	}
4469
4470	if (ifiq_input(&ifp->if_rcv, &ml))
4471		if_rxr_livelocked(&sc->rx_ring);
4472
4473	/* No new packets to process.  Refill the RX chain and exit. */
4474	sc->rx_cons = sw_cons;
4475	if (!bnx_fill_rx_chain(sc))
4476		timeout_add(&sc->bnx_rxrefill, 1);
4477
4478	for (i = 0; i < RX_PAGES; i++)
4479		bus_dmamap_sync(sc->bnx_dmatag,
4480		    sc->rx_bd_chain_map[i], 0,
4481		    sc->rx_bd_chain_map[i]->dm_mapsize,
4482		    BUS_DMASYNC_PREWRITE);
4483
4484	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4485	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4486	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4487}
4488
4489/****************************************************************************/
4490/* Handles transmit completion interrupt events.                            */
4491/*                                                                          */
4492/* Returns:                                                                 */
4493/*   Nothing.                                                               */
4494/****************************************************************************/
4495void
4496bnx_tx_intr(struct bnx_softc *sc)
4497{
4498	struct status_block	*sblk = sc->status_block;
4499	struct ifnet		*ifp = &sc->arpcom.ac_if;
4500	bus_dmamap_t		map;
4501	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4502	int			freed, used;
4503
4504	DBRUNIF(1, sc->tx_interrupts++);
4505
4506	/* Get the hardware's view of the TX consumer index. */
4507	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4508
4509	/* Skip to the next entry if this is a chain page pointer. */
4510	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4511		hw_tx_cons++;
4512
4513	sw_tx_cons = sc->tx_cons;
4514
4515	/* Prevent speculative reads from getting ahead of the status block. */
4516	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4517	    BUS_SPACE_BARRIER_READ);
4518
4519	/* Cycle through any completed TX chain page entries. */
4520	freed = 0;
4521	while (sw_tx_cons != hw_tx_cons) {
4522#ifdef BNX_DEBUG
4523		struct tx_bd *txbd = NULL;
4524#endif
4525		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4526
4527		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4528		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4529		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4530
4531		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4532		    printf("%s: TX chain consumer out of range! "
4533		    " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
4534		    bnx_breakpoint(sc));
4535
4536		DBRUNIF(1, txbd = &sc->tx_bd_chain
4537		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4538
4539		DBRUNIF((txbd == NULL),
4540		    printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
4541		    sw_tx_chain_cons);
4542		    bnx_breakpoint(sc));
4543
4544		DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
4545		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4546
4547		map = sc->tx_mbuf_map[sw_tx_chain_cons];
4548		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4549			/* Validate that this is the last tx_bd. */
4550			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4551			    printf("%s: tx_bd END flag not set but "
4552			    "txmbuf == NULL!\n");
4553			    bnx_breakpoint(sc));
4554
4555			DBRUN(BNX_INFO_SEND,
4556			    printf("%s: Unloading map/freeing mbuf "
4557			    "from tx_bd[0x%04X]\n",
4558			    __FUNCTION__, sw_tx_chain_cons));
4559
4560			/* Unmap the mbuf. */
4561			bus_dmamap_sync(sc->bnx_dmatag, map, 0,
4562			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4563			bus_dmamap_unload(sc->bnx_dmatag, map);
4564
4565			/* Free the mbuf. */
4566			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4567			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4568		}
4569
4570		freed++;
4571		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4572	}
4573
4574	used = atomic_sub_int_nv(&sc->used_tx_bd, freed);
4575
4576	sc->tx_cons = sw_tx_cons;
4577
4578	/* Clear the TX timeout timer. */
4579	if (used == 0)
4580		ifp->if_timer = 0;
4581
4582	if (ifq_is_oactive(&ifp->if_snd))
4583		ifq_restart(&ifp->if_snd);
4584}
4585
4586/****************************************************************************/
4587/* Disables interrupt generation.                                           */
4588/*                                                                          */
4589/* Returns:                                                                 */
4590/*   Nothing.                                                               */
4591/****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Set the interrupt mask bit in the interrupt ack/command register. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	/*
	 * Read the register back (result discarded) so the masking write
	 * is flushed out to the chip before this function returns.
	 */
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
4598
4599/****************************************************************************/
4600/* Enables interrupt generation.                                            */
4601/*                                                                          */
4602/* Returns:                                                                 */
4603/*   Nothing.                                                               */
4604/****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	u_int32_t		val;

	/*
	 * Acknowledge up to the last status block index seen by the
	 * driver, first with the mask bit still set ...
	 */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* ... then with the mask bit clear, re-enabling interrupts. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/*
	 * Request an immediate host-coalescing pass (COAL_NOW).
	 * NOTE(review): presumably so events that arrived while
	 * interrupts were masked generate a fresh interrupt — confirm.
	 */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}
4619
4620/****************************************************************************/
4621/* Handles controller initialization.                                       */
4622/*                                                                          */
4623/* Returns:                                                                 */
4624/*   Nothing.                                                               */
4625/****************************************************************************/
void
bnx_init(void *xsc)
{
	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		ether_mtu;
	int			s;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Runs at splnet: serialize against the interrupt path. */
	s = splnet();

	/* Quiesce the hardware before (re)initializing it. */
	bnx_stop(sc);

	/*
	 * Bring the controller back up in strict order: chip reset,
	 * chip-level init, then per-block init.  Any failure aborts
	 * the bring-up and leaves the interface down.
	 */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
		BNX_PRINTF(sc, "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_chipinit(sc)) {
		BNX_PRINTF(sc, "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_blockinit(sc)) {
		BNX_PRINTF(sc, "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Load our MAC address. */
	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
	bnx_set_mac_addr(sc);

	/*
	 * Calculate and program the Ethernet MRU size.  Note the MRU is
	 * always set to the jumbo maximum here, independent of if_mtu.
	 */
	ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;

	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
	    __FUNCTION__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/*
	 * Calculate the RX Ethernet frame size for rx_bd's.
	 * NOTE(review): the "+ 2" appears to be the two alignment pad
	 * bytes the chip inserts before the Ethernet header, and the
	 * "+ 8" presumably covers FCS/VLAN overhead — confirm.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_iff(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	/* (Re)negotiate the media before declaring the interface up. */
	bnx_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Start the periodic tick (statistics, link state). */
	timeout_add_sec(&sc->bnx_timeout, 1);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	splx(s);

	return;
}
4705
/*
 * Minimal bring-up so the on-chip management firmware (ASF/IPMI/UMP)
 * can use the device while the driver itself is not running.
 */
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	u_int32_t	val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/*
	 * Program BNX_RV2P_CONFIG with (BCM_PAGE_BITS - 8) in bits 31:24.
	 * NOTE(review): presumably the RV2P engine's page-size setting —
	 * confirm against the chip documentation.
	 */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Read back to flush the enable write, then let the blocks settle. */
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bnx_ifmedia_upd(ifp);

bnx_mgmt_init_exit:
 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
4735
4736/*****************************************************************************/
4737/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4738/* memory visible to the controller.                                         */
4739/*                                                                           */
4740/* Returns:                                                                  */
4741/*   0 for success, positive value for failure.                              */
4742/*****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m, int *used)
{
	bus_dmamap_t		map;
	struct tx_bd 		*txbd = NULL;
	u_int16_t		vlan_tag = 0, flags = 0;
	u_int16_t		chain_prod, chain_head, prod;
#ifdef BNX_DEBUG
	u_int16_t		debug_prod;
#endif
	u_int32_t		addr, prod_bseq;
	int			i, error;

	/* Transfer any checksum offload flags to the bd. */
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#if NVLAN > 0
	/* Transfer any VLAN tags to the bd. */
	if (m->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	/* chain_head remembers the first bd's slot; see map swap below. */
	chain_head = chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_head];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
	    BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;

	case EFBIG:
		/* Too many segments: compact the mbuf chain and retry once. */
		if ((error = m_defrag(m, M_DONTWAIT)) == 0 &&
		    (error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
		     BUS_DMA_NOWAIT)) == 0)
			break;

		/* FALLTHROUGH */
	default:
		/* Caller frees the mbuf on ENOBUFS. */
		sc->tx_dma_map_failures++;
		return (ENOBUFS);
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA address into lo/hi bd fields. */
		addr = (u_int32_t)map->dm_segs[i].ds_addr;
		txbd->tx_bd_haddr_lo = addr;
		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = addr;
		txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
		txbd->tx_bd_vlan_tag = vlan_tag;
		txbd->tx_bd_flags = flags;
		/* prod_bseq accumulates the byte count for the doorbell. */
		prod_bseq += map->dm_segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= TX_BD_FLAGS_START;
		prod = NEXT_TX_BD(prod);
 	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= TX_BD_FLAGS_END;

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
	    map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * The mbuf is recorded at the LAST bd's slot, where bnx_tx_intr()
	 * looks for it; swap the dmamap pointers so the loaded map stays
	 * with the mbuf and the head slot gets an unused map back.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m;
	sc->tx_mbuf_map[chain_head] = sc->tx_mbuf_map[chain_prod];
	sc->tx_mbuf_map[chain_prod] = map;

	/* Report the number of descriptors consumed to the caller. */
	*used += map->dm_nsegs;

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	/* Make the frame data visible to the device before the doorbell. */
	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return (0);
}
4862
4863/****************************************************************************/
4864/* Main transmit routine.                                                   */
4865/*                                                                          */
4866/* Returns:                                                                 */
4867/*   Nothing.                                                               */
4868/****************************************************************************/
void
bnx_start(struct ifqueue *ifq)
{
	struct ifnet		*ifp = ifq->ifq_if;
	struct bnx_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;
	int			used;
	u_int16_t		tx_prod, tx_chain_prod;

	/* No link: drop everything queued rather than stalling the ifq. */
	if (!sc->bnx_link) {
		ifq_purge(ifq);
		goto bnx_start_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	used = 0;
	while (1) {
		/*
		 * Stop early if a worst-case frame (BNX_MAX_SEGMENTS
		 * descriptors, plus one spare) might not fit.
		 */
		if (sc->used_tx_bd + used + BNX_MAX_SEGMENTS + 1 >=
		    sc->max_tx_bd) {
			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
			    "business! Total tx_bd used = %d\n",
			    sc->used_tx_bd + used);
			/* Mark the queue active; bnx_tx_intr() restarts it. */
			ifq_set_oactive(ifq);
			break;
		}

		m_head = ifq_dequeue(ifq);
		if (m_head == NULL)
			break;

		/* Encap failure: drop this frame but keep transmitting. */
		if (bnx_tx_encap(sc, m_head, &used)) {
			m_freem(m_head);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (used == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BNX_VERBOSE_SEND,
		    "%s(): No packets were dequeued\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* Update the driver's counters. */
	used = atomic_add_int_nv(&sc->used_tx_bd, used);

	/* Update some debug statistics counters */
	DBRUNIF((used > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = used);
	DBRUNIF(used == sc->max_tx_bd, sc->tx_full_count++);

	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
	    tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Start the transmit: ring the doorbell with the new producer
	 * index and byte sequence set up by bnx_tx_encap().
	 */
	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BNX_TX_TIMEOUT;

bnx_start_exit:
	return;
}
4950
4951/****************************************************************************/
4952/* Handles any IOCTL calls from the operating system.                       */
4953/*                                                                          */
4954/* Returns:                                                                 */
4955/*   0 for success, positive value for failure.                             */
4956/****************************************************************************/
int
bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bnx_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii = &sc->bnx_mii;
	int			s, error = 0;

	/* Serialize against the interrupt path for the whole ioctl. */
	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		/* Bring the interface up if it isn't running yet. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bnx_init(sc);
		break;

	case SIOCSIFFLAGS:
		/*
		 * Up + running: reprogram filters via ENETRESET below.
		 * Up + not running: full init.  Down + running: stop.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bnx_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bnx_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;

		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
		    sc->bnx_phy_flags);

		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCGIFRXR:
		/* Export RX ring occupancy (e.g. for systat(1)). */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, BNX_MAX_JUMBO_MRU, &sc->rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/* ENETRESET: only the RX filter needs reprogramming, not a re-init. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bnx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
5026
5027/****************************************************************************/
5028/* Transmit timeout handler.                                                */
5029/*                                                                          */
5030/* Returns:                                                                 */
5031/*   Nothing.                                                               */
5032/****************************************************************************/
5033void
5034bnx_watchdog(struct ifnet *ifp)
5035{
5036	struct bnx_softc	*sc = ifp->if_softc;
5037
5038	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5039	    bnx_dump_status_block(sc));
5040
5041	/*
5042	 * If we are in this routine because of pause frames, then
5043	 * don't reset the hardware.
5044	 */
5045	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5046		return;
5047
5048	printf("%s: Watchdog timeout occurred, resetting!\n",
5049	    ifp->if_xname);
5050
5051	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5052
5053	bnx_init(sc);
5054
5055	ifp->if_oerrors++;
5056}
5057
5058/*
5059 * Interrupt handler.
5060 */
5061/****************************************************************************/
5062/* Main interrupt entry point.  Verifies that the controller generated the  */
5063/* interrupt and then calls a separate routine for handle the various       */
5064/* interrupt causes (PHY, TX, RX).                                          */
5065/*                                                                          */
5066/* Returns:                                                                 */
5067/*   0 for success, positive value for failure.                             */
5068/****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		status_attn_bits;
	u_int16_t		status_idx;
	int			rv = 0;

	/* Ignore spurious/shared interrupts while the device is inactive. */
	if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
		return (0);

	DBRUNIF(1, sc->interrupts_generated++);

	/* Pull the device-written status block into host view. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	status_idx = sc->status_block->status_idx;
	if (status_idx != sc->last_status_idx ||
	    !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS),
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE)) {
		rv = 1;

		/* Ack the interrupt */
		REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
		    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx);

		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    printf("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/*
		 * Was it a link change interrupt?  A bit differing
		 * between the asserted and acked attention words means
		 * the event has not been serviced yet.
		 */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE)) {
			KERNEL_LOCK();
			bnx_phy_intr(sc);
			KERNEL_UNLOCK();
		}

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			KERNEL_LOCK();
			DBRUN(1, sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sc->status_block->status_attn_bits);

			DBRUN(BNX_FATAL,
			    if (bnx_debug_unexpected_attention == 0)
				bnx_breakpoint(sc));

			/* Attempt recovery with a full reinitialization. */
			bnx_init(sc);
			KERNEL_UNLOCK();
			goto out;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 !=
		    sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 !=
		    sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/*
		 * Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = status_idx;

		/* Start moving packets again */
		if (ifp->if_flags & IFF_RUNNING &&
		    !ifq_empty(&ifp->if_snd))
			ifq_start(&ifp->if_snd);
	}

out:
	/* Hand the status block back to the device for the next update. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (rv);
}
5165
5166/****************************************************************************/
5167/* Programs the various packet receive modes (broadcast and multicast).     */
5168/*                                                                          */
5169/* Returns:                                                                 */
5170/*   Nothing.                                                               */
5171/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u_int32_t		rx_mode, sort_mode;
	int			h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ac->ac_multirangecnt > 0) {
		/* Address ranges can't be hashed; accept all multicast. */
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		/*
		 * Build the 256-bit hash filter: the low byte of the
		 * little-endian CRC32 of each address selects one bit —
		 * bits 7:5 pick one of the 8 registers, bits 4:0 the bit
		 * within it.
		 */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;

			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
5252
5253/****************************************************************************/
5254/* Called periodically to updates statistics from the controllers           */
5255/* statistics block.                                                        */
5256/*                                                                          */
5257/* Returns:                                                                 */
5258/*   Nothing.                                                               */
5259/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	/* The statistics block is DMA'd by the controller into host memory. */
	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	/* Input errors: runts, giants, mbuf drops, alignment and CRC errors. */
	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */
	/* 64-bit counters arrive split into _hi/_lo 32-bit halves. */
	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5454
5455void
5456bnx_tick(void *xsc)
5457{
5458	struct bnx_softc	*sc = xsc;
5459	struct ifnet		*ifp = &sc->arpcom.ac_if;
5460	struct mii_data		*mii = NULL;
5461	u_int32_t		msg;
5462
5463	/* Tell the firmware that the driver is still running. */
5464#ifdef BNX_DEBUG
5465	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5466#else
5467	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5468#endif
5469	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5470
5471	/* Update the statistics from the hardware statistics block. */
5472	bnx_stats_update(sc);
5473
5474	/* Schedule the next tick. */
5475	timeout_add_sec(&sc->bnx_timeout, 1);
5476
5477	/* If link is up already up then we're done. */
5478	if (sc->bnx_link)
5479		goto bnx_tick_exit;
5480
5481	mii = &sc->bnx_mii;
5482	mii_tick(mii);
5483
5484	/* Check if the link has come up. */
5485	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5486	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5487		sc->bnx_link++;
5488		/* Now that link is up, handle any outstanding TX traffic. */
5489		if (!ifq_empty(&ifp->if_snd))
5490			ifq_start(&ifp->if_snd);
5491	}
5492
5493bnx_tick_exit:
5494	return;
5495}
5496
5497/****************************************************************************/
5498/* BNX Debug Routines                                                       */
5499/****************************************************************************/
5500#ifdef BNX_DEBUG
5501
5502/****************************************************************************/
5503/* Prints out information about an mbuf.                                    */
5504/*                                                                          */
5505/* Returns:                                                                 */
5506/*   Nothing.                                                               */
5507/****************************************************************************/
5508void
5509bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5510{
5511	struct mbuf		*mp = m;
5512
5513	if (m == NULL) {
5514		/* Index out of range. */
5515		printf("mbuf ptr is null!\n");
5516		return;
5517	}
5518
5519	while (mp) {
5520		printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5521		    mp, mp->m_len);
5522
5523		if (mp->m_flags & M_EXT)
5524			printf("M_EXT ");
5525		if (mp->m_flags & M_PKTHDR)
5526			printf("M_PKTHDR ");
5527		printf("\n");
5528
5529		if (mp->m_flags & M_EXT)
5530			printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5531			    mp, mp->m_ext.ext_size);
5532
5533		mp = mp->m_next;
5534	}
5535}
5536
5537/****************************************************************************/
5538/* Prints out the mbufs in the TX mbuf chain.                               */
5539/*                                                                          */
5540/* Returns:                                                                 */
5541/*   Nothing.                                                               */
5542/****************************************************************************/
5543void
5544bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5545{
5546	struct mbuf		*m;
5547	int			i;
5548
5549	BNX_PRINTF(sc,
5550	    "----------------------------"
5551	    "  tx mbuf data  "
5552	    "----------------------------\n");
5553
5554	for (i = 0; i < count; i++) {
5555	 	m = sc->tx_mbuf_ptr[chain_prod];
5556		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5557		bnx_dump_mbuf(sc, m);
5558		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5559	}
5560
5561	BNX_PRINTF(sc,
5562	    "--------------------------------------------"
5563	    "----------------------------\n");
5564}
5565
5566/*
5567 * This routine prints the RX mbuf chain.
5568 */
5569void
5570bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5571{
5572	struct mbuf		*m;
5573	int			i;
5574
5575	BNX_PRINTF(sc,
5576	    "----------------------------"
5577	    "  rx mbuf data  "
5578	    "----------------------------\n");
5579
5580	for (i = 0; i < count; i++) {
5581	 	m = sc->rx_mbuf_ptr[chain_prod];
5582		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5583		bnx_dump_mbuf(sc, m);
5584		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5585	}
5586
5587
5588	BNX_PRINTF(sc,
5589	    "--------------------------------------------"
5590	    "----------------------------\n");
5591}
5592
5593void
5594bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5595{
5596	if (idx > MAX_TX_BD)
5597		/* Index out of range. */
5598		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5599	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5600		/* TX Chain page pointer. */
5601		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5602		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5603		    txbd->tx_bd_haddr_lo);
5604	else
5605		/* Normal tx_bd entry. */
5606		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5607		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5608		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5609		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5610		    txbd->tx_bd_flags);
5611}
5612
/* Print a single rx_bd entry. */
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX Chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
5631
/*
 * Print the fields of an l2_fhdr (receive frame header) for ring slot idx.
 */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5642
5643/*
5644 * This routine prints the TX chain.
5645 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd		*txbd;
	int			i;

	/* First some info about the tx_bd chain structure. */
	BNX_PRINTF(sc,
	    "----------------------------"
	    "  tx_bd  chain  "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);

	BNX_PRINTF(sc,
	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);

	BNX_PRINTF(sc, ""
	    "-----------------------------"
	    "   tx_bd data   "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	/* Walk 'count' descriptors forward from tx_prod, wrapping the ring. */
	for (i = 0; i < count; i++) {
	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}
5685
5686/*
5687 * This routine prints the RX chain.
5688 */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd		*rxbd;
	int			i;

	/* First some info about the rx_bd chain structure. */
	BNX_PRINTF(sc,
	    "----------------------------"
	    "  rx_bd  chain  "
	    "----------------------------\n");

	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");

	BNX_PRINTF(sc,
	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);

	BNX_PRINTF(sc,
	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);

	BNX_PRINTF(sc,
	    "----------------------------"
	    "   rx_bd data   "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	/* Walk 'count' descriptors forward from rx_prod, wrapping the ring. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	BNX_PRINTF(sc,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}
5730
5731/*
5732 * This routine prints the status block.
5733 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;

	sblk = sc->status_block;

   	BNX_PRINTF(sc, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/*
	 * These indices are not used for normal L2 drivers,
	 * so only print them when they are non-zero.
	 */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	BNX_PRINTF(sc, "-------------------------------------------"
	    "-----------------------------\n");
}
5819
5820/*
5821 * This routine prints the statistics block.
5822 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block	*sblk;

	sblk = sc->stats_block;

	BNX_PRINTF(sc, ""
	    "-----------------------------"
	    " Stats  Block "
	    "-----------------------------\n");

	/* 64-bit counters are always printed, as hi:lo 32-bit halves. */
	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
	    "IfHcInBadOctets      = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	/* 32-bit counters are only printed when non-zero. */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}
6057
6058void
6059bnx_dump_driver_state(struct bnx_softc *sc)
6060{
6061	BNX_PRINTF(sc,
6062	    "-----------------------------"
6063	    " Driver State "
6064	    "-----------------------------\n");
6065
6066	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6067	    "address\n", sc);
6068
6069	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6070	    sc->status_block);
6071
6072	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6073	    "address\n", sc->stats_block);
6074
6075	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6076	    "address\n", sc->tx_bd_chain);
6077
6078	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6079	    sc->rx_bd_chain);
6080
6081	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6082	    sc->tx_mbuf_ptr);
6083
6084	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6085	    sc->rx_mbuf_ptr);
6086
6087	BNX_PRINTF(sc,
6088	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6089	    sc->interrupts_generated);
6090
6091	BNX_PRINTF(sc,
6092	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6093	    sc->rx_interrupts);
6094
6095	BNX_PRINTF(sc,
6096	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6097	    sc->tx_interrupts);
6098
6099	BNX_PRINTF(sc,
6100	    "         0x%08X - (sc->last_status_idx) status block index\n",
6101	    sc->last_status_idx);
6102
6103	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6104	    sc->tx_prod);
6105
6106	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6107	    sc->tx_cons);
6108
6109	BNX_PRINTF(sc,
6110	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6111	    sc->tx_prod_bseq);
6112
6113	BNX_PRINTF(sc,
6114	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6115	    sc->tx_mbuf_alloc);
6116
6117	BNX_PRINTF(sc,
6118	    "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6119	    sc->used_tx_bd);
6120
6121	BNX_PRINTF(sc,
6122	    "         0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6123	    sc->tx_hi_watermark, sc->max_tx_bd);
6124
6125	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6126	    sc->rx_prod);
6127
6128	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6129	    sc->rx_cons);
6130
6131	BNX_PRINTF(sc,
6132	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6133	    sc->rx_prod_bseq);
6134
6135	BNX_PRINTF(sc,
6136	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6137	    sc->rx_mbuf_alloc);
6138
6139	BNX_PRINTF(sc,
6140	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6141	    sc->rx_low_watermark, sc->max_rx_bd);
6142
6143	BNX_PRINTF(sc,
6144	    "         0x%08X - (sc->mbuf_alloc_failed) "
6145	    "mbuf alloc failures\n",
6146	    sc->mbuf_alloc_failed);
6147
6148	BNX_PRINTF(sc,
6149	    "         0x%0X - (sc->mbuf_sim_allocated_failed) "
6150	    "simulated mbuf alloc failures\n",
6151	    sc->mbuf_sim_alloc_failed);
6152
6153	BNX_PRINTF(sc, "-------------------------------------------"
6154	    "-----------------------------\n");
6155}
6156
6157void
6158bnx_dump_hw_state(struct bnx_softc *sc)
6159{
6160	u_int32_t		val1;
6161	int			i;
6162
6163	BNX_PRINTF(sc,
6164	    "----------------------------"
6165	    " Hardware State "
6166	    "----------------------------\n");
6167
6168	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
6169
6170	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6171	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6172	    val1, BNX_MISC_ENABLE_STATUS_BITS);
6173
6174	val1 = REG_RD(sc, BNX_DMA_STATUS);
6175	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6176
6177	val1 = REG_RD(sc, BNX_CTX_STATUS);
6178	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6179
6180	val1 = REG_RD(sc, BNX_EMAC_STATUS);
6181	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6182	    BNX_EMAC_STATUS);
6183
6184	val1 = REG_RD(sc, BNX_RPM_STATUS);
6185	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6186
6187	val1 = REG_RD(sc, BNX_TBDR_STATUS);
6188	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6189	    BNX_TBDR_STATUS);
6190
6191	val1 = REG_RD(sc, BNX_TDMA_STATUS);
6192	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6193	    BNX_TDMA_STATUS);
6194
6195	val1 = REG_RD(sc, BNX_HC_STATUS);
6196	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6197
6198	BNX_PRINTF(sc,
6199	    "----------------------------"
6200	    "----------------"
6201	    "----------------------------\n");
6202
6203	BNX_PRINTF(sc,
6204	    "----------------------------"
6205	    " Register  Dump "
6206	    "----------------------------\n");
6207
6208	for (i = 0x400; i < 0x8000; i += 0x10)
6209		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6210		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6211		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6212
6213	BNX_PRINTF(sc,
6214	    "----------------------------"
6215	    "----------------"
6216	    "----------------------------\n");
6217}
6218
6219void
6220bnx_breakpoint(struct bnx_softc *sc)
6221{
6222	/* Unreachable code to shut the compiler up about unused functions. */
6223	if (0) {
6224   		bnx_dump_txbd(sc, 0, NULL);
6225		bnx_dump_rxbd(sc, 0, NULL);
6226		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6227		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6228		bnx_dump_l2fhdr(sc, 0, NULL);
6229		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6230		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6231		bnx_dump_status_block(sc);
6232		bnx_dump_stats_block(sc);
6233		bnx_dump_driver_state(sc);
6234		bnx_dump_hw_state(sc);
6235	}
6236
6237	bnx_dump_driver_state(sc);
6238	/* Print the important status block fields. */
6239	bnx_dump_status_block(sc);
6240
6241#if 0
6242	/* Call the debugger. */
6243	breakpoint();
6244#endif
6245
6246	return;
6247}
6248#endif
6249