if_bnx.c revision 1.6
1/*	$OpenBSD: if_bnx.c,v 1.6 2006/08/10 04:13:09 brad Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Broadcom Corporation
5 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written consent.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#if 0
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
36#endif
37
38/*
39 * The following controllers are supported by this driver:
40 *   BCM5706C A2, A3
41 *   BCM5708C B1
42 *
43 * The following controllers are not supported by this driver:
44 * (These are not "Production" versions of the controller.)
45 *
46 *   BCM5706C A0, A1
47 *   BCM5706S A0, A1, A2, A3
48 *   BCM5708C A0, B0
49 *   BCM5708S A0, B0, B1
50 */
51
52#define BNX_DEBUG
53
54#include <dev/pci/if_bnxreg.h>
55#include <dev/microcode/bnx/bnxfw.h>
56
57/****************************************************************************/
58/* BNX Driver Version                                                       */
59/****************************************************************************/
60char bnx_driver_version[] = "v0.9.6";
61
62/****************************************************************************/
63/* BNX Debug Options                                                        */
64/****************************************************************************/
65#ifdef BNX_DEBUG
66	u_int32_t bnx_debug = BNX_WARN;
67
68	/*          0 = Never              */
69	/*          1 = 1 in 2,147,483,648 */
70	/*        256 = 1 in     8,388,608 */
71	/*       2048 = 1 in     1,048,576 */
72	/*      65536 = 1 in        32,768 */
73	/*    1048576 = 1 in         2,048 */
74	/*  268435456 =	1 in             8 */
75	/*  536870912 = 1 in             4 */
76	/* 1073741824 = 1 in             2 */
77
78	/* Controls how often the l2_fhdr frame error check will fail. */
79	int bnx_debug_l2fhdr_status_check = 0;
80
81	/* Controls how often the unexpected attention check will fail. */
82	int bnx_debug_unexpected_attention = 0;
83
84	/* Controls how often to simulate an mbuf allocation failure. */
85	int bnx_debug_mbuf_allocation_failure = 0;
86
87	/* Controls how often to simulate a DMA mapping failure. */
88	int bnx_debug_dma_map_addr_failure = 0;
89
90	/* Controls how often to simulate a bootcode failure. */
91	int bnx_debug_bootcode_running_failure = 0;
92#endif
93
94/****************************************************************************/
95/* PCI Device ID Table                                                      */
96/*                                                                          */
97/* Used by bnx_probe() to identify the devices supported by this driver.    */
98/****************************************************************************/
99const struct pci_matchid bnx_devices[] = {
100	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
101	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
102	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
103	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S }
104};
105
106/****************************************************************************/
107/* Supported Flash NVRAM device data.                                       */
108/****************************************************************************/
109static struct flash_spec flash_table[] =
110{
111	/* Slow EEPROM */
112	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
113	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
114	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
115	 "EEPROM - slow"},
116	/* Expansion entry 0001 */
117	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
118	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
119	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
120	 "Entry 0001"},
121	/* Saifun SA25F010 (non-buffered flash) */
122	/* strap, cfg1, & write1 need updates */
123	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
124	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
125	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
126	 "Non-buffered flash (128kB)"},
127	/* Saifun SA25F020 (non-buffered flash) */
128	/* strap, cfg1, & write1 need updates */
129	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
130	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
132	 "Non-buffered flash (256kB)"},
133	/* Expansion entry 0100 */
134	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
135	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
137	 "Entry 0100"},
138	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
139	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
140	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
141	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
142	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
143	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
144	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
145	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
146	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
147	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
148	/* Saifun SA25F005 (non-buffered flash) */
149	/* strap, cfg1, & write1 need updates */
150	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
151	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
153	 "Non-buffered flash (64kB)"},
154	/* Fast EEPROM */
155	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
156	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158	 "EEPROM - fast"},
159	/* Expansion entry 1001 */
160	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
161	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163	 "Entry 1001"},
164	/* Expansion entry 1010 */
165	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
166	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
168	 "Entry 1010"},
169	/* ATMEL AT45DB011B (buffered flash) */
170	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
171	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
172	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
173	 "Buffered flash (128kB)"},
174	/* Expansion entry 1100 */
175	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
176	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178	 "Entry 1100"},
179	/* Expansion entry 1101 */
180	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
181	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
182	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183	 "Entry 1101"},
184	/* Ateml Expansion entry 1110 */
185	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
186	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
187	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
188	 "Entry 1110 (Atmel)"},
189	/* ATMEL AT45DB021B (buffered flash) */
190	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
191	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
192	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
193	 "Buffered flash (256kB)"},
194};
195
196/****************************************************************************/
197/* OpenBSD device entry points.                                             */
198/****************************************************************************/
199int	bnx_probe(struct device *, void *, void *);
200void	bnx_attach(struct device *, struct device *, void *);
201#if 0
202void	bnx_detach(void *);
203#endif
204void	bnx_shutdown(void *);
205
206/****************************************************************************/
207/* BNX Debug Data Structure Dump Routines                                   */
208/****************************************************************************/
209#ifdef BNX_DEBUG
210void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
211void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
212void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
213void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
214void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
215void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
216void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
217void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
218void	bnx_dump_status_block(struct bnx_softc *);
219void	bnx_dump_stats_block(struct bnx_softc *);
220void	bnx_dump_driver_state(struct bnx_softc *);
221void	bnx_dump_hw_state(struct bnx_softc *);
222void	bnx_breakpoint(struct bnx_softc *);
223#endif
224
225/****************************************************************************/
226/* BNX Register/Memory Access Routines                                      */
227/****************************************************************************/
228u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
229void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
230void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
231int	bnx_miibus_read_reg(struct device *, int, int);
232void	bnx_miibus_write_reg(struct device *, int, int, int);
233void	bnx_miibus_statchg(struct device *);
234
235/****************************************************************************/
236/* BNX NVRAM Access Routines                                                */
237/****************************************************************************/
238int	bnx_acquire_nvram_lock(struct bnx_softc *);
239int	bnx_release_nvram_lock(struct bnx_softc *);
240void	bnx_enable_nvram_access(struct bnx_softc *);
241void	bnx_disable_nvram_access(struct bnx_softc *);
242int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
243	    u_int32_t);
244int	bnx_init_nvram(struct bnx_softc *);
245int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
246int	bnx_nvram_test(struct bnx_softc *);
247#ifdef BNX_NVRAM_WRITE_SUPPORT
248int	bnx_enable_nvram_write(struct bnx_softc *);
249void	bnx_disable_nvram_write(struct bnx_softc *);
250int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
251int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
252	    u_int32_t);
253int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
254#endif
255
256/****************************************************************************/
257/*                                                                          */
258/****************************************************************************/
259int	bnx_dma_alloc(struct bnx_softc *);
260void	bnx_dma_free(struct bnx_softc *);
261void	bnx_release_resources(struct bnx_softc *);
262void	bnx_dma_map_tx_desc(void *, bus_dmamap_t);
263
264/****************************************************************************/
265/* BNX Firmware Synchronization and Load                                    */
266/****************************************************************************/
267int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
268void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
269	    u_int32_t);
270void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
271	    struct fw_info *);
272void	bnx_init_cpus(struct bnx_softc *);
273
274void	bnx_stop(struct bnx_softc *);
275int	bnx_reset(struct bnx_softc *, u_int32_t);
276int	bnx_chipinit(struct bnx_softc *);
277int	bnx_blockinit(struct bnx_softc *);
278int	bnx_get_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
279	    u_int16_t *, u_int32_t *);
280
281int	bnx_init_tx_chain(struct bnx_softc *);
282int	bnx_init_rx_chain(struct bnx_softc *);
283void	bnx_free_rx_chain(struct bnx_softc *);
284void	bnx_free_tx_chain(struct bnx_softc *);
285
286int	bnx_tx_encap(struct bnx_softc *, struct mbuf *, u_int16_t *,
287	    u_int16_t *, u_int32_t *);
288void	bnx_start(struct ifnet *);
289int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
290void	bnx_watchdog(struct ifnet *);
291int	bnx_ifmedia_upd(struct ifnet *);
292void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
293void	bnx_init(void *);
294
295void	bnx_init_context(struct bnx_softc *);
296void	bnx_get_mac_addr(struct bnx_softc *);
297void	bnx_set_mac_addr(struct bnx_softc *);
298void	bnx_phy_intr(struct bnx_softc *);
299void	bnx_rx_intr(struct bnx_softc *);
300void	bnx_tx_intr(struct bnx_softc *);
301void	bnx_disable_intr(struct bnx_softc *);
302void	bnx_enable_intr(struct bnx_softc *);
303
304int	bnx_intr(void *);
305void	bnx_set_rx_mode(struct bnx_softc *);
306void	bnx_stats_update(struct bnx_softc *);
307void	bnx_tick(void *);
308
309/****************************************************************************/
310/* OpenBSD device dispatch table.                                           */
311/****************************************************************************/
312struct cfattach bnx_ca = {
313	sizeof(struct bnx_softc), bnx_probe, bnx_attach
314};
315
316struct cfdriver bnx_cd = {
317	0, "bnx", DV_IFNET
318};
319
320/****************************************************************************/
321/* Device probe function.                                                   */
322/*                                                                          */
323/* Compares the device to the driver's list of supported devices and        */
324/* reports back to the OS whether this is the right driver for the device.  */
325/*                                                                          */
326/* Returns:                                                                 */
327/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
328/****************************************************************************/
329int
330bnx_probe(struct device *parent, void *match, void *aux)
331{
332	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
333	    sizeof(bnx_devices)/sizeof(bnx_devices[0])));
334}
335
336/****************************************************************************/
337/* Device attach function.                                                  */
338/*                                                                          */
339/* Allocates device resources, performs secondary chip identification,      */
340/* resets and initializes the hardware, and initializes driver instance     */
341/* variables.                                                               */
342/*                                                                          */
343/* Returns:                                                                 */
344/*   0 on success, positive value on failure.                               */
345/****************************************************************************/
346void
347bnx_attach(struct device *parent, struct device *self, void *aux)
348{
349	struct bnx_softc *sc = (struct bnx_softc *)self;
350	struct pci_attach_args *pa = aux;
351	pci_chipset_tag_t pc = pa->pa_pc;
352	pci_intr_handle_t ih;
353	const char *intrstr = NULL;
354	struct ifnet *ifp;
355	u_int32_t val;
356	pcireg_t memtype;
357	bus_size_t size;
358
359	sc->bnx_pa = *pa;
360
361	/*
362	 * Map control/status registers.
363	*/
364	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
365	switch (memtype) {
366	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
367	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
368		if (pci_mapreg_map(pa, BNX_PCI_BAR0,
369		    memtype, 0, &sc->bnx_btag, &sc->bnx_bhandle,
370		    NULL, &size, 0) == 0)
371			break;
372	default:
373		printf(": can't find mem space\n");
374		return;
375	}
376
377	if (pci_intr_map(pa, &ih)) {
378		printf(": couldn't map interrupt\n");
379		goto bnx_attach_fail;
380	}
381
382	intrstr = pci_intr_string(pc, ih);
383
384	/*
385	 * Configure byte swap and enable indirect register access.
386	 * Rely on CPU to do target byte swapping on big endian systems.
387	 * Access to registers outside of PCI configurtion space are not
388	 * valid until this is done.
389	 */
390	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
391			       BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
392			       BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
393
394	/* Save ASIC revsion info. */
395	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);
396
397	/* Weed out any non-production controller revisions. */
398	switch(BNX_CHIP_ID(sc)) {
399		case BNX_CHIP_ID_5706_A0:
400		case BNX_CHIP_ID_5706_A1:
401		case BNX_CHIP_ID_5708_A0:
402		case BNX_CHIP_ID_5708_B0:
403			printf(": unsupported controller revision (%c%d)!\n",
404				(((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x08) & 0xf0) >> 4) + 'A'),
405			    (pci_conf_read(pa->pa_pc, pa->pa_tag, 0x08) & 0xf));
406			goto bnx_attach_fail;
407	}
408
409	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
410		printf(": SerDes controllers are not supported!\n");
411		goto bnx_attach_fail;
412	}
413
414#if 0
415	/*
416	 * The embedded PCIe to PCI-X bridge (EPB)
417	 * in the 5708 cannot address memory above
418	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
419	 */
420	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)
421		sc->max_bus_addr = BNX_BUS_SPACE_MAXADDR;
422	else
423		sc->max_bus_addr = BUS_SPACE_MAXADDR;
424#endif
425
426	/*
427	 * Find the base address for shared memory access.
428	 * Newer versions of bootcode use a signature and offset
429	 * while older versions use a fixed address.
430	 */
431	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
432	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
433		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
434	else
435		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;
436
437	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);
438
439	/* Set initial device and PHY flags */
440	sc->bnx_flags = 0;
441	sc->bnx_phy_flags = 0;
442
443	/* Get PCI bus information (speed and type). */
444	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
445	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
446		u_int32_t clkreg;
447
448		sc->bnx_flags |= BNX_PCIX_FLAG;
449
450		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);
451
452		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
453		switch (clkreg) {
454		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
455			sc->bus_speed_mhz = 133;
456			break;
457
458		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
459			sc->bus_speed_mhz = 100;
460			break;
461
462		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
463		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
464			sc->bus_speed_mhz = 66;
465			break;
466
467		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
468		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
469			sc->bus_speed_mhz = 50;
470			break;
471
472		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
473		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
474		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
475			sc->bus_speed_mhz = 33;
476			break;
477		}
478	} else {
479		if (val & BNX_PCICFG_MISC_STATUS_M66EN)
480			sc->bus_speed_mhz = 66;
481		else
482			sc->bus_speed_mhz = 33;
483	}
484
485	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
486		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;
487
488	/* Reset the controller. */
489	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
490		goto bnx_attach_fail;
491
492	/* Initialize the controller. */
493	if (bnx_chipinit(sc)) {
494		printf(": Controller initialization failed!\n");
495		goto bnx_attach_fail;
496	}
497
498	/* Perform NVRAM test. */
499	if (bnx_nvram_test(sc)) {
500		printf(": NVRAM test failed!\n");
501		goto bnx_attach_fail;
502	}
503
504	/* Fetch the permanent Ethernet MAC address. */
505	bnx_get_mac_addr(sc);
506
507	/*
508	 * Trip points control how many BDs
509	 * should be ready before generating an
510	 * interrupt while ticks control how long
511	 * a BD can sit in the chain before
512	 * generating an interrupt.  Set the default
513	 * values for the RX and TX rings.
514	 */
515
516#ifdef BNX_DRBUG
517	/* Force more frequent interrupts. */
518	sc->bnx_tx_quick_cons_trip_int = 1;
519	sc->bnx_tx_quick_cons_trip     = 1;
520	sc->bnx_tx_ticks_int           = 0;
521	sc->bnx_tx_ticks               = 0;
522
523	sc->bnx_rx_quick_cons_trip_int = 1;
524	sc->bnx_rx_quick_cons_trip     = 1;
525	sc->bnx_rx_ticks_int           = 0;
526	sc->bnx_rx_ticks               = 0;
527#else
528	sc->bnx_tx_quick_cons_trip_int = 20;
529	sc->bnx_tx_quick_cons_trip     = 20;
530	sc->bnx_tx_ticks_int           = 80;
531	sc->bnx_tx_ticks               = 80;
532
533	sc->bnx_rx_quick_cons_trip_int = 6;
534	sc->bnx_rx_quick_cons_trip     = 6;
535	sc->bnx_rx_ticks_int           = 18;
536	sc->bnx_rx_ticks               = 18;
537#endif
538
539	/* Update statistics once every second. */
540	sc->bnx_stats_ticks = 1000000 & 0xffff00;
541
542	/*
543	 * The copper based NetXtreme II controllers
544	 * use an integrated PHY at address 1 while
545	 * the SerDes controllers use a PHY at
546	 * address 2.
547	 */
548	sc->bnx_phy_addr = 1;
549
550	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
551		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
552		sc->bnx_flags |= BNX_NO_WOL_FLAG;
553		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708) {
554			sc->bnx_phy_addr = 2;
555			val = REG_RD_IND(sc, sc->bnx_shmem_base +
556					 BNX_SHARED_HW_CFG_CONFIG);
557			if (val & BNX_SHARED_HW_CFG_PHY_2_5G)
558				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
559		}
560	}
561
562	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
563		printf(": SerDes is not supported by this driver!\n");
564		goto bnx_attach_fail;
565	}
566
567	/* Allocate DMA memory resources. */
568	sc->bnx_dmatag = pa->pa_dmat;
569	if (bnx_dma_alloc(sc)) {
570		printf("%s: DMA resource allocation failed!\n",
571		    sc->bnx_dev.dv_xname);
572		goto bnx_attach_fail;
573	}
574
575	/* Initialize the ifnet interface. */
576	ifp = &sc->arpcom.ac_if;
577	ifp->if_softc = sc;
578	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
579	ifp->if_ioctl = bnx_ioctl;
580	ifp->if_start = bnx_start;
581	ifp->if_timer = 0;
582	ifp->if_watchdog = bnx_watchdog;
583        if (sc->bnx_phy_flags & BNX_PHY_2_5G_CAPABLE_FLAG)
584                ifp->if_baudrate = IF_Gbps(2.5);
585        else
586                ifp->if_baudrate = IF_Gbps(1);
587#if 0
588	ifp->if_hardmtu = BNX_MAX_JUMBO_MTU;
589#endif
590	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD);
591	IFQ_SET_READY(&ifp->if_snd);
592	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
593	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
594
595	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
596	sc->mbuf_alloc_size = MCLBYTES;
597
598	/* Hookup IRQ last. */
599	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc,
600	    sc->bnx_dev.dv_xname);
601	if (sc->bnx_intrhand == NULL) {
602		printf(": couldn't establish interrupt");
603		if (intrstr != NULL)
604			printf(" at %s", intrstr);
605		printf("\n");
606		goto bnx_attach_fail;
607	}
608
609	printf(": %s, address %s\n", intrstr,
610	    ether_sprintf(sc->arpcom.ac_enaddr));
611
612	sc->bnx_mii.mii_ifp = ifp;
613	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
614	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
615	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;
616
617	/* Look for our PHY. */
618	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
619	    bnx_ifmedia_sts);
620	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
621	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
622
623	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
624		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
625		ifmedia_add(&sc->bnx_mii.mii_media,
626		    IFM_ETHER|IFM_MANUAL, 0, NULL);
627		ifmedia_set(&sc->bnx_mii.mii_media,
628		    IFM_ETHER|IFM_MANUAL);
629	} else {
630		ifmedia_set(&sc->bnx_mii.mii_media,
631		    IFM_ETHER|IFM_AUTO);
632	}
633
634	/* Attach to the Ethernet interface list. */
635	if_attach(ifp);
636	ether_ifattach(ifp);
637
638	timeout_set(&sc->bnx_timeout, bnx_tick, sc);
639
640	/* Print some important debugging info. */
641	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));
642
643	goto bnx_attach_exit;
644
645bnx_attach_fail:
646	bnx_release_resources(sc);
647
648bnx_attach_exit:
649
650	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
651}
652
653/****************************************************************************/
654/* Device detach function.                                                  */
655/*                                                                          */
656/* Stops the controller, resets the controller, and releases resources.     */
657/*                                                                          */
658/* Returns:                                                                 */
659/*   0 on success, positive value on failure.                               */
660/****************************************************************************/
661#if 0
662void
663bnx_detach(void *xsc)
664{
665	struct bnx_softc *sc;
666	struct ifnet *ifp = &sc->arpcom.ac_if;
667
668	sc = device_get_softc(dev);
669
670	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
671
672	/* Stop and reset the controller. */
673	bnx_stop(sc);
674	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
675
676	ether_ifdetach(ifp);
677
678	/* If we have a child device on the MII bus remove it too. */
679	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
680		ifmedia_removeall(&sc->bnx_ifmedia);
681	} else {
682		bus_generic_detach(dev);
683		device_delete_child(dev, sc->bnx_mii);
684	}
685
686	/* Release all remaining resources. */
687	bnx_release_resources(sc);
688
689	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
690
691	return(0);
692}
693#endif
694
695/****************************************************************************/
696/* Device shutdown function.                                                */
697/*                                                                          */
698/* Stops and resets the controller.                                         */
699/*                                                                          */
700/* Returns:                                                                 */
701/*   Nothing                                                                */
702/****************************************************************************/
703void
704bnx_shutdown(void *xsc)
705{
706	struct bnx_softc *sc = (struct bnx_softc *)xsc;
707
708	bnx_stop(sc);
709	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
710}
711
712/****************************************************************************/
713/* Indirect register read.                                                  */
714/*                                                                          */
715/* Reads NetXtreme II registers using an index/data register pair in PCI    */
716/* configuration space.  Using this mechanism avoids issues with posted     */
717/* reads but is much slower than memory-mapped I/O.                         */
718/*                                                                          */
719/* Returns:                                                                 */
720/*   The value of the register.                                             */
721/****************************************************************************/
722u_int32_t
723bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
724{
725	struct pci_attach_args *pa = &(sc->bnx_pa);
726
727	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS, offset);
728#ifdef BNX_DEBUG
729	{
730		u_int32_t val;
731		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
732		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
733			__FUNCTION__, offset, val);
734		return val;
735	}
736#else
737	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
738#endif
739}
740
741/****************************************************************************/
742/* Indirect register write.                                                 */
743/*                                                                          */
744/* Writes NetXtreme II registers using an index/data register pair in PCI   */
745/* configuration space.  Using this mechanism avoids issues with posted     */
746 * writes but is much slower than memory-mapped I/O.
747/*                                                                          */
748/* Returns:                                                                 */
749/*   Nothing.                                                               */
750/****************************************************************************/
751void
752bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
753{
754	struct pci_attach_args  *pa = &(sc->bnx_pa);
755
756	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
757		__FUNCTION__, offset, val);
758
759	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS, offset);
760	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
761}
762
763/****************************************************************************/
764/* Context memory write.                                                    */
765/*                                                                          */
766/* The NetXtreme II controller uses context memory to track connection      */
767/* information for L2 and higher network protocols.                         */
768/*                                                                          */
769/* Returns:                                                                 */
770/*   Nothing.                                                               */
771/****************************************************************************/
772void
773bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset, u_int32_t val)
774{
775
776	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
777		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
778
779	offset += cid_addr;
780	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
781	REG_WR(sc, BNX_CTX_DATA, val);
782}
783
784/****************************************************************************/
785/* PHY register read.                                                       */
786/*                                                                          */
787/* Implements register reads on the MII bus.                                */
788/*                                                                          */
789/* Returns:                                                                 */
790/*   The value of the register.                                             */
791/****************************************************************************/
int
bnx_miibus_read_reg(struct device *dev, int phy, int reg)
{
	struct bnx_softc *sc = (struct bnx_softc *)dev;
	u_int32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/* Turn off autopolling while this manual MDIO access is in
	 * progress; it is re-enabled below before returning. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		/* Read back — presumably to flush the posted write before
		 * the settling delay; confirm against hardware docs. */
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO read command for phy/reg. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
		BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
		BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/* Poll until the BUSY bit clears or the timeout expires.  On
	 * success val is masked to the data bits, so the BUSY check
	 * below cannot see a stale BUSY bit. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* BUSY still set here means the poll loop above timed out. */
	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Restore autopolling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Only the low 16 bits carry PHY register data. */
	return (val & 0xffff);

}
858
859/****************************************************************************/
860/* PHY register write.                                                      */
861/*                                                                          */
862/* Implements register writes on the MII bus.                               */
863/*                                                                          */
864/* Returns:                                                                 */
865/*   Nothing.                                                               */
866/****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = (struct bnx_softc *)dev;
	u_int32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Turn off autopolling while this manual MDIO access is in
	 * progress; it is re-enabled below before returning. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO write command for phy/reg with the
	 * data value in the low bits. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
		BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll until the BUSY bit clears or the timeout expires. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* BUSY still set here means the poll loop above timed out. */
	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY)
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore autopolling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
922
923/****************************************************************************/
924/* MII bus status change.                                                   */
925/*                                                                          */
926/* Called by the MII bus driver when the PHY establishes link to set the    */
927/* MAC interface registers.                                                 */
928/*                                                                          */
929/* Returns:                                                                 */
930/*   Nothing.                                                               */
931/****************************************************************************/
void
bnx_miibus_statchg(struct device *dev)
{
	struct bnx_softc *sc = (struct bnx_softc *)dev;
	struct mii_data *mii = &sc->bnx_mii;

	/* Clear the current port mode bits before selecting a new one. */
	BNX_CLRBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		DBPRINT(sc, BNX_INFO, "Setting GMII interface.\n");
		BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BNX_INFO, "Setting MII interface.\n");
		BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
		BNX_CLRBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_HALF_DUPLEX);
	}
}
958
959
960/****************************************************************************/
961/* Acquire NVRAM lock.                                                      */
962/*                                                                          */
963/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
964/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
965/* for use by the driver.                                                   */
966/*                                                                          */
967/* Returns:                                                                 */
968/*   0 on success, positive value on failure.                               */
969/****************************************************************************/
970int
971bnx_acquire_nvram_lock(struct bnx_softc *sc)
972{
973	u_int32_t val;
974	int j;
975
976	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
977
978	/* Request access to the flash interface. */
979	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
980	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
981		val = REG_RD(sc, BNX_NVM_SW_ARB);
982		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
983			break;
984
985		DELAY(5);
986	}
987
988	if (j >= NVRAM_TIMEOUT_COUNT) {
989		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
990		return EBUSY;
991	}
992
993	return 0;
994}
995
996/****************************************************************************/
997/* Release NVRAM lock.                                                      */
998/*                                                                          */
999/* When the caller is finished accessing NVRAM the lock must be released.   */
1000/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1001/* for use by the driver.                                                   */
1002/*                                                                          */
1003/* Returns:                                                                 */
1004/*   0 on success, positive value on failure.                               */
1005/****************************************************************************/
1006int
1007bnx_release_nvram_lock(struct bnx_softc *sc)
1008{
1009	int j;
1010	u_int32_t val;
1011
1012	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1013
1014	/*
1015	 * Relinquish nvram interface.
1016	 */
1017	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1018
1019	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1020		val = REG_RD(sc, BNX_NVM_SW_ARB);
1021		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1022			break;
1023
1024		DELAY(5);
1025	}
1026
1027	if (j >= NVRAM_TIMEOUT_COUNT) {
1028		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1029		return EBUSY;
1030	}
1031
1032	return 0;
1033}
1034
1035#ifdef BNX_NVRAM_WRITE_SUPPORT
1036/****************************************************************************/
1037/* Enable NVRAM write access.                                               */
1038/*                                                                          */
1039/* Before writing to NVRAM the caller must enable NVRAM writes.             */
1040/*                                                                          */
1041/* Returns:                                                                 */
1042/*   0 on success, positive value on failure.                               */
1043/****************************************************************************/
1044int
1045bnx_enable_nvram_write(struct bnx_softc *sc)
1046{
1047	u_int32_t val;
1048
1049	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1050
1051	val = REG_RD(sc, BNX_MISC_CFG);
1052	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1053
1054	if (!sc->bnx_flash_info->buffered) {
1055		int j;
1056
1057		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1058		REG_WR(sc, BNX_NVM_COMMAND,	BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1059
1060		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1061			DELAY(5);
1062
1063			val = REG_RD(sc, BNX_NVM_COMMAND);
1064			if (val & BNX_NVM_COMMAND_DONE)
1065				break;
1066		}
1067
1068		if (j >= NVRAM_TIMEOUT_COUNT) {
1069			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1070			return EBUSY;
1071		}
1072	}
1073	return 0;
1074}
1075
1076/****************************************************************************/
1077/* Disable NVRAM write access.                                              */
1078/*                                                                          */
1079/* When the caller is finished writing to NVRAM write access must be        */
1080/* disabled.                                                                */
1081/*                                                                          */
1082/* Returns:                                                                 */
1083/*   Nothing.                                                               */
1084/****************************************************************************/
1085void
1086bnx_disable_nvram_write(struct bnx_softc *sc)
1087{
1088	u_int32_t val;
1089
1090	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1091
1092	val = REG_RD(sc, BNX_MISC_CFG);
1093	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1094}
1095#endif
1096
1097/****************************************************************************/
1098/* Enable NVRAM access.                                                     */
1099/*                                                                          */
1100/* Before accessing NVRAM for read or write operations the caller must      */
1101/* enable NVRAM access.                                                     */
1102/*                                                                          */
1103/* Returns:                                                                 */
1104/*   Nothing.                                                               */
1105/****************************************************************************/
1106void
1107bnx_enable_nvram_access(struct bnx_softc *sc)
1108{
1109	u_int32_t val;
1110
1111	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1112
1113	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1114	/* Enable both bits, even on read. */
1115	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1116	       val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1117}
1118
1119/****************************************************************************/
1120/* Disable NVRAM access.                                                    */
1121/*                                                                          */
1122/* When the caller is finished accessing NVRAM access must be disabled.     */
1123/*                                                                          */
1124/* Returns:                                                                 */
1125/*   Nothing.                                                               */
1126/****************************************************************************/
1127void
1128bnx_disable_nvram_access(struct bnx_softc *sc)
1129{
1130	u_int32_t val;
1131
1132	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1133
1134	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1135
1136	/* Disable both bits, even after read. */
1137	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1138		val & ~(BNX_NVM_ACCESS_ENABLE_EN |
1139			BNX_NVM_ACCESS_ENABLE_WR_EN));
1140}
1141
1142#ifdef BNX_NVRAM_WRITE_SUPPORT
1143/****************************************************************************/
1144/* Erase NVRAM page before writing.                                         */
1145/*                                                                          */
1146/* Non-buffered flash parts require that a page be erased before it is      */
1147/* written.                                                                 */
1148/*                                                                          */
1149/* Returns:                                                                 */
1150/*   0 on success, positive value on failure.                               */
1151/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bnx_flash_info->buffered)
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	      BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	/* The DONE bit never asserted within the timeout window. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
1194#endif /* BNX_NVRAM_WRITE_SUPPORT */
1195
1196/****************************************************************************/
1197/* Read a dword (32 bits) from NVRAM.                                       */
1198/*                                                                          */
1199/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1200/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1201/*                                                                          */
1202/* Returns:                                                                 */
1203/*   0 on success and the 32 bit value read, positive value on failure.     */
1204/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_val,
							u_int32_t cmd_flags)
{
	u_int32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash: translate the linear
	 * offset into the page/offset addressing those parts use. */
	if (sc->bnx_flash_info->buffered) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
			   sc->bnx_flash_info->page_bits) +
			  (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* Convert from big-endian and copy the 4 bytes
			 * into the caller's buffer. */
			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1255
1256#ifdef BNX_NVRAM_WRITE_SUPPORT
1257/****************************************************************************/
1258/* Write a dword (32 bits) to NVRAM.                                        */
1259/*                                                                          */
1260/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1261/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1262/* enabled NVRAM write access.                                              */
1263/*                                                                          */
1264/* Returns:                                                                 */
1265/*   0 on success, positive value on failure.                               */
1266/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
	u_int32_t cmd_flags)
{
	u_int32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash: translate the linear
	 * offset into the page/offset addressing those parts use. */
	if (sc->bnx_flash_info->buffered) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
			  sc->bnx_flash_info->page_bits) +
			 (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	/* The DONE bit never asserted within the timeout window. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
1310#endif /* BNX_NVRAM_WRITE_SUPPORT */
1311
1312/****************************************************************************/
1313/* Initialize NVRAM access.                                                 */
1314/*                                                                          */
1315/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1316/* access that device.                                                      */
1317/*                                                                          */
1318/* Returns:                                                                 */
1319/*   0 on success, positive value on failure.                               */
1320/****************************************************************************/
1321int
1322bnx_init_nvram(struct bnx_softc *sc)
1323{
1324	u_int32_t val;
1325	int j, entry_count, rc;
1326	struct flash_spec *flash;
1327
1328	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1329
1330	/* Determine the selected interface. */
1331	val = REG_RD(sc, BNX_NVM_CFG1);
1332
1333	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1334
1335	rc = 0;
1336
1337	/*
1338	 * Flash reconfiguration is required to support additional
1339	 * NVRAM devices not directly supported in hardware.
1340	 * Check if the flash interface was reconfigured
1341	 * by the bootcode.
1342	 */
1343
1344	if (val & 0x40000000) {
1345		/* Flash interface reconfigured by bootcode. */
1346
1347		DBPRINT(sc,BNX_INFO_LOAD,
1348			"bnx_init_nvram(): Flash WAS reconfigured.\n");
1349
1350		for (j = 0, flash = &flash_table[0]; j < entry_count;
1351		     j++, flash++) {
1352			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1353			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1354				sc->bnx_flash_info = flash;
1355				break;
1356			}
1357		}
1358	} else {
1359		/* Flash interface not yet reconfigured. */
1360		u_int32_t mask;
1361
1362		DBPRINT(sc,BNX_INFO_LOAD,
1363			"bnx_init_nvram(): Flash was NOT reconfigured.\n");
1364
1365		if (val & (1 << 23))
1366			mask = FLASH_BACKUP_STRAP_MASK;
1367		else
1368			mask = FLASH_STRAP_MASK;
1369
1370		/* Look for the matching NVRAM device configuration data. */
1371		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1372
1373			/* Check if the device matches any of the known devices. */
1374			if ((val & mask) == (flash->strapping & mask)) {
1375				/* Found a device match. */
1376				sc->bnx_flash_info = flash;
1377
1378				/* Request access to the flash interface. */
1379				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1380					return rc;
1381
1382				/* Reconfigure the flash interface. */
1383				bnx_enable_nvram_access(sc);
1384				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1385				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1386				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1387				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1388				bnx_disable_nvram_access(sc);
1389				bnx_release_nvram_lock(sc);
1390
1391				break;
1392			}
1393		}
1394	}
1395
1396	/* Check if a matching device was found. */
1397	if (j == entry_count) {
1398		sc->bnx_flash_info = NULL;
1399		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1400			__FILE__, __LINE__);
1401		rc = ENODEV;
1402	}
1403
1404	/* Write the flash config data to the shared memory interface. */
1405	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1406	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1407	if (val)
1408		sc->bnx_flash_size = val;
1409	else
1410		sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1411
1412	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = 0x%08X\n",
1413		sc->bnx_flash_info->total_size);
1414
1415	DBPRINT(sc,BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1416
1417	return rc;
1418}
1419
1420/****************************************************************************/
1421/* Read an arbitrary range of data from NVRAM.                              */
1422/*                                                                          */
1423/* Prepares the NVRAM interface for access and reads the requested data     */
1424/* into the supplied buffer.                                                */
1425/*                                                                          */
1426/* Returns:                                                                 */
1427/*   0 on success and the data read, positive value on failure.             */
1428/****************************************************************************/
1429int
1430bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1431	int buf_size)
1432{
1433	int rc = 0;
1434	u_int32_t cmd_flags, offset32, len32, extra;
1435
1436	if (buf_size == 0)
1437		return 0;
1438
1439	/* Request access to the flash interface. */
1440	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1441		return rc;
1442
1443	/* Enable access to flash interface */
1444	bnx_enable_nvram_access(sc);
1445
1446	len32 = buf_size;
1447	offset32 = offset;
1448	extra = 0;
1449
1450	cmd_flags = 0;
1451
1452	if (offset32 & 3) {
1453		u_int8_t buf[4];
1454		u_int32_t pre_len;
1455
1456		offset32 &= ~3;
1457		pre_len = 4 - (offset & 3);
1458
1459		if (pre_len >= len32) {
1460			pre_len = len32;
1461			cmd_flags = BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1462		}
1463		else {
1464			cmd_flags = BNX_NVM_COMMAND_FIRST;
1465		}
1466
1467		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1468
1469		if (rc)
1470			return rc;
1471
1472		memcpy(ret_buf, buf + (offset & 3), pre_len);
1473
1474		offset32 += 4;
1475		ret_buf += pre_len;
1476		len32 -= pre_len;
1477	}
1478
1479	if (len32 & 3) {
1480		extra = 4 - (len32 & 3);
1481		len32 = (len32 + 4) & ~3;
1482	}
1483
1484	if (len32 == 4) {
1485		u_int8_t buf[4];
1486
1487		if (cmd_flags)
1488			cmd_flags = BNX_NVM_COMMAND_LAST;
1489		else
1490			cmd_flags = BNX_NVM_COMMAND_FIRST |
1491				    BNX_NVM_COMMAND_LAST;
1492
1493		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1494
1495		memcpy(ret_buf, buf, 4 - extra);
1496	}
1497	else if (len32 > 0) {
1498		u_int8_t buf[4];
1499
1500		/* Read the first word. */
1501		if (cmd_flags)
1502			cmd_flags = 0;
1503		else
1504			cmd_flags = BNX_NVM_COMMAND_FIRST;
1505
1506		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1507
1508		/* Advance to the next dword. */
1509		offset32 += 4;
1510		ret_buf += 4;
1511		len32 -= 4;
1512
1513		while (len32 > 4 && rc == 0) {
1514			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1515
1516			/* Advance to the next dword. */
1517			offset32 += 4;
1518			ret_buf += 4;
1519			len32 -= 4;
1520		}
1521
1522		if (rc)
1523			return rc;
1524
1525		cmd_flags = BNX_NVM_COMMAND_LAST;
1526		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1527
1528		memcpy(ret_buf, buf, 4 - extra);
1529	}
1530
1531	/* Disable access to flash interface and release the lock. */
1532	bnx_disable_nvram_access(sc);
1533	bnx_release_nvram_lock(sc);
1534
1535	return rc;
1536}
1537
1538#ifdef BNX_NVRAM_WRITE_SUPPORT
1539/****************************************************************************/
1540/* Write an arbitrary range of data to NVRAM.                               */
1541/*                                                                          */
1542/* Prepares the NVRAM interface for write access and writes the requested   */
1543/* data from the supplied buffer.  The caller is responsible for            */
1544/* calculating any appropriate CRCs.                                        */
1545/*                                                                          */
1546/* Returns:                                                                 */
1547/*   0 on success, positive value on failure.                               */
1548/****************************************************************************/
1549int
1550bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1551	int buf_size)
1552{
1553	u_int32_t written, offset32, len32;
1554	u_int8_t *buf, start[4], end[4];
1555	int rc = 0;
1556	int align_start, align_end;
1557
1558	buf = data_buf;
1559	offset32 = offset;
1560	len32 = buf_size;
1561	align_start = align_end = 0;
1562
1563	if ((align_start = (offset32 & 3))) {
1564		offset32 &= ~3;
1565		len32 += align_start;
1566		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1567			return rc;
1568	}
1569
1570	if (len32 & 3) {
1571	       	if ((len32 > 4) || !align_start) {
1572			align_end = 4 - (len32 & 3);
1573			len32 += align_end;
1574			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1575				end, 4))) {
1576				return rc;
1577			}
1578		}
1579	}
1580
1581	if (align_start || align_end) {
1582		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1583		if (buf == 0)
1584			return ENOMEM;
1585		if (align_start) {
1586			memcpy(buf, start, 4);
1587		}
1588		if (align_end) {
1589			memcpy(buf + len32 - 4, end, 4);
1590		}
1591		memcpy(buf + align_start, data_buf, buf_size);
1592	}
1593
1594	written = 0;
1595	while ((written < len32) && (rc == 0)) {
1596		u_int32_t page_start, page_end, data_start, data_end;
1597		u_int32_t addr, cmd_flags;
1598		int i;
1599		u_int8_t flash_buffer[264];
1600
1601	    /* Find the page_start addr */
1602		page_start = offset32 + written;
1603		page_start -= (page_start % sc->bnx_flash_info->page_size);
1604		/* Find the page_end addr */
1605		page_end = page_start + sc->bnx_flash_info->page_size;
1606		/* Find the data_start addr */
1607		data_start = (written == 0) ? offset32 : page_start;
1608		/* Find the data_end addr */
1609		data_end = (page_end > offset32 + len32) ?
1610			(offset32 + len32) : page_end;
1611
1612		/* Request access to the flash interface. */
1613		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1614			goto nvram_write_end;
1615
1616		/* Enable access to flash interface */
1617		bnx_enable_nvram_access(sc);
1618
1619		cmd_flags = BNX_NVM_COMMAND_FIRST;
1620		if (sc->bnx_flash_info->buffered == 0) {
1621			int j;
1622
1623			/* Read the whole page into the buffer
1624			 * (non-buffer flash only) */
1625			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1626				if (j == (sc->bnx_flash_info->page_size - 4)) {
1627					cmd_flags |= BNX_NVM_COMMAND_LAST;
1628				}
1629				rc = bnx_nvram_read_dword(sc,
1630					page_start + j,
1631					&flash_buffer[j],
1632					cmd_flags);
1633
1634				if (rc)
1635					goto nvram_write_end;
1636
1637				cmd_flags = 0;
1638			}
1639		}
1640
1641		/* Enable writes to flash interface (unlock write-protect) */
1642		if ((rc = bnx_enable_nvram_write(sc)) != 0)
1643			goto nvram_write_end;
1644
1645		/* Erase the page */
1646		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1647			goto nvram_write_end;
1648
1649		/* Re-enable the write again for the actual write */
1650		bnx_enable_nvram_write(sc);
1651
1652		/* Loop to write back the buffer data from page_start to
1653		 * data_start */
1654		i = 0;
1655		if (sc->bnx_flash_info->buffered == 0) {
1656			for (addr = page_start; addr < data_start;
1657				addr += 4, i += 4) {
1658
1659				rc = bnx_nvram_write_dword(sc, addr,
1660					&flash_buffer[i], cmd_flags);
1661
1662				if (rc != 0)
1663					goto nvram_write_end;
1664
1665				cmd_flags = 0;
1666			}
1667		}
1668
1669		/* Loop to write the new data from data_start to data_end */
1670		for (addr = data_start; addr < data_end; addr += 4, i++) {
1671			if ((addr == page_end - 4) ||
1672				((sc->bnx_flash_info->buffered) &&
1673				 (addr == data_end - 4))) {
1674
1675				cmd_flags |= BNX_NVM_COMMAND_LAST;
1676			}
1677			rc = bnx_nvram_write_dword(sc, addr, buf,
1678				cmd_flags);
1679
1680			if (rc != 0)
1681				goto nvram_write_end;
1682
1683			cmd_flags = 0;
1684			buf += 4;
1685		}
1686
1687		/* Loop to write back the buffer data from data_end
1688		 * to page_end */
1689		if (sc->bnx_flash_info->buffered == 0) {
1690			for (addr = data_end; addr < page_end;
1691				addr += 4, i += 4) {
1692
1693				if (addr == page_end-4) {
1694					cmd_flags = BNX_NVM_COMMAND_LAST;
1695                		}
1696				rc = bnx_nvram_write_dword(sc, addr,
1697					&flash_buffer[i], cmd_flags);
1698
1699				if (rc != 0)
1700					goto nvram_write_end;
1701
1702				cmd_flags = 0;
1703			}
1704		}
1705
1706		/* Disable writes to flash interface (lock write-protect) */
1707		bnx_disable_nvram_write(sc);
1708
1709		/* Disable access to flash interface */
1710		bnx_disable_nvram_access(sc);
1711		bnx_release_nvram_lock(sc);
1712
1713		/* Increment written */
1714		written += data_end - data_start;
1715	}
1716
1717nvram_write_end:
1718	if (align_start || align_end)
1719		free(buf, M_DEVBUF);
1720
1721	return rc;
1722}
1723#endif /* BNX_NVRAM_WRITE_SUPPORT */
1724
1725/****************************************************************************/
1726/* Verifies that NVRAM is accessible and contains valid data.               */
1727/*                                                                          */
1728/* Reads the configuration data from NVRAM and verifies that the CRC is     */
1729/* correct.                                                                 */
1730/*                                                                          */
1731/* Returns:                                                                 */
1732/*   0 on success, positive value on failure.                               */
1733/****************************************************************************/
1734int
1735bnx_nvram_test(struct bnx_softc *sc)
1736{
1737	u_int32_t buf[BNX_NVRAM_SIZE / 4];
1738	u_int8_t *data = (u_int8_t *) buf;
1739	int rc = 0;
1740	u_int32_t magic, csum;
1741
1742	/*
1743	 * Check that the device NVRAM is valid by reading
1744	 * the magic value at offset 0.
1745	 */
1746	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
1747		goto bnx_nvram_test_done;
1748
1749	magic = bnx_be32toh(buf[0]);
1750	if (magic != BNX_NVRAM_MAGIC) {
1751		rc = ENODEV;
1752		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1753			"Found: 0x%08X\n",
1754			__FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
1755		goto bnx_nvram_test_done;
1756	}
1757
1758	/*
1759	 * Verify that the device NVRAM includes valid
1760	 * configuration data.
1761	 */
1762	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
1763		goto bnx_nvram_test_done;
1764
1765	csum = ether_crc32_le(data, 0x100);
1766	if (csum != BNX_CRC32_RESIDUAL) {
1767		rc = ENODEV;
1768		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1769			"Expected: 0x%08X, Found: 0x%08X\n",
1770			__FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1771		goto bnx_nvram_test_done;
1772	}
1773
1774	csum = ether_crc32_le(data + 0x100, 0x100);
1775	if (csum != BNX_CRC32_RESIDUAL) {
1776		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1777			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1778			__FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1779		rc = ENODEV;
1780	}
1781
1782bnx_nvram_test_done:
1783	return rc;
1784}
1785
1786/****************************************************************************/
1787/* Free any DMA memory owned by the driver.                                 */
1788/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees      */
1790/* the memory if allocated.                                                 */
1791/*                                                                          */
1792/* Returns:                                                                 */
1793/*   Nothing.                                                               */
1794/****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Each block below follows the same teardown order: unload the
	 * DMA map, unmap the kernel virtual mapping, free the DMA-safe
	 * memory, then destroy the map itself.  Pointers are NULLed so
	 * a second call (or a partially-failed attach) is harmless.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the TX mbuf maps (maps only; mbufs are
	 * released elsewhere). */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
		}
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps (maps only; mbufs are
	 * released elsewhere). */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
1875
1876/****************************************************************************/
1877/* Map TX buffers into TX buffer descriptors.                               */
1878/*                                                                          */
/* Given a series of DMA memory containing an outgoing frame, map the       */
1880/* segments into the tx_bd structure used by the hardware.                  */
1881/*                                                                          */
1882/* Returns:                                                                 */
1883/*   Nothing.                                                               */
1884/****************************************************************************/
void
bnx_dma_map_tx_desc(void *arg, bus_dmamap_t map)
{
	struct bnx_dmamap_arg *map_arg;
	struct bnx_softc *sc;
	struct tx_bd *txbd = NULL;
	int i = 0, nseg;
	u_int16_t prod, chain_prod;
	u_int32_t	prod_bseq;
#ifdef BNX_DEBUG
	u_int16_t debug_prod;
#endif

	map_arg = arg;
	sc = map_arg->sc;
	nseg = map->dm_nsegs;

	/* Signal error to caller if there's too many segments.
	 * maxsegs == 0 on return is the error indication. */
	if (nseg > map_arg->maxsegs) {
		DBPRINT(sc, BNX_WARN,
			"%s(): Mapped TX descriptors: max segs = %d, "
			"actual segs = %d\n",
			__FUNCTION__, map_arg->maxsegs, nseg);

		map_arg->maxsegs = 0;
		return;
	}

	/* prod points to an empty tx_bd at this point. */
	prod       = map_arg->prod;
	chain_prod = map_arg->chain_prod;
	prod_bseq  = map_arg->prod_bseq;

#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */

	txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

	/* Setup the first tx_bd for the first segment;
	 * descriptor fields are little-endian on the wire. */
	txbd->tx_bd_haddr_lo       = htole32(BNX_ADDR_LO(map->dm_segs[i].ds_addr));
	txbd->tx_bd_haddr_hi       = htole32(BNX_ADDR_HI(map->dm_segs[i].ds_addr));
	txbd->tx_bd_mss_nbytes     = htole16(map->dm_segs[i].ds_len);
	txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
			TX_BD_FLAGS_START);
	prod_bseq += map->dm_segs[i].ds_len;

	/* Setup any remaining segments.  Only the first descriptor
	 * carries TX_BD_FLAGS_START. */
	for (i = 1; i < nseg; i++) {
		prod       = NEXT_TX_BD(prod);
		chain_prod = TX_CHAIN_IDX(prod);

		txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo       = htole32(BNX_ADDR_LO(map->dm_segs[i].ds_addr));
		txbd->tx_bd_haddr_hi       = htole32(BNX_ADDR_HI(map->dm_segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes     = htole16(map->dm_segs[i].ds_len);
		txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);

		prod_bseq += map->dm_segs[i].ds_len;
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, nseg));

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/* prod points to the last tx_bd at this point.  Return the
	 * updated producer state to the caller through map_arg. */
	map_arg->maxsegs    = nseg;
	map_arg->prod       = prod;
	map_arg->chain_prod = chain_prod;
	map_arg->prod_bseq  = prod_bseq;
}
1975
1976/****************************************************************************/
1977/* Allocate any DMA memory needed by the driver.                            */
1978/*                                                                          */
1979/* Allocates DMA memory needed for the various global structures needed by  */
1980/* hardware.                                                                */
1981/*                                                                          */
1982/* Returns:                                                                 */
1983/*   0 for success, positive value for failure.                             */
1984/****************************************************************************/
1985int
1986bnx_dma_alloc(struct bnx_softc *sc)
1987{
1988	int i, rc = 0;
1989
1990	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1991
1992	/*
1993	 * Allocate DMA memory for the status block, map the memory into DMA
1994	 * space, and fetch the physical address of the block.
1995	 */
1996	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
1997	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
1998		printf(": Could not create status block DMA map!\n");
1999		rc = ENOMEM;
2000		goto bnx_dma_alloc_exit;
2001	}
2002
2003	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2004	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2005	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
2006		printf(": Could not allocate status block DMA memory!\n");
2007		rc = ENOMEM;
2008		goto bnx_dma_alloc_exit;
2009	}
2010
2011	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2012	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2013		printf(": Could not map status block DMA memory!\n");
2014		rc = ENOMEM;
2015		goto bnx_dma_alloc_exit;
2016	}
2017
2018	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2019	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2020		printf(": Could not load status block DMA memory!\n");
2021		rc = ENOMEM;
2022		goto bnx_dma_alloc_exit;
2023	}
2024
2025	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2026	bzero(sc->status_block, BNX_STATUS_BLK_SZ);
2027
2028	/* DRC - Fix for 64 bit addresses. */
2029	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2030		(u_int32_t) sc->status_block_paddr);
2031
2032	/*
2033	 * Allocate DMA memory for the statistics block, map the memory into
2034	 * DMA space, and fetch the physical address of the block.
2035	 */
2036	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2037	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2038		printf(": Could not create stats block DMA map!\n");
2039		rc = ENOMEM;
2040		goto bnx_dma_alloc_exit;
2041	}
2042
2043	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2044	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2045	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2046		printf(": Could not allocate stats block DMA memory!\n");
2047		rc = ENOMEM;
2048		goto bnx_dma_alloc_exit;
2049	}
2050
2051	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2052	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2053		printf(": Could not map stats block DMA memory!\n");
2054		rc = ENOMEM;
2055		goto bnx_dma_alloc_exit;
2056	}
2057
2058	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2059	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2060		printf(": Could not load status block DMA memory!\n");
2061		rc = ENOMEM;
2062		goto bnx_dma_alloc_exit;
2063	}
2064
2065	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2066	bzero(sc->stats_block, BNX_STATS_BLK_SZ);
2067
2068	/* DRC - Fix for 64 bit address. */
2069	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2070		(u_int32_t) sc->stats_block_paddr);
2071
2072	/*
2073	 * Allocate DMA memory for the TX buffer descriptor chain,
2074	 * and fetch the physical address of the block.
2075	 */
2076	for (i = 0; i < TX_PAGES; i++) {
2077		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2078		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2079		    &sc->tx_bd_chain_map[i])) {
2080			printf(": Could not create Tx desc %d DMA map!\n", i);
2081			rc = ENOMEM;
2082			goto bnx_dma_alloc_exit;
2083		}
2084
2085		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2086		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2087		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2088			printf(": Could not allocate TX desc %d DMA memory!\n", i);
2089			rc = ENOMEM;
2090			goto bnx_dma_alloc_exit;
2091		}
2092
2093		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2094		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2095		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2096			printf(": Could not map TX desc %d DMA memory!\n", i);
2097			rc = ENOMEM;
2098			goto bnx_dma_alloc_exit;
2099		}
2100
2101		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2102		    (caddr_t)sc->tx_bd_chain[i], BNX_STATS_BLK_SZ, NULL,
2103		    BUS_DMA_NOWAIT)) {
2104			printf(": Could not load TX desc %d DMA memory!\n", i);
2105			rc = ENOMEM;
2106			goto bnx_dma_alloc_exit;
2107		}
2108
2109		sc->tx_bd_chain_paddr[i] = sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2110
2111		/* DRC - Fix for 64 bit systems. */
2112		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2113			i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2114	}
2115
2116	/*
2117	 * Create DMA maps for the TX buffer mbufs.
2118	 */
2119	for (i = 0; i < TOTAL_TX_BD; i++) {
2120		if (bus_dmamap_create(sc->bnx_dmatag, MCLBYTES * BNX_MAX_SEGMENTS,
2121		    BNX_MAX_SEGMENTS, MCLBYTES, 0, BUS_DMA_NOWAIT,
2122		    &sc->tx_mbuf_map[i])) {
2123			printf(": Could not create Tx mbuf %d DMA map!\n", i);
2124			rc = ENOMEM;
2125			goto bnx_dma_alloc_exit;
2126		}
2127	}
2128
2129	/*
2130	 * Allocate DMA memory for the Rx buffer descriptor chain,
2131	 * and fetch the physical address of the block.
2132	 */
2133	for (i = 0; i < RX_PAGES; i++) {
2134		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2135		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2136		    &sc->rx_bd_chain_map[i])) {
2137			printf(": Could not create Rx desc %d DMA map!\n", i);
2138			rc = ENOMEM;
2139			goto bnx_dma_alloc_exit;
2140		}
2141
2142		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2143		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2144		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2145			printf(": Could not allocate Rx desc %d DMA memory!\n", i);
2146			rc = ENOMEM;
2147			goto bnx_dma_alloc_exit;
2148		}
2149
2150		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2151		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2152		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2153			printf(": Could not map Rx desc %d DMA memory!\n", i);
2154			rc = ENOMEM;
2155			goto bnx_dma_alloc_exit;
2156		}
2157
2158		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2159		    (caddr_t)sc->rx_bd_chain[i], BNX_STATS_BLK_SZ, NULL,
2160		    BUS_DMA_NOWAIT)) {
2161			printf(": Could not load Rx desc %d DMA memory!\n", i);
2162			rc = ENOMEM;
2163			goto bnx_dma_alloc_exit;
2164		}
2165
2166		bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2167		sc->rx_bd_chain_paddr[i] = sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2168
2169		/* DRC - Fix for 64 bit systems. */
2170		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2171			i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2172	}
2173
2174	/*
2175	 * Create DMA maps for the Rx buffer mbufs.
2176	 */
2177	for (i = 0; i < TOTAL_RX_BD; i++) {
2178		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2179		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2180		    &sc->rx_mbuf_map[i])) {
2181			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2182			rc = ENOMEM;
2183			goto bnx_dma_alloc_exit;
2184		}
2185	}
2186
2187 bnx_dma_alloc_exit:
2188	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2189
2190	return(rc);
2191}
2192
2193/****************************************************************************/
2194/* Release all resources used by the driver.                                */
2195/*                                                                          */
2196/* Releases all resources acquired by the driver including interrupts,      */
2197/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2198/*                                                                          */
2199/* Returns:                                                                 */
2200/*   Nothing.                                                               */
2201/****************************************************************************/
2202void
2203bnx_release_resources(struct bnx_softc *sc)
2204{
2205	struct pci_attach_args *pa = &(sc->bnx_pa);
2206
2207	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2208
2209	bnx_dma_free(sc);
2210
2211	if (sc->bnx_intrhand != NULL)
2212		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2213
2214	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2215}
2216
2217/****************************************************************************/
2218/* Firmware synchronization.                                                */
2219/*                                                                          */
2220/* Before performing certain events such as a chip reset, synchronize with  */
2221/* the firmware first.                                                      */
2222/*                                                                          */
2223/* Returns:                                                                 */
2224/*   0 for success, positive value for failure.                             */
2225/****************************************************************************/
2226int
2227bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2228{
2229	int i, rc = 0;
2230	u_int32_t val;
2231
2232	/* Don't waste any time if we've timed out before. */
2233	if (sc->bnx_fw_timed_out) {
2234		rc = EBUSY;
2235		goto bnx_fw_sync_exit;
2236	}
2237
2238	/* Increment the message sequence number. */
2239	sc->bnx_fw_wr_seq++;
2240	msg_data |= sc->bnx_fw_wr_seq;
2241
2242 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n", msg_data);
2243
2244	/* Send the message to the bootcode driver mailbox. */
2245	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2246
2247	/* Wait for the bootcode to acknowledge the message. */
2248	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2249		/* Check for a response in the bootcode firmware mailbox. */
2250		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2251		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2252			break;
2253		DELAY(1000);
2254	}
2255
2256	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2257	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2258		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2259
2260		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2261			"msg_data = 0x%08X\n",
2262			__FILE__, __LINE__, msg_data);
2263
2264		msg_data &= ~BNX_DRV_MSG_CODE;
2265		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2266
2267		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2268
2269		sc->bnx_fw_timed_out = 1;
2270		rc = EBUSY;
2271	}
2272
2273bnx_fw_sync_exit:
2274	return (rc);
2275}
2276
2277/****************************************************************************/
2278/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2279/*                                                                          */
2280/* Returns:                                                                 */
2281/*   Nothing.                                                               */
2282/****************************************************************************/
2283void
2284bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2285	u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2286{
2287	int i;
2288	u_int32_t val;
2289
2290	for (i = 0; i < rv2p_code_len; i += 8) {
2291		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2292		rv2p_code++;
2293		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2294		rv2p_code++;
2295
2296		if (rv2p_proc == RV2P_PROC1) {
2297			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2298			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2299		}
2300		else {
2301			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2302			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2303		}
2304	}
2305
2306	/* Reset the processor, un-stall is done later. */
2307	if (rv2p_proc == RV2P_PROC1) {
2308		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2309	}
2310	else {
2311		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2312	}
2313}
2314
2315/****************************************************************************/
2316/* Load RISC processor firmware.                                            */
2317/*                                                                          */
2318/* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2319/* associated with a particular processor.                                  */
2320/*                                                                          */
2321/* Returns:                                                                 */
2322/*   Nothing.                                                               */
2323/****************************************************************************/
2324void
2325bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2326	struct fw_info *fw)
2327{
2328	u_int32_t offset;
2329	u_int32_t val;
2330
2331	/* Halt the CPU. */
2332	val = REG_RD_IND(sc, cpu_reg->mode);
2333	val |= cpu_reg->mode_value_halt;
2334	REG_WR_IND(sc, cpu_reg->mode, val);
2335	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2336
2337	/* Load the Text area. */
2338	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2339	if (fw->text) {
2340		int j;
2341
2342		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2343			REG_WR_IND(sc, offset, fw->text[j]);
2344	        }
2345	}
2346
2347	/* Load the Data area. */
2348	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2349	if (fw->data) {
2350		int j;
2351
2352		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2353			REG_WR_IND(sc, offset, fw->data[j]);
2354		}
2355	}
2356
2357	/* Load the SBSS area. */
2358	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2359	if (fw->sbss) {
2360		int j;
2361
2362		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2363			REG_WR_IND(sc, offset, fw->sbss[j]);
2364		}
2365	}
2366
2367	/* Load the BSS area. */
2368	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2369	if (fw->bss) {
2370		int j;
2371
2372		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2373			REG_WR_IND(sc, offset, fw->bss[j]);
2374		}
2375	}
2376
2377	/* Load the Read-Only area. */
2378	offset = cpu_reg->spad_base +
2379		(fw->rodata_addr - cpu_reg->mips_view_base);
2380	if (fw->rodata) {
2381		int j;
2382
2383		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2384			REG_WR_IND(sc, offset, fw->rodata[j]);
2385		}
2386	}
2387
2388	/* Clear the pre-fetch instruction. */
2389	REG_WR_IND(sc, cpu_reg->inst, 0);
2390	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2391
2392	/* Start the CPU. */
2393	val = REG_RD_IND(sc, cpu_reg->mode);
2394	val &= ~cpu_reg->mode_value_halt;
2395	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2396	REG_WR_IND(sc, cpu_reg->mode, val);
2397}
2398
2399/****************************************************************************/
2400/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2401/*                                                                          */
2402/* Loads the firmware for each CPU and starts the CPU.                      */
2403/*                                                                          */
2404/* Returns:                                                                 */
2405/*   Nothing.                                                               */
2406/****************************************************************************/
2407void
2408bnx_init_cpus(struct bnx_softc *sc)
2409{
2410	struct cpu_reg cpu_reg;
2411	struct fw_info fw;
2412
2413	/* Initialize the RV2P processor. */
2414	bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), RV2P_PROC1);
2415	bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), RV2P_PROC2);
2416
2417	/* Initialize the RX Processor. */
2418	cpu_reg.mode = BNX_RXP_CPU_MODE;
2419	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2420	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2421	cpu_reg.state = BNX_RXP_CPU_STATE;
2422	cpu_reg.state_value_clear = 0xffffff;
2423	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2424	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2425	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2426	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2427	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2428	cpu_reg.spad_base = BNX_RXP_SCRATCH;
2429	cpu_reg.mips_view_base = 0x8000000;
2430
2431	fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2432	fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2433	fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2434	fw.start_addr = bnx_RXP_b06FwStartAddr;
2435
2436	fw.text_addr = bnx_RXP_b06FwTextAddr;
2437	fw.text_len = bnx_RXP_b06FwTextLen;
2438	fw.text_index = 0;
2439	fw.text = bnx_RXP_b06FwText;
2440
2441	fw.data_addr = bnx_RXP_b06FwDataAddr;
2442	fw.data_len = bnx_RXP_b06FwDataLen;
2443	fw.data_index = 0;
2444	fw.data = bnx_RXP_b06FwData;
2445
2446	fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2447	fw.sbss_len = bnx_RXP_b06FwSbssLen;
2448	fw.sbss_index = 0;
2449	fw.sbss = bnx_RXP_b06FwSbss;
2450
2451	fw.bss_addr = bnx_RXP_b06FwBssAddr;
2452	fw.bss_len = bnx_RXP_b06FwBssLen;
2453	fw.bss_index = 0;
2454	fw.bss = bnx_RXP_b06FwBss;
2455
2456	fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2457	fw.rodata_len = bnx_RXP_b06FwRodataLen;
2458	fw.rodata_index = 0;
2459	fw.rodata = bnx_RXP_b06FwRodata;
2460
2461	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2462	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2463
2464	/* Initialize the TX Processor. */
2465	cpu_reg.mode = BNX_TXP_CPU_MODE;
2466	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2467	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2468	cpu_reg.state = BNX_TXP_CPU_STATE;
2469	cpu_reg.state_value_clear = 0xffffff;
2470	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2471	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2472	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2473	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2474	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2475	cpu_reg.spad_base = BNX_TXP_SCRATCH;
2476	cpu_reg.mips_view_base = 0x8000000;
2477
2478	fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2479	fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2480	fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2481	fw.start_addr = bnx_TXP_b06FwStartAddr;
2482
2483	fw.text_addr = bnx_TXP_b06FwTextAddr;
2484	fw.text_len = bnx_TXP_b06FwTextLen;
2485	fw.text_index = 0;
2486	fw.text = bnx_TXP_b06FwText;
2487
2488	fw.data_addr = bnx_TXP_b06FwDataAddr;
2489	fw.data_len = bnx_TXP_b06FwDataLen;
2490	fw.data_index = 0;
2491	fw.data = bnx_TXP_b06FwData;
2492
2493	fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2494	fw.sbss_len = bnx_TXP_b06FwSbssLen;
2495	fw.sbss_index = 0;
2496	fw.sbss = bnx_TXP_b06FwSbss;
2497
2498	fw.bss_addr = bnx_TXP_b06FwBssAddr;
2499	fw.bss_len = bnx_TXP_b06FwBssLen;
2500	fw.bss_index = 0;
2501	fw.bss = bnx_TXP_b06FwBss;
2502
2503	fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2504	fw.rodata_len = bnx_TXP_b06FwRodataLen;
2505	fw.rodata_index = 0;
2506	fw.rodata = bnx_TXP_b06FwRodata;
2507
2508	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2509	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2510
2511	/* Initialize the TX Patch-up Processor. */
2512	cpu_reg.mode = BNX_TPAT_CPU_MODE;
2513	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2514	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2515	cpu_reg.state = BNX_TPAT_CPU_STATE;
2516	cpu_reg.state_value_clear = 0xffffff;
2517	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2518	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2519	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2520	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2521	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2522	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2523	cpu_reg.mips_view_base = 0x8000000;
2524
2525	fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2526	fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2527	fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2528	fw.start_addr = bnx_TPAT_b06FwStartAddr;
2529
2530	fw.text_addr = bnx_TPAT_b06FwTextAddr;
2531	fw.text_len = bnx_TPAT_b06FwTextLen;
2532	fw.text_index = 0;
2533	fw.text = bnx_TPAT_b06FwText;
2534
2535	fw.data_addr = bnx_TPAT_b06FwDataAddr;
2536	fw.data_len = bnx_TPAT_b06FwDataLen;
2537	fw.data_index = 0;
2538	fw.data = bnx_TPAT_b06FwData;
2539
2540	fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2541	fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2542	fw.sbss_index = 0;
2543	fw.sbss = bnx_TPAT_b06FwSbss;
2544
2545	fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2546	fw.bss_len = bnx_TPAT_b06FwBssLen;
2547	fw.bss_index = 0;
2548	fw.bss = bnx_TPAT_b06FwBss;
2549
2550	fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2551	fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2552	fw.rodata_index = 0;
2553	fw.rodata = bnx_TPAT_b06FwRodata;
2554
2555	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2556	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2557
2558	/* Initialize the Completion Processor. */
2559	cpu_reg.mode = BNX_COM_CPU_MODE;
2560	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2561	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2562	cpu_reg.state = BNX_COM_CPU_STATE;
2563	cpu_reg.state_value_clear = 0xffffff;
2564	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2565	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2566	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2567	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2568	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2569	cpu_reg.spad_base = BNX_COM_SCRATCH;
2570	cpu_reg.mips_view_base = 0x8000000;
2571
2572	fw.ver_major = bnx_COM_b06FwReleaseMajor;
2573	fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2574	fw.ver_fix = bnx_COM_b06FwReleaseFix;
2575	fw.start_addr = bnx_COM_b06FwStartAddr;
2576
2577	fw.text_addr = bnx_COM_b06FwTextAddr;
2578	fw.text_len = bnx_COM_b06FwTextLen;
2579	fw.text_index = 0;
2580	fw.text = bnx_COM_b06FwText;
2581
2582	fw.data_addr = bnx_COM_b06FwDataAddr;
2583	fw.data_len = bnx_COM_b06FwDataLen;
2584	fw.data_index = 0;
2585	fw.data = bnx_COM_b06FwData;
2586
2587	fw.sbss_addr = bnx_COM_b06FwSbssAddr;
2588	fw.sbss_len = bnx_COM_b06FwSbssLen;
2589	fw.sbss_index = 0;
2590	fw.sbss = bnx_COM_b06FwSbss;
2591
2592	fw.bss_addr = bnx_COM_b06FwBssAddr;
2593	fw.bss_len = bnx_COM_b06FwBssLen;
2594	fw.bss_index = 0;
2595	fw.bss = bnx_COM_b06FwBss;
2596
2597	fw.rodata_addr = bnx_COM_b06FwRodataAddr;
2598	fw.rodata_len = bnx_COM_b06FwRodataLen;
2599	fw.rodata_index = 0;
2600	fw.rodata = bnx_COM_b06FwRodata;
2601
2602	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2603	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2604}
2605
2606/****************************************************************************/
2607/* Initialize context memory.                                               */
2608/*                                                                          */
2609/* Clears the memory associated with each Context ID (CID).                 */
2610/*                                                                          */
2611/* Returns:                                                                 */
2612/*   Nothing.                                                               */
2613/****************************************************************************/
2614void
2615bnx_init_context(struct bnx_softc *sc)
2616{
2617	u_int32_t vcid;
2618
2619	vcid = 96;
2620	while (vcid) {
2621		u_int32_t vcid_addr, pcid_addr, offset;
2622
2623		vcid--;
2624
2625   		vcid_addr = GET_CID_ADDR(vcid);
2626		pcid_addr = vcid_addr;
2627
2628		REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2629		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2630
2631		/* Zero out the context. */
2632		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
2633			CTX_WR(sc, 0x00, offset, 0);
2634		}
2635
2636		REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2637		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2638	}
2639}
2640
2641/****************************************************************************/
2642/* Fetch the permanent MAC address of the controller.                       */
2643/*                                                                          */
2644/* Returns:                                                                 */
2645/*   Nothing.                                                               */
2646/****************************************************************************/
2647void
2648bnx_get_mac_addr(struct bnx_softc *sc)
2649{
2650	u_int32_t mac_lo = 0, mac_hi = 0;
2651
2652	/*
2653	 * The NetXtreme II bootcode populates various NIC
2654	 * power-on and runtime configuration items in a
2655	 * shared memory area.  The factory configured MAC
2656	 * address is available from both NVRAM and the
2657	 * shared memory area so we'll read the value from
2658	 * shared memory for speed.
2659	 */
2660
2661	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base +
2662		BNX_PORT_HW_CFG_MAC_UPPER);
2663	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base +
2664		BNX_PORT_HW_CFG_MAC_LOWER);
2665
2666	if ((mac_lo == 0) && (mac_hi == 0)) {
2667		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2668			__FILE__, __LINE__);
2669	} else {
2670		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2671		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2672		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2673		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2674		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2675		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2676	}
2677
2678	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2679}
2680
2681/****************************************************************************/
2682/* Program the MAC address.                                                 */
2683/*                                                                          */
2684/* Returns:                                                                 */
2685/*   Nothing.                                                               */
2686/****************************************************************************/
2687void
2688bnx_set_mac_addr(struct bnx_softc *sc)
2689{
2690	u_int32_t val;
2691	u_int8_t *mac_addr = sc->eaddr;
2692
2693	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
2694
2695	val = (mac_addr[0] << 8) | mac_addr[1];
2696
2697	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2698
2699	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2700		(mac_addr[4] << 8) | mac_addr[5];
2701
2702	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2703}
2704
2705/****************************************************************************/
2706/* Stop the controller.                                                     */
2707/*                                                                          */
2708/* Returns:                                                                 */
2709/*   Nothing.                                                               */
2710/****************************************************************************/
2711void
2712bnx_stop(struct bnx_softc *sc)
2713{
2714	struct ifnet *ifp = &sc->arpcom.ac_if;
2715	struct ifmedia_entry *ifm;
2716	struct mii_data *mii = NULL;
2717	int mtmp, itmp;
2718
2719	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2720
2721	mii = &sc->bnx_mii;
2722
2723	timeout_del(&sc->bnx_timeout);
2724
2725	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2726
2727	/* Disable the transmit/receive blocks. */
2728	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2729	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2730	DELAY(20);
2731
2732	bnx_disable_intr(sc);
2733
2734	/* Tell firmware that the driver is going away. */
2735	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
2736
2737	/* Free the RX lists. */
2738	bnx_free_rx_chain(sc);
2739
2740	/* Free TX buffers. */
2741	bnx_free_tx_chain(sc);
2742
2743	/*
2744	 * Isolate/power down the PHY, but leave the media selection
2745	 * unchanged so that things will be put back to normal when
2746	 * we bring the interface back up.
2747	 */
2748
2749	itmp = ifp->if_flags;
2750	ifp->if_flags |= IFF_UP;
2751	/*
2752	 * If we are called from bnx_detach(), mii is already NULL.
2753	 */
2754	if (mii != NULL) {
2755		ifm = mii->mii_media.ifm_cur;
2756		mtmp = ifm->ifm_media;
2757		ifm->ifm_media = IFM_ETHER|IFM_NONE;
2758		mii_mediachg(mii);
2759		ifm->ifm_media = mtmp;
2760	}
2761
2762	ifp->if_flags = itmp;
2763	ifp->if_timer = 0;
2764
2765	sc->bnx_link = 0;
2766
2767	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2768
2769}
2770
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Quiesces DMA, synchronizes with the bootcode firmware, issues a core    */
/* reset, and verifies that the chip came back with correct byte swapping. */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, EBUSY/ENODEV or a bnx_fw_sync() error on failure.        */
/****************************************************************************/
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	u_int32_t val;
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	       BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the posted write before the delay. */
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
		   BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
		if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bnx_reset_exit;
	}

	/*
	 * Make sure byte swapping is properly configured: the diag
	 * register must read back as ascending bytes after reset.
	 */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
2851
/****************************************************************************/
/* One-time chip initialization.                                            */
/*                                                                          */
/* Configures DMA byte swapping and channel counts, applies chip errata    */
/* workarounds, starts the on-chip CPUs, and prepares NVRAM and the MQ/    */
/* TBDR blocks.                                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, ENODEV on NVRAM initialization failure.                  */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	u_int32_t val;
	int rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	      BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	/* Fast clock compensation only applies on a 133MHz PCI-X bus. */
	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		/* NOTE(review): this inner val shadows the outer u_int32_t val. */
		u_int16_t val;

		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD, val & ~0x2);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	       BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Bypass window sits just above the per-CID mailbox area. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell the RV2P block the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
2942
2943/****************************************************************************/
2944/* Initialize the controller in preparation to send/receive traffic.        */
2945/*                                                                          */
2946/* Returns:                                                                 */
2947/*   0 for success, positive value for failure.                             */
2948/****************************************************************************/
2949int
2950bnx_blockinit(struct bnx_softc *sc)
2951{
2952	u_int32_t reg, val;
2953	int rc = 0;
2954
2955	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2956
2957	/* Load the hardware default MAC address. */
2958	bnx_set_mac_addr(sc);
2959
2960	/* Set the Ethernet backoff seed value */
2961	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
2962	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
2963	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
2964	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
2965
2966	sc->last_status_idx = 0;
2967	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
2968
2969	/* Set up link change interrupt generation. */
2970	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
2971
2972	/* Program the physical address of the status block. */
2973	REG_WR(sc, BNX_HC_STATUS_ADDR_L,
2974		BNX_ADDR_LO(sc->status_block_paddr));
2975	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
2976		BNX_ADDR_HI(sc->status_block_paddr));
2977
2978	/* Program the physical address of the statistics block. */
2979	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
2980		BNX_ADDR_LO(sc->stats_block_paddr));
2981	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
2982		BNX_ADDR_HI(sc->stats_block_paddr));
2983
2984	/* Program various host coalescing parameters. */
2985	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP,
2986		(sc->bnx_tx_quick_cons_trip_int << 16) | sc->bnx_tx_quick_cons_trip);
2987	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP,
2988		(sc->bnx_rx_quick_cons_trip_int << 16) | sc->bnx_rx_quick_cons_trip);
2989	REG_WR(sc, BNX_HC_COMP_PROD_TRIP,
2990		(sc->bnx_comp_prod_trip_int << 16) | sc->bnx_comp_prod_trip);
2991	REG_WR(sc, BNX_HC_TX_TICKS,
2992		(sc->bnx_tx_ticks_int << 16) | sc->bnx_tx_ticks);
2993	REG_WR(sc, BNX_HC_RX_TICKS,
2994		(sc->bnx_rx_ticks_int << 16) | sc->bnx_rx_ticks);
2995	REG_WR(sc, BNX_HC_COM_TICKS,
2996		(sc->bnx_com_ticks_int << 16) | sc->bnx_com_ticks);
2997	REG_WR(sc, BNX_HC_CMD_TICKS,
2998		(sc->bnx_cmd_ticks_int << 16) | sc->bnx_cmd_ticks);
2999	REG_WR(sc, BNX_HC_STATS_TICKS,
3000		(sc->bnx_stats_ticks & 0xffff00));
3001	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS,
3002		0xbb8);  /* 3ms */
3003	REG_WR(sc, BNX_HC_CONFIG,
3004		(BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3005		BNX_HC_CONFIG_COLLECT_STATS));
3006
3007	/* Clear the internal statistics counters. */
3008	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3009
3010	/* Verify that bootcode is running. */
3011	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3012
3013	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3014		BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3015			__FILE__, __LINE__);
3016		reg = 0);
3017
3018	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3019	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3020		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3021			"Expected: 08%08X\n", __FILE__, __LINE__,
3022			(reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3023			BNX_DEV_INFO_SIGNATURE_MAGIC);
3024		rc = ENODEV;
3025		goto bnx_blockinit_exit;
3026	}
3027
3028	/* Check if any management firmware is running. */
3029	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3030	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED | BNX_PORT_FEATURE_IMD_ENABLED)) {
3031		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3032		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3033	}
3034
3035	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV);
3036	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3037
3038	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3039	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3040
3041	/* Enable link state change interrupt generation. */
3042	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3043
3044	/* Enable all remaining blocks in the MAC. */
3045	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3046	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3047	DELAY(20);
3048
3049bnx_blockinit_exit:
3050	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3051
3052	return (rc);
3053}
3054
3055/****************************************************************************/
3056/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3057/*                                                                          */
3058/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3059/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3060/* necessary.                                                               */
3061/*                                                                          */
3062/* Returns:                                                                 */
3063/*   0 for success, positive value for failure.                             */
3064/****************************************************************************/
3065int
3066bnx_get_buf(struct bnx_softc *sc, struct mbuf *m, u_int16_t *prod, u_int16_t *chain_prod,
3067	u_int32_t *prod_bseq)
3068{
3069	bus_dmamap_t		map;
3070	struct mbuf *m_new = NULL;
3071	struct rx_bd		*rxbd;
3072	int i, rc = 0;
3073#ifdef BNX_DEBUG
3074	u_int16_t debug_chain_prod = *chain_prod;
3075#endif
3076
3077	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3078		__FUNCTION__);
3079
3080	/* Make sure the inputs are valid. */
3081	DBRUNIF((*chain_prod > MAX_RX_BD),
3082		printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3083		*chain_prod, (u_int16_t) MAX_RX_BD));
3084
3085	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3086		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3087
3088	if (m == NULL) {
3089
3090		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3091			BNX_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
3092				__FILE__, __LINE__);
3093			sc->mbuf_alloc_failed++;
3094			rc = ENOBUFS;
3095			goto bnx_get_buf_exit);
3096
3097		/* This is a new mbuf allocation. */
3098		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3099		if (m_new == NULL) {
3100
3101			DBPRINT(sc, BNX_WARN, "%s(%d): RX mbuf header allocation failed!\n",
3102				__FILE__, __LINE__);
3103
3104			DBRUNIF(1, sc->mbuf_alloc_failed++);
3105
3106			rc = ENOBUFS;
3107			goto bnx_get_buf_exit;
3108		}
3109
3110		DBRUNIF(1, sc->rx_mbuf_alloc++);
3111		if (sc->mbuf_alloc_size <= MCLBYTES)
3112			MCLGET(m_new, M_DONTWAIT);
3113		else
3114			MEXTMALLOC(m_new, sc->mbuf_alloc_size, M_DONTWAIT);
3115		if (!(m_new->m_flags & M_EXT)) {
3116
3117			DBPRINT(sc, BNX_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
3118				__FILE__, __LINE__);
3119
3120			m_freem(m_new);
3121
3122			DBRUNIF(1, sc->rx_mbuf_alloc--);
3123			DBRUNIF(1, sc->mbuf_alloc_failed++);
3124
3125			rc = ENOBUFS;
3126			goto bnx_get_buf_exit;
3127		}
3128
3129		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3130	} else {
3131		m_new = m;
3132		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3133		m_new->m_data = m_new->m_ext.ext_buf;
3134	}
3135
3136	/* Map the mbuf cluster into device memory. */
3137	map = sc->rx_mbuf_map[*chain_prod];
3138	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3139		BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3140			__FILE__, __LINE__);
3141
3142		m_freem(m_new);
3143
3144		DBRUNIF(1, sc->rx_mbuf_alloc--);
3145
3146		rc = ENOBUFS;
3147		goto bnx_get_buf_exit;
3148	}
3149
3150	/* Watch for overflow. */
3151	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3152		printf("%s: Too many free rx_bd (0x%04X > 0x%04X)!\n",
3153			sc->free_rx_bd, (u_int16_t) USABLE_RX_BD));
3154
3155	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3156		sc->rx_low_watermark = sc->free_rx_bd);
3157
3158	/* Setup the rx_bd for the first segment. */
3159	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3160
3161	rxbd->rx_bd_haddr_lo  = htole32(BNX_ADDR_LO(map->dm_segs[0].ds_addr));
3162	rxbd->rx_bd_haddr_hi  = htole32(BNX_ADDR_HI(map->dm_segs[0].ds_addr));
3163	rxbd->rx_bd_len       = htole32(map->dm_segs[0].ds_len);
3164	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
3165	*prod_bseq += map->dm_segs[0].ds_len;
3166
3167	for (i = 1; i < map->dm_nsegs; i++) {
3168
3169		*prod = NEXT_RX_BD(*prod);
3170		*chain_prod = RX_CHAIN_IDX(*prod);
3171
3172		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3173
3174		rxbd->rx_bd_haddr_lo  = htole32(BNX_ADDR_LO(map->dm_segs[i].ds_addr));
3175		rxbd->rx_bd_haddr_hi  = htole32(BNX_ADDR_HI(map->dm_segs[i].ds_addr));
3176		rxbd->rx_bd_len       = htole32(map->dm_segs[i].ds_len);
3177		rxbd->rx_bd_flags     = 0;
3178		*prod_bseq += map->dm_segs[i].ds_len;
3179	}
3180
3181	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3182
3183	/* Save the mbuf and update our counter. */
3184	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3185	sc->free_rx_bd -= map->dm_nsegs;
3186
3187	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3188		map->dm_nsegs));
3189
3190	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3191		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3192
3193bnx_get_buf_exit:
3194	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
3195		__FUNCTION__);
3196
3197	return(rc);
3198}
3199
3200/****************************************************************************/
3201/* Allocate memory and initialize the TX data structures.                   */
3202/*                                                                          */
3203/* Returns:                                                                 */
3204/*   0 for success, positive value for failure.                             */
3205/****************************************************************************/
3206int
3207bnx_init_tx_chain(struct bnx_softc *sc)
3208{
3209	struct tx_bd *txbd;
3210	u_int32_t val;
3211	int i, rc = 0;
3212
3213	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3214
3215	/* Set the initial TX producer/consumer indices. */
3216	sc->tx_prod        = 0;
3217	sc->tx_cons        = 0;
3218	sc->tx_prod_bseq   = 0;
3219	sc->used_tx_bd = 0;
3220	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3221
3222	/*
3223	 * The NetXtreme II supports a linked-list structure called
3224	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3225	 * consists of a series of 1 or more chain pages, each of which
3226	 * consists of a fixed number of BD entries.
3227	 * The last BD entry on each page is a pointer to the next page
3228	 * in the chain, and the last pointer in the BD chain
3229	 * points back to the beginning of the chain.
3230	 */
3231
3232	/* Set the TX next pointer chain entries. */
3233	for (i = 0; i < TX_PAGES; i++) {
3234		int j;
3235
3236		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3237
3238		/* Check if we've reached the last page. */
3239		if (i == (TX_PAGES - 1))
3240			j = 0;
3241		else
3242			j = i + 1;
3243
3244		txbd->tx_bd_haddr_hi = htole32(BNX_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3245		txbd->tx_bd_haddr_lo = htole32(BNX_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3246	}
3247
3248	/*
3249	 * Initialize the context ID for an L2 TX chain.
3250	 */
3251	val = BNX_L2CTX_TYPE_TYPE_L2;
3252	val |= BNX_L2CTX_TYPE_SIZE_L2;
3253	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3254
3255	val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3256	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3257
3258	/* Point the hardware to the first page in the chain. */
3259	val = BNX_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3260	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3261	val = BNX_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3262	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3263
3264	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3265
3266	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3267
3268	return(rc);
3269}
3270
3271/****************************************************************************/
3272/* Free memory and clear the TX data structures.                            */
3273/*                                                                          */
3274/* Returns:                                                                 */
3275/*   Nothing.                                                               */
3276/****************************************************************************/
3277void
3278bnx_free_tx_chain(struct bnx_softc *sc)
3279{
3280	int i;
3281
3282	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3283
3284	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3285	for (i = 0; i < TOTAL_TX_BD; i++) {
3286		if (sc->tx_mbuf_ptr[i] != NULL) {
3287			if (sc->tx_mbuf_map != NULL)
3288				bus_dmamap_sync(sc->bnx_dmatag,
3289				    sc->tx_mbuf_map[i], 0,
3290				    sc->tx_mbuf_map[i]->dm_mapsize,
3291				    BUS_DMASYNC_POSTWRITE);
3292			m_freem(sc->tx_mbuf_ptr[i]);
3293			sc->tx_mbuf_ptr[i] = NULL;
3294			DBRUNIF(1, sc->tx_mbuf_alloc--);
3295		}
3296	}
3297
3298	/* Clear each TX chain page. */
3299	for (i = 0; i < TX_PAGES; i++)
3300		bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3301
3302	/* Check if we lost any mbufs in the process. */
3303	DBRUNIF((sc->tx_mbuf_alloc),
3304		printf("%s: Memory leak! Lost %d mbufs "
3305			"from tx chain!\n",
3306			sc->tx_mbuf_alloc));
3307
3308	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3309}
3310
3311/****************************************************************************/
3312/* Allocate memory and initialize the RX data structures.                   */
3313/*                                                                          */
3314/* Returns:                                                                 */
3315/*   0 for success, positive value for failure.                             */
3316/****************************************************************************/
3317int
3318bnx_init_rx_chain(struct bnx_softc *sc)
3319{
3320	struct rx_bd *rxbd;
3321	int i, rc = 0;
3322	u_int16_t prod, chain_prod;
3323	u_int32_t prod_bseq, val;
3324
3325	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3326
3327	/* Initialize the RX producer and consumer indices. */
3328	sc->rx_prod        = 0;
3329	sc->rx_cons        = 0;
3330	sc->rx_prod_bseq   = 0;
3331	sc->free_rx_bd     = BNX_RX_SLACK_SPACE;
3332	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3333
3334	/* Initialize the RX next pointer chain entries. */
3335	for (i = 0; i < RX_PAGES; i++) {
3336		int j;
3337
3338		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3339
3340		/* Check if we've reached the last page. */
3341		if (i == (RX_PAGES - 1))
3342			j = 0;
3343		else
3344			j = i + 1;
3345
3346		/* Setup the chain page pointers. */
3347		rxbd->rx_bd_haddr_hi = htole32(BNX_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3348		rxbd->rx_bd_haddr_lo = htole32(BNX_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3349	}
3350
3351	/* Initialize the context ID for an L2 RX chain. */
3352	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3353	val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
3354	val |= 0x02 << 8;
3355	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3356
3357	/* Point the hardware to the first page in the chain. */
3358	val = BNX_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3359	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3360	val = BNX_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3361	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3362
3363	/* Allocate mbuf clusters for the rx_bd chain. */
3364	prod = prod_bseq = 0;
3365	while (prod < BNX_RX_SLACK_SPACE) {
3366		chain_prod = RX_CHAIN_IDX(prod);
3367		if (bnx_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3368			printf("%s: Error filling RX chain: rx_bd[0x%04X]!\n",
3369				chain_prod);
3370			rc = ENOBUFS;
3371			break;
3372		}
3373		prod = NEXT_RX_BD(prod);
3374	}
3375
3376	/* Save the RX chain producer index. */
3377	sc->rx_prod      = prod;
3378	sc->rx_prod_bseq = prod_bseq;
3379
3380	for (i = 0; i < RX_PAGES; i++) {
3381		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
3382		    sc->rx_bd_chain_map[i]->dm_mapsize,
3383		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3384	}
3385
3386	/* Tell the chip about the waiting rx_bd's. */
3387	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3388	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3389
3390	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3391
3392	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3393
3394	return(rc);
3395}
3396
3397/****************************************************************************/
3398/* Free memory and clear the RX data structures.                            */
3399/*                                                                          */
3400/* Returns:                                                                 */
3401/*   Nothing.                                                               */
3402/****************************************************************************/
3403void
3404bnx_free_rx_chain(struct bnx_softc *sc)
3405{
3406	int i;
3407
3408	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3409
3410	/* Free any mbufs still in the RX mbuf chain. */
3411	for (i = 0; i < TOTAL_RX_BD; i++) {
3412		if (sc->rx_mbuf_ptr[i] != NULL) {
3413			if (sc->rx_mbuf_map[i] != NULL)
3414				bus_dmamap_sync(sc->bnx_dmatag,
3415				    sc->rx_mbuf_map[i],	0,
3416				    sc->rx_mbuf_map[i]->dm_mapsize,
3417				    BUS_DMASYNC_POSTREAD);
3418			m_freem(sc->rx_mbuf_ptr[i]);
3419			sc->rx_mbuf_ptr[i] = NULL;
3420			DBRUNIF(1, sc->rx_mbuf_alloc--);
3421		}
3422	}
3423
3424	/* Clear each RX chain page. */
3425	for (i = 0; i < RX_PAGES; i++)
3426		bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
3427
3428	/* Check if we lost any mbufs in the process. */
3429	DBRUNIF((sc->rx_mbuf_alloc),
3430		printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
3431			sc->rx_mbuf_alloc));
3432
3433	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3434}
3435
3436/****************************************************************************/
3437/* Set media options.                                                       */
3438/*                                                                          */
3439/* Returns:                                                                 */
3440/*   0 for success, positive value for failure.                             */
3441/****************************************************************************/
3442int
3443bnx_ifmedia_upd(struct ifnet *ifp)
3444{
3445	struct bnx_softc *sc;
3446	struct mii_data *mii;
3447	struct ifmedia *ifm;
3448	int rc = 0;
3449
3450	sc = ifp->if_softc;
3451	ifm = &sc->bnx_ifmedia;
3452
3453	/* DRC - ToDo: Add SerDes support. */
3454
3455	mii = &sc->bnx_mii;
3456	sc->bnx_link = 0;
3457	if (mii->mii_instance) {
3458		struct mii_softc *miisc;
3459		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3460		    miisc = LIST_NEXT(miisc, mii_list))
3461			mii_phy_reset(miisc);
3462	}
3463	mii_mediachg(mii);
3464
3465	return(rc);
3466}
3467
3468/****************************************************************************/
3469/* Reports current media status.                                            */
3470/*                                                                          */
3471/* Returns:                                                                 */
3472/*   Nothing.                                                               */
3473/****************************************************************************/
3474void
3475bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3476{
3477	struct bnx_softc *sc;
3478	struct mii_data *mii;
3479	int s;
3480
3481	sc = ifp->if_softc;
3482
3483	s = splnet();
3484
3485	mii = &sc->bnx_mii;
3486
3487	/* DRC - ToDo: Add SerDes support. */
3488
3489	mii_pollstat(mii);
3490	ifmr->ifm_active = mii->mii_media_active;
3491	ifmr->ifm_status = mii->mii_media_status;
3492
3493	splx(s);
3494}
3495
3496/****************************************************************************/
3497/* Handles PHY generated interrupt events.                                  */
3498/*                                                                          */
3499/* Returns:                                                                 */
3500/*   Nothing.                                                               */
3501/****************************************************************************/
3502void
3503bnx_phy_intr(struct bnx_softc *sc)
3504{
3505	u_int32_t new_link_state, old_link_state;
3506
3507	new_link_state = sc->status_block->status_attn_bits &
3508		STATUS_ATTN_BITS_LINK_STATE;
3509	old_link_state = sc->status_block->status_attn_bits_ack &
3510		STATUS_ATTN_BITS_LINK_STATE;
3511
3512	/* Handle any changes if the link state has changed. */
3513	if (new_link_state != old_link_state) {
3514
3515		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
3516
3517		sc->bnx_link = 0;
3518		timeout_del(&sc->bnx_timeout);
3519		bnx_tick(sc);
3520
3521		/* Update the status_attn_bits_ack field in the status block. */
3522		if (new_link_state) {
3523			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
3524				STATUS_ATTN_BITS_LINK_STATE);
3525			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
3526		} else {
3527			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
3528				STATUS_ATTN_BITS_LINK_STATE);
3529			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
3530		}
3531
3532	}
3533
3534	/* Acknowledge the link change interrupt. */
3535	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
3536}
3537
3538/****************************************************************************/
3539/* Handles received frame interrupt events.                                 */
3540/*                                                                          */
3541/* Returns:                                                                 */
3542/*   Nothing.                                                               */
3543/****************************************************************************/
3544void
3545bnx_rx_intr(struct bnx_softc *sc)
3546{
3547	struct status_block *sblk = sc->status_block;
3548	struct ifnet *ifp = &sc->arpcom.ac_if;
3549	u_int16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
3550	u_int32_t sw_prod_bseq;
3551	struct l2_fhdr *l2fhdr;
3552	int i;
3553
3554	DBRUNIF(1, sc->rx_interrupts++);
3555
3556	/* Prepare the RX chain pages to be accessed by the host CPU. */
3557	for (i = 0; i < RX_PAGES; i++)
3558		bus_dmamap_sync(sc->bnx_dmatag,
3559		    sc->rx_bd_chain_map[i], 0,
3560		    sc->rx_bd_chain_map[i]->dm_mapsize,
3561		    BUS_DMASYNC_POSTWRITE);
3562
3563	/* Get the hardware's view of the RX consumer index. */
3564	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3565	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3566		hw_cons++;
3567
3568	/* Get working copies of the driver's view of the RX indices. */
3569	sw_cons = sc->rx_cons;
3570	sw_prod = sc->rx_prod;
3571	sw_prod_bseq = sc->rx_prod_bseq;
3572
3573	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3574		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3575		__FUNCTION__, sw_prod, sw_cons,
3576		sw_prod_bseq);
3577
3578	/* Prevent speculative reads from getting ahead of the status block. */
3579	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3580		BUS_SPACE_BARRIER_READ);
3581
3582	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3583		sc->rx_low_watermark = sc->free_rx_bd);
3584
3585	/*
3586	 * Scan through the receive chain as long
3587	 * as there is work to do.
3588	 */
3589	while (sw_cons != hw_cons) {
3590		struct mbuf *m;
3591		struct rx_bd *rxbd;
3592		unsigned int len;
3593		u_int32_t status;
3594
3595		/* Convert the producer/consumer indices to an actual rx_bd index. */
3596		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3597		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3598
3599		/* Get the used rx_bd. */
3600		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
3601		sc->free_rx_bd++;
3602
3603		DBRUN(BNX_VERBOSE_RECV,
3604			printf("%s(): ", __FUNCTION__);
3605			bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
3606
3607		/* The mbuf is stored with the last rx_bd entry of a packet. */
3608		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3609
3610			/* Validate that this is the last rx_bd. */
3611			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3612				printf("%s: Unexpected mbuf found in rx_bd[0x%04X]!\n",
3613				sw_chain_cons);
3614				bnx_breakpoint(sc));
3615
3616			/* DRC - ToDo: If the received packet is small, say less */
3617			/*             than 128 bytes, allocate a new mbuf here, */
3618			/*             copy the data to that mbuf, and recycle   */
3619			/*             the mapped jumbo frame.                   */
3620
3621			/* Unmap the mbuf from DMA space. */
3622			bus_dmamap_sync(sc->bnx_dmatag,
3623			    sc->rx_mbuf_map[sw_chain_cons], 0,
3624			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
3625			    BUS_DMASYNC_POSTREAD);
3626			bus_dmamap_unload(sc->bnx_dmatag,
3627			    sc->rx_mbuf_map[sw_chain_cons]);
3628
3629			/* Remove the mbuf from the driver's chain. */
3630			m = sc->rx_mbuf_ptr[sw_chain_cons];
3631			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3632
3633			/*
3634			 * Frames received on the NetXteme II are prepended
3635			 * with the l2_fhdr structure which provides status
3636			 * information about the received frame (including
3637			 * VLAN tags and checksum info) and are also
3638			 * automatically adjusted to align the IP header
3639			 * (i.e. two null bytes are inserted before the
3640			 * Ethernet header).
3641			 */
3642			l2fhdr = mtod(m, struct l2_fhdr *);
3643
3644			len    = l2fhdr->l2_fhdr_pkt_len;
3645			status = l2fhdr->l2_fhdr_status;
3646
3647			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
3648				printf("Simulating l2_fhdr status error.\n");
3649				status = status | L2_FHDR_ERRORS_PHY_DECODE);
3650
3651			/* Watch for unusual sized frames. */
3652			DBRUNIF(((len < BNX_MIN_MTU) || (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
3653				printf("%s: Unusual frame size found. "
3654					"Min(%d), Actual(%d), Max(%d)\n",
3655					(int) BNX_MIN_MTU,
3656					len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
3657				bnx_dump_mbuf(sc, m);
3658		 		bnx_breakpoint(sc));
3659
3660			len -= ETHER_CRC_LEN;
3661
3662			/* Check the received frame for errors. */
3663			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
3664				L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
3665				L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
3666
3667				ifp->if_ierrors++;
3668				DBRUNIF(1, sc->l2fhdr_status_errors++);
3669
3670				/* Reuse the mbuf for a new frame. */
3671				if (bnx_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
3672
3673					DBRUNIF(1, bnx_breakpoint(sc));
3674					panic("%s: Can't reuse RX mbuf!\n", sc->bnx_dev.dv_xname);
3675
3676				}
3677				goto bnx_rx_int_next_rx;
3678			}
3679
3680			/*
3681			 * Get a new mbuf for the rx_bd.   If no new
3682			 * mbufs are available then reuse the current mbuf,
3683			 * log an ierror on the interface, and generate
3684			 * an error in the system log.
3685			 */
3686			if (bnx_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
3687
3688				DBRUN(BNX_WARN,
3689					printf("%s: Failed to allocate "
3690					"new mbuf, incoming frame dropped!\n"));
3691
3692				ifp->if_ierrors++;
3693
3694				/* Try and reuse the exisitng mbuf. */
3695				if (bnx_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
3696
3697					DBRUNIF(1, bnx_breakpoint(sc));
3698					panic("%s: Double mbuf allocation failure!", sc->bnx_dev.dv_xname);
3699
3700				}
3701				goto bnx_rx_int_next_rx;
3702			}
3703
3704			/* Skip over the l2_fhdr when passing the data up the stack. */
3705			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3706
3707			/* Adjust the packet length to match the received data. */
3708			m->m_pkthdr.len = m->m_len = len;
3709
3710			/* Send the packet to the appropriate interface. */
3711			m->m_pkthdr.rcvif = ifp;
3712
3713			DBRUN(BNX_VERBOSE_RECV,
3714				struct ether_header *eh;
3715				eh = mtod(m, struct ether_header *);
3716				printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
3717					__FUNCTION__, eh->ether_dhost, ":",
3718					eh->ether_shost, ":", htons(eh->ether_type)));
3719
3720#ifdef BNX_CKSUM
3721			/* Validate the checksum if offload enabled. */
3722			if (ifp->if_capenable & IFCAP_RXCSUM) {
3723
3724				/* Check for an IP datagram. */
3725				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3726					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3727
3728					/* Check if the IP checksum is valid. */
3729					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
3730						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3731					else
3732						DBPRINT(sc, BNX_WARN_SEND,
3733							"%s(): Invalid IP checksum = 0x%04X!\n",
3734							__FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
3735				}
3736
3737				/* Check for a valid TCP/UDP frame. */
3738				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3739					L2_FHDR_STATUS_UDP_DATAGRAM)) {
3740
3741					/* Check for a good TCP/UDP checksum. */
3742					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
3743						      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3744						m->m_pkthdr.csum_data =
3745						    l2fhdr->l2_fhdr_tcp_udp_xsum;
3746						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
3747							| CSUM_PSEUDO_HDR);
3748					} else
3749						DBPRINT(sc, BNX_WARN_SEND,
3750							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
3751							__FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
3752				}
3753			}
3754#endif
3755
3756#if NBPFILTER > 0
3757			/*
3758			 * Handle BPF listeners. Let the BPF
3759			 * user see the packet.
3760			 */
3761			if (ifp->if_bpf)
3762				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
3763#endif
3764
3765			/* Pass the mbuf off to the upper layers. */
3766			ifp->if_ipackets++;
3767			DBPRINT(sc, BNX_VERBOSE_RECV, "%s(): Passing received frame up.\n",
3768				__FUNCTION__);
3769			ether_input_mbuf(ifp, m);
3770			DBRUNIF(1, sc->rx_mbuf_alloc--);
3771
3772bnx_rx_int_next_rx:
3773			sw_prod = NEXT_RX_BD(sw_prod);
3774		}
3775
3776		sw_cons = NEXT_RX_BD(sw_cons);
3777
3778		/* Refresh hw_cons to see if there's new work */
3779		if (sw_cons == hw_cons) {
3780			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3781			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3782				hw_cons++;
3783		}
3784
3785		/* Prevent speculative reads from getting ahead of the status block. */
3786		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3787			BUS_SPACE_BARRIER_READ);
3788	}
3789
3790	for (i = 0; i < RX_PAGES; i++)
3791		bus_dmamap_sync(sc->bnx_dmatag,
3792		    sc->rx_bd_chain_map[i], 0,
3793		    sc->rx_bd_chain_map[i]->dm_mapsize,
3794		    BUS_DMASYNC_PREWRITE);
3795
3796	sc->rx_cons = sw_cons;
3797	sc->rx_prod = sw_prod;
3798	sc->rx_prod_bseq = sw_prod_bseq;
3799
3800	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3801	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3802
3803	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
3804		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
3805		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
3806}
3807
3808/****************************************************************************/
3809/* Handles transmit completion interrupt events.                            */
3810/*                                                                          */
3811/* Returns:                                                                 */
3812/*   Nothing.                                                               */
3813/****************************************************************************/
3814void
3815bnx_tx_intr(struct bnx_softc *sc)
3816{
3817	struct status_block *sblk = sc->status_block;
3818	struct ifnet *ifp = &sc->arpcom.ac_if;
3819	u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3820
3821	DBRUNIF(1, sc->tx_interrupts++);
3822
3823	/* Get the hardware's view of the TX consumer index. */
3824	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3825
3826	/* Skip to the next entry if this is a chain page pointer. */
3827	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3828		hw_tx_cons++;
3829
3830	sw_tx_cons = sc->tx_cons;
3831
3832	/* Prevent speculative reads from getting ahead of the status block. */
3833	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3834		BUS_SPACE_BARRIER_READ);
3835
3836	/* Cycle through any completed TX chain page entries. */
3837	while (sw_tx_cons != hw_tx_cons) {
3838#ifdef BNX_DEBUG
3839		struct tx_bd *txbd = NULL;
3840#endif
3841		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
3842
3843		DBPRINT(sc, BNX_INFO_SEND,
3844			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
3845			"sw_tx_chain_cons = 0x%04X\n",
3846			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
3847
3848		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
3849			printf("%s: TX chain consumer out of range! "
3850				" 0x%04X > 0x%04X\n",
3851				sw_tx_chain_cons,
3852				(int) MAX_TX_BD);
3853			bnx_breakpoint(sc));
3854
3855		DBRUNIF(1,
3856			txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
3857				[TX_IDX(sw_tx_chain_cons)]);
3858
3859		DBRUNIF((txbd == NULL),
3860			printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
3861				sw_tx_chain_cons);
3862			bnx_breakpoint(sc));
3863
3864		DBRUN(BNX_INFO_SEND,
3865			printf("%s: ", __FUNCTION__);
3866			bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
3867
3868		/*
3869		 * Free the associated mbuf. Remember
3870		 * that only the last tx_bd of a packet
3871		 * has an mbuf pointer and DMA map.
3872		 */
3873		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
3874
3875			/* Validate that this is the last tx_bd. */
3876			DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
3877				printf("%s: tx_bd END flag not set but "
3878				"txmbuf == NULL!\n");
3879				bnx_breakpoint(sc));
3880
3881			DBRUN(BNX_INFO_SEND,
3882				printf("%s: Unloading map/freeing mbuf "
3883					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
3884
3885			/* Unmap the mbuf. */
3886			bus_dmamap_unload(sc->bnx_dmatag,
3887			    sc->tx_mbuf_map[sw_tx_chain_cons]);
3888
3889			/* Free the mbuf. */
3890			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
3891			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
3892			DBRUNIF(1, sc->tx_mbuf_alloc--);
3893
3894			ifp->if_opackets++;
3895		}
3896
3897		sc->used_tx_bd--;
3898		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
3899
3900		/* Refresh hw_cons to see if there's new work. */
3901		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3902		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3903			hw_tx_cons++;
3904
3905		/* Prevent speculative reads from getting ahead of the status block. */
3906		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3907			BUS_SPACE_BARRIER_READ);
3908	}
3909
3910	/* Clear the TX timeout timer. */
3911	ifp->if_timer = 0;
3912
3913	/* Clear the tx hardware queue full flag. */
3914	if ((sc->used_tx_bd + BNX_TX_SLACK_SPACE) < USABLE_TX_BD) {
3915		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
3916			printf("%s: TX chain is open for business! Used tx_bd = %d\n",
3917				sc->used_tx_bd));
3918		ifp->if_flags &= ~IFF_OACTIVE;
3919	}
3920
3921	sc->tx_cons = sw_tx_cons;
3922}
3923
3924/****************************************************************************/
3925/* Disables interrupt generation.                                           */
3926/*                                                                          */
3927/* Returns:                                                                 */
3928/*   Nothing.                                                               */
3929/****************************************************************************/
3930void
3931bnx_disable_intr(struct bnx_softc *sc)
3932{
3933	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
3934	       BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3935	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
3936}
3937
3938/****************************************************************************/
3939/* Enables interrupt generation.                                            */
3940/*                                                                          */
3941/* Returns:                                                                 */
3942/*   Nothing.                                                               */
3943/****************************************************************************/
3944void
3945bnx_enable_intr(struct bnx_softc *sc)
3946{
3947	u_int32_t val;
3948
3949	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
3950	       BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
3951	       BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
3952
3953	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
3954	       BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
3955
3956	val = REG_RD(sc, BNX_HC_COMMAND);
3957	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
3958}
3959
3960/****************************************************************************/
3961/* Handles controller initialization.                                       */
3962/*                                                                          */
3963/* Returns:                                                                 */
3964/*   Nothing.                                                               */
3965/****************************************************************************/
3966void
3967bnx_init(void *xsc)
3968{
3969	struct bnx_softc *sc = (struct bnx_softc *)xsc;
3970	struct ifnet *ifp = &sc->arpcom.ac_if;
3971	u_int32_t ether_mtu;
3972	int s;
3973
3974	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3975
3976	s = splnet();
3977
3978	bnx_stop(sc);
3979
3980	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
3981		printf("%s: Controller reset failed!\n");
3982		goto bnx_init_locked_exit;
3983	}
3984
3985	if (bnx_chipinit(sc)) {
3986		printf("%s: Controller initialization failed!\n");
3987		goto bnx_init_locked_exit;
3988	}
3989
3990	if (bnx_blockinit(sc)) {
3991		printf("%s: Block initialization failed!\n");
3992		goto bnx_init_locked_exit;
3993	}
3994
3995	/* Load our MAC address. */
3996	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
3997	bnx_set_mac_addr(sc);
3998
3999	/* Calculate and program the Ethernet MTU size. */
4000#if 0
4001	ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4002#else
4003	ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4004#endif
4005
4006	DBPRINT(sc, BNX_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);
4007
4008	/*
4009	 * Program the mtu and enable jumbo frame
4010	 * support.  Also set the mbuf
4011	 * allocation count for RX frames.
4012	 */
4013#if 0
4014	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4015		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4016	sc->mbuf_alloc_size = BNX_MAX_MRU; /* MJUM9BYTES */
4017#else
4018	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu);
4019	sc->mbuf_alloc_size = MCLBYTES;
4020#endif
4021
4022	/* Calculate the RX Ethernet frame size for rx_bd's. */
4023	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4024
4025	DBPRINT(sc, BNX_INFO,
4026		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4027		"max_frame_size = %d\n",
4028		__FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);
4029
4030	/* Program appropriate promiscuous/multicast filtering. */
4031	bnx_set_rx_mode(sc);
4032
4033	/* Init RX buffer descriptor chain. */
4034	bnx_init_rx_chain(sc);
4035
4036	/* Init TX buffer descriptor chain. */
4037	bnx_init_tx_chain(sc);
4038
4039	/* Enable host interrupts. */
4040	bnx_enable_intr(sc);
4041
4042	bnx_ifmedia_upd(ifp);
4043
4044	ifp->if_flags |= IFF_RUNNING;
4045	ifp->if_flags &= ~IFF_OACTIVE;
4046
4047	timeout_add(&sc->bnx_timeout, hz);
4048
4049bnx_init_locked_exit:
4050	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4051
4052	splx(s);
4053
4054	return;
4055}
4056
4057/****************************************************************************/
4058/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4059/* memory visible to the controller.                                        */
4060/*                                                                          */
4061/* Returns:                                                                 */
4062/*   0 for success, positive value for failure.                             */
4063/****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m_head, u_int16_t *prod,
	u_int16_t *chain_prod, u_int32_t *prod_bseq)
{
	/* TX_BD_FLAGS_* bits (checksum offload, VLAN tag) for the tx_bd. */
	u_int32_t vlan_tag_flags = 0;
#ifdef BNX_VLAN
	struct m_tag *mtag;
#endif
	/*
	 * map_arg carries the producer indices and flags into
	 * bnx_dma_map_tx_desc() and receives the updated values back.
	 */
	struct bnx_dmamap_arg map_arg;
	bus_dmamap_t map;
	int i, rc = 0;

#ifdef BNX_CKSUM
	/* Transfer any checksum offload flags to the bd. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}
#endif

#ifdef BNX_VLAN
	/* Transfer any VLAN tags to the bd. */
	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
			(VLAN_TAG_VALUE(mtag) << 16));
#endif

	/* Map the mbuf into DMAable memory. */
	map = sc->tx_mbuf_map[*chain_prod];
	map_arg.sc         = sc;
	map_arg.prod       = *prod;
	map_arg.chain_prod = *chain_prod;
	map_arg.prod_bseq  = *prod_bseq;
	map_arg.tx_flags   = vlan_tag_flags;
	/* Cap the segment count so the chain always keeps slack space free. */
	map_arg.maxsegs    = USABLE_TX_BD - sc->used_tx_bd -
		BNX_TX_SLACK_SPACE;

#if 0
	KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));
#endif

	for (i = 0; i < TX_PAGES; i++)
		map_arg.tx_chain[i] = sc->tx_bd_chain[i];

	/* Map the mbuf into our DMA address space. */
	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_head,
	    BUS_DMA_NOWAIT)) {
		printf("%s: Error mapping mbuf into TX chain!\n",
		    sc->bnx_dev.dv_xname);
		rc = ENOBUFS;
		goto bnx_tx_encap_exit;
	}
	/* Fill the tx_bd entries; updates map_arg.prod/chain_prod/prod_bseq. */
	bnx_dma_map_tx_desc(&map_arg, map);

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * delete the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_map[*chain_prod] =
		sc->tx_mbuf_map[map_arg.chain_prod];
	sc->tx_mbuf_map[map_arg.chain_prod] = map;
	sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
	/*
	 * NOTE(review): maxsegs was set above as the remaining capacity,
	 * and bnx_dma_map_tx_desc() presumably rewrites it to the number
	 * of segments actually used -- confirm, since used_tx_bd depends
	 * on it.
	 */
	sc->used_tx_bd += map_arg.maxsegs;

	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, *chain_prod,
		map_arg.maxsegs));

	/* prod still points the last used tx_bd at this point. */
	*prod       = map_arg.prod;
	*chain_prod = map_arg.chain_prod;
	*prod_bseq  = map_arg.prod_bseq;

bnx_tx_encap_exit:

	return(rc);
}
4153
4154/****************************************************************************/
4155/* Main transmit routine.                                                   */
4156/*                                                                          */
4157/* Returns:                                                                 */
4158/*   Nothing.                                                               */
4159/****************************************************************************/
4160void
4161bnx_start(struct ifnet *ifp)
4162{
4163	struct bnx_softc *sc = ifp->if_softc;
4164	struct mbuf *m_head = NULL;
4165	int count = 0;
4166	u_int16_t tx_prod, tx_chain_prod;
4167	u_int32_t	tx_prod_bseq;
4168
4169	/* If there's no link or the transmit queue is empty then just exit. */
4170	if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
4171		DBPRINT(sc, BNX_INFO_SEND, "%s(): No link or transmit queue empty.\n",
4172			__FUNCTION__);
4173		goto bnx_start_locked_exit;
4174	}
4175
4176	/* prod points to the next free tx_bd. */
4177	tx_prod = sc->tx_prod;
4178	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4179	tx_prod_bseq = sc->tx_prod_bseq;
4180
4181	DBPRINT(sc, BNX_INFO_SEND,
4182		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4183		"tx_prod_bseq = 0x%08X\n",
4184		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4185
4186	/* Keep adding entries while there is space in the ring. */
4187	while (sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {
4188
4189		/* Check for any frames to send. */
4190		IF_DEQUEUE(&ifp->if_snd, m_head);
4191		if (m_head == NULL)
4192			break;
4193
4194		/*
4195		 * Pack the data into the transmit ring. If we
4196		 * don't have room, place the mbuf back at the
4197		 * head of the queue and set the OACTIVE flag
4198		 * to wait for the NIC to drain the chain.
4199		 */
4200		if (bnx_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) {
4201			IF_PREPEND(&ifp->if_snd, m_head);
4202			ifp->if_flags |= IFF_OACTIVE;
4203			DBPRINT(sc, BNX_INFO_SEND,
4204				"TX chain is closed for business! Total tx_bd used = %d\n",
4205				sc->used_tx_bd);
4206			break;
4207		}
4208
4209		count++;
4210
4211#if NBPFILTER > 0
4212		/* Send a copy of the frame to any BPF listeners. */
4213		if (ifp->if_bpf)
4214			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
4215#endif
4216
4217		tx_prod = NEXT_TX_BD(tx_prod);
4218		tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4219	}
4220
4221	if (count == 0) {
4222		/* no packets were dequeued */
4223		DBPRINT(sc, BNX_VERBOSE_SEND, "%s(): No packets were dequeued\n",
4224			__FUNCTION__);
4225		goto bnx_start_locked_exit;
4226	}
4227
4228	/* Update the driver's counters. */
4229	sc->tx_prod      = tx_prod;
4230	sc->tx_prod_bseq = tx_prod_bseq;
4231
4232	DBPRINT(sc, BNX_INFO_SEND,
4233		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4234		"tx_prod_bseq = 0x%08X\n",
4235		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4236
4237	/* Start the transmit. */
4238	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4239	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4240
4241	/* Set the tx timeout. */
4242	ifp->if_timer = BNX_TX_TIMEOUT;
4243
4244bnx_start_locked_exit:
4245	return;
4246}
4247
4248/****************************************************************************/
4249/* Handles any IOCTL calls from the operating system.                       */
4250/*                                                                          */
4251/* Returns:                                                                 */
4252/*   0 for success, positive value for failure.                             */
4253/****************************************************************************/
4254int
4255bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4256{
4257	struct bnx_softc *sc = ifp->if_softc;
4258	struct ifreq *ifr = (struct ifreq *) data;
4259	struct ifaddr *ifa = (struct ifaddr *)data;
4260	struct mii_data *mii;
4261	int s, error = 0;
4262
4263	s = splnet();
4264
4265	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
4266		splx(s);
4267		return (error);
4268        }
4269
4270	switch (command) {
4271	case SIOCSIFADDR:
4272		ifp->if_flags |= IFF_UP;
4273		if (!(ifp->if_flags & IFF_RUNNING))
4274			bnx_init(sc);
4275#ifdef INET
4276		if (ifa->ifa_addr->sa_family == AF_INET)
4277			arp_ifinit(&sc->arpcom, ifa);
4278#endif /* INET */
4279		break;
4280	case SIOCSIFMTU:
4281		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
4282			error = EINVAL;
4283		else if (ifp->if_mtu != ifr->ifr_mtu)
4284			ifp->if_mtu = ifr->ifr_mtu;
4285		break;
4286	case SIOCSIFFLAGS:
4287		if (ifp->if_flags & IFF_UP) {
4288			if ((ifp->if_flags & IFF_RUNNING) &&
4289			    ((ifp->if_flags ^ sc->bnx_if_flags) &
4290			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
4291				bnx_set_rx_mode(sc);
4292			} else {
4293				if (!(ifp->if_flags & IFF_RUNNING))
4294					bnx_init(ifp);
4295                        }
4296                } else {
4297			if (ifp->if_flags & IFF_RUNNING)
4298				bnx_stop(sc);
4299		}
4300		sc->bnx_if_flags = ifp->if_flags;
4301		break;
4302	case SIOCADDMULTI:
4303	case SIOCDELMULTI:
4304		error = (command == SIOCADDMULTI)
4305                        ? ether_addmulti(ifr, &sc->arpcom)
4306                        : ether_delmulti(ifr, &sc->arpcom);
4307
4308		if (error == ENETRESET) {
4309			if (ifp->if_flags & IFF_RUNNING)
4310				bnx_set_rx_mode(sc);
4311			error = 0;
4312		}
4313		break;
4314	case SIOCSIFMEDIA:
4315	case SIOCGIFMEDIA:
4316		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4317			sc->bnx_phy_flags);
4318
4319		if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
4320			error = ifmedia_ioctl(ifp, ifr,
4321			    &sc->bnx_ifmedia, command);
4322		} else {
4323			mii = &sc->bnx_mii;
4324			error = ifmedia_ioctl(ifp, ifr,
4325			    &mii->mii_media, command);
4326		}
4327		break;
4328	default:
4329		error = ENOTTY;
4330		break;
4331	}
4332
4333	splx(s);
4334
4335	return (error);
4336}
4337
4338/****************************************************************************/
4339/* Transmit timeout handler.                                                */
4340/*                                                                          */
4341/* Returns:                                                                 */
4342/*   Nothing.                                                               */
4343/****************************************************************************/
4344void
4345bnx_watchdog(struct ifnet *ifp)
4346{
4347	struct bnx_softc *sc = ifp->if_softc;
4348
4349	DBRUN(BNX_WARN_SEND,
4350		bnx_dump_driver_state(sc);
4351		bnx_dump_status_block(sc));
4352
4353	printf("%s: Watchdog timeout occurred, resetting!\n");
4354
4355	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
4356
4357	bnx_init(sc);
4358
4359	ifp->if_oerrors++;
4360}
4361
4362/*
4363 * Interrupt handler.
4364 */
4365/****************************************************************************/
4366/* Main interrupt entry point.  Verifies that the controller generated the  */
4367/* interrupt and then calls a separate routine for handle the various       */
4368/* interrupt causes (PHY, TX, RX).                                          */
4369/*                                                                          */
4370/* Returns:                                                                 */
4371/*   0 for success, positive value for failure.                             */
4372/****************************************************************************/
4373int
4374bnx_intr(void *xsc)
4375{
4376	struct bnx_softc *sc;
4377	struct ifnet *ifp;
4378	u_int32_t status_attn_bits;
4379
4380	sc = xsc;
4381	ifp = &sc->arpcom.ac_if;
4382
4383	DBRUNIF(1, sc->interrupts_generated++);
4384
4385	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4386	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4387
4388	/*
4389	 * If the hardware status block index
4390	 * matches the last value read by the
4391	 * driver and we haven't asserted our
4392	 * interrupt then there's nothing to do.
4393	 */
4394	if ((sc->status_block->status_idx == sc->last_status_idx) &&
4395		(REG_RD(sc, BNX_PCICFG_MISC_STATUS) & BNX_PCICFG_MISC_STATUS_INTA_VALUE))
4396		return (0);
4397
4398	/* Ack the interrupt and stop others from occuring. */
4399	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4400		BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4401		BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4402
4403	/* Keep processing data as long as there is work to do. */
4404	for (;;) {
4405
4406		status_attn_bits = sc->status_block->status_attn_bits;
4407
4408		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
4409			printf("Simulating unexpected status attention bit set.");
4410			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
4411
4412		/* Was it a link change interrupt? */
4413		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4414			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4415			bnx_phy_intr(sc);
4416
4417		/* If any other attention is asserted then the chip is toast. */
4418		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4419			(sc->status_block->status_attn_bits_ack &
4420			~STATUS_ATTN_BITS_LINK_STATE))) {
4421
4422			DBRUN(1, sc->unexpected_attentions++);
4423
4424			printf("%s: Fatal attention detected: 0x%08X\n",
4425				sc->status_block->status_attn_bits);
4426
4427			DBRUN(BNX_FATAL,
4428				if (bnx_debug_unexpected_attention == 0)
4429					bnx_breakpoint(sc));
4430
4431			bnx_init(sc);
4432			return (1);
4433		}
4434
4435		/* Check for any completed RX frames. */
4436		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
4437			bnx_rx_intr(sc);
4438
4439		/* Check for any completed TX frames. */
4440		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
4441			bnx_tx_intr(sc);
4442
4443		/* Save the status block index value for use during the next interrupt. */
4444		sc->last_status_idx = sc->status_block->status_idx;
4445
4446		/* Prevent speculative reads from getting ahead of the status block. */
4447		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4448			BUS_SPACE_BARRIER_READ);
4449
4450		/* If there's no work left then exit the interrupt service routine. */
4451		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
4452	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
4453			break;
4454
4455	}
4456
4457	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4458	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
4459
4460	/* Re-enable interrupts. */
4461	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4462	       BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4463	       BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4464	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4465	       BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4466
4467	/* Handle any frames that arrived while handling the interrupt. */
4468	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
4469		bnx_start(ifp);
4470
4471	return (1);
4472}
4473
4474/****************************************************************************/
4475/* Programs the various packet receive modes (broadcast and multicast).     */
4476/*                                                                          */
4477/* Returns:                                                                 */
4478/*   Nothing.                                                               */
4479/****************************************************************************/
4480void
4481bnx_set_rx_mode(struct bnx_softc *sc)
4482{
4483	struct arpcom *ac = &sc->arpcom;
4484	struct ifnet *ifp = &ac->ac_if;
4485	struct ether_multi *enm;
4486	struct ether_multistep step;
4487	u_int32_t hashes[4] = { 0, 0, 0, 0 };
4488	u_int32_t rx_mode, sort_mode;
4489	int h, i;
4490
4491	/* Initialize receive mode default settings. */
4492	rx_mode   = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
4493			    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
4494	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
4495
4496	/*
4497	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4498	 * be enbled.
4499	 */
4500	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
4501		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
4502
4503	/*
4504	 * Check for promiscuous, all multicast, or selected
4505	 * multicast address filtering.
4506	 */
4507	if (ifp->if_flags & IFF_PROMISC) {
4508		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
4509
4510		/* Enable promiscuous mode. */
4511		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
4512		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
4513	} else if (ifp->if_flags & IFF_ALLMULTI) {
4514allmulti:
4515		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
4516
4517		/* Enable all multicast addresses. */
4518		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4519			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
4520		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
4521	} else {
4522		/* Accept one or more multicast(s). */
4523		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
4524
4525		ETHER_FIRST_MULTI(step, ac, enm);
4526		while (enm != NULL) {
4527			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4528				ifp->if_flags |= IFF_ALLMULTI;
4529				goto allmulti;
4530			}
4531			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x7F;
4532			hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
4533			ETHER_NEXT_MULTI(step, enm);
4534		}
4535
4536		for (i = 0; i < 4; i++)
4537			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
4538
4539		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
4540	}
4541
4542	/* Only make changes if the recive mode has actually changed. */
4543	if (rx_mode != sc->rx_mode) {
4544		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4545			rx_mode);
4546
4547		sc->rx_mode = rx_mode;
4548		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
4549	}
4550
4551	/* Disable and clear the exisitng sort before enabling a new sort. */
4552	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
4553	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
4554	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
4555}
4556
4557/****************************************************************************/
4558/* Called periodically to updates statistics from the controllers           */
4559/* statistics block.                                                        */
4560/*                                                                          */
4561/* Returns:                                                                 */
4562/*   Nothing.                                                               */
4563/****************************************************************************/
4564void
4565bnx_stats_update(struct bnx_softc *sc)
4566{
4567	struct ifnet *ifp = &sc->arpcom.ac_if;
4568	struct statistics_block *stats;
4569
4570	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
4571
4572	stats = (struct statistics_block *) sc->stats_block;
4573
4574	/*
4575	 * Update the interface statistics from the
4576	 * hardware statistics.
4577	 */
4578	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
4579
4580	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
4581				      (u_long) stats->stat_EtherStatsOverrsizePkts +
4582					  (u_long) stats->stat_IfInMBUFDiscards +
4583					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
4584					  (u_long) stats->stat_Dot3StatsFCSErrors;
4585
4586	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4587					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
4588					  (u_long) stats->stat_Dot3StatsLateCollisions;
4589
4590	/*
4591	 * Certain controllers don't report
4592	 * carrier sense errors correctly.
4593	 * See errata E11_5708CA0_1165.
4594	 */
4595	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
4596	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
4597		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
4598
4599	/*
4600	 * Update the sysctl statistics from the
4601	 * hardware statistics.
4602	 */
4603	sc->stat_IfHCInOctets =
4604		((u_int64_t) stats->stat_IfHCInOctets_hi << 32) +
4605		 (u_int64_t) stats->stat_IfHCInOctets_lo;
4606
4607	sc->stat_IfHCInBadOctets =
4608		((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
4609		 (u_int64_t) stats->stat_IfHCInBadOctets_lo;
4610
4611	sc->stat_IfHCOutOctets =
4612		((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
4613		 (u_int64_t) stats->stat_IfHCOutOctets_lo;
4614
4615	sc->stat_IfHCOutBadOctets =
4616		((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
4617		 (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
4618
4619	sc->stat_IfHCInUcastPkts =
4620		((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
4621		 (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
4622
4623	sc->stat_IfHCInMulticastPkts =
4624		((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
4625		 (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
4626
4627	sc->stat_IfHCInBroadcastPkts =
4628		((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
4629		 (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
4630
4631	sc->stat_IfHCOutUcastPkts =
4632		((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
4633		 (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
4634
4635	sc->stat_IfHCOutMulticastPkts =
4636		((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
4637		 (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
4638
4639	sc->stat_IfHCOutBroadcastPkts =
4640		((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
4641		 (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
4642
4643	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
4644		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
4645
4646	sc->stat_Dot3StatsCarrierSenseErrors =
4647		stats->stat_Dot3StatsCarrierSenseErrors;
4648
4649	sc->stat_Dot3StatsFCSErrors =
4650		stats->stat_Dot3StatsFCSErrors;
4651
4652	sc->stat_Dot3StatsAlignmentErrors =
4653		stats->stat_Dot3StatsAlignmentErrors;
4654
4655	sc->stat_Dot3StatsSingleCollisionFrames =
4656		stats->stat_Dot3StatsSingleCollisionFrames;
4657
4658	sc->stat_Dot3StatsMultipleCollisionFrames =
4659		stats->stat_Dot3StatsMultipleCollisionFrames;
4660
4661	sc->stat_Dot3StatsDeferredTransmissions =
4662		stats->stat_Dot3StatsDeferredTransmissions;
4663
4664	sc->stat_Dot3StatsExcessiveCollisions =
4665		stats->stat_Dot3StatsExcessiveCollisions;
4666
4667	sc->stat_Dot3StatsLateCollisions =
4668		stats->stat_Dot3StatsLateCollisions;
4669
4670	sc->stat_EtherStatsCollisions =
4671		stats->stat_EtherStatsCollisions;
4672
4673	sc->stat_EtherStatsFragments =
4674		stats->stat_EtherStatsFragments;
4675
4676	sc->stat_EtherStatsJabbers =
4677		stats->stat_EtherStatsJabbers;
4678
4679	sc->stat_EtherStatsUndersizePkts =
4680		stats->stat_EtherStatsUndersizePkts;
4681
4682	sc->stat_EtherStatsOverrsizePkts =
4683		stats->stat_EtherStatsOverrsizePkts;
4684
4685	sc->stat_EtherStatsPktsRx64Octets =
4686		stats->stat_EtherStatsPktsRx64Octets;
4687
4688	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
4689		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
4690
4691	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
4692		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
4693
4694	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
4695		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
4696
4697	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
4698		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
4699
4700	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
4701		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
4702
4703	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
4704		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
4705
4706	sc->stat_EtherStatsPktsTx64Octets =
4707		stats->stat_EtherStatsPktsTx64Octets;
4708
4709	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
4710		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
4711
4712	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
4713		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
4714
4715	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
4716		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
4717
4718	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
4719		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
4720
4721	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
4722		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
4723
4724	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
4725		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
4726
4727	sc->stat_XonPauseFramesReceived =
4728		stats->stat_XonPauseFramesReceived;
4729
4730	sc->stat_XoffPauseFramesReceived =
4731		stats->stat_XoffPauseFramesReceived;
4732
4733	sc->stat_OutXonSent =
4734		stats->stat_OutXonSent;
4735
4736	sc->stat_OutXoffSent =
4737		stats->stat_OutXoffSent;
4738
4739	sc->stat_FlowControlDone =
4740		stats->stat_FlowControlDone;
4741
4742	sc->stat_MacControlFramesReceived =
4743		stats->stat_MacControlFramesReceived;
4744
4745	sc->stat_XoffStateEntered =
4746		stats->stat_XoffStateEntered;
4747
4748	sc->stat_IfInFramesL2FilterDiscards =
4749		stats->stat_IfInFramesL2FilterDiscards;
4750
4751	sc->stat_IfInRuleCheckerDiscards =
4752		stats->stat_IfInRuleCheckerDiscards;
4753
4754	sc->stat_IfInFTQDiscards =
4755		stats->stat_IfInFTQDiscards;
4756
4757	sc->stat_IfInMBUFDiscards =
4758		stats->stat_IfInMBUFDiscards;
4759
4760	sc->stat_IfInRuleCheckerP4Hit =
4761		stats->stat_IfInRuleCheckerP4Hit;
4762
4763	sc->stat_CatchupInRuleCheckerDiscards =
4764		stats->stat_CatchupInRuleCheckerDiscards;
4765
4766	sc->stat_CatchupInFTQDiscards =
4767		stats->stat_CatchupInFTQDiscards;
4768
4769	sc->stat_CatchupInMBUFDiscards =
4770		stats->stat_CatchupInMBUFDiscards;
4771
4772	sc->stat_CatchupInRuleCheckerP4Hit =
4773		stats->stat_CatchupInRuleCheckerP4Hit;
4774
4775	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
4776}
4777
4778void
4779bnx_tick(void *xsc)
4780{
4781	struct bnx_softc *sc = xsc;
4782	struct ifnet *ifp = &sc->arpcom.ac_if;
4783	struct mii_data *mii = NULL;
4784	u_int32_t msg;
4785
4786	/* Tell the firmware that the driver is still running. */
4787#ifdef BNX_DEBUG
4788	msg = (u_int32_t) BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
4789#else
4790	msg = (u_int32_t) ++sc->bnx_fw_drv_pulse_wr_seq;
4791#endif
4792	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
4793
4794	/* Update the statistics from the hardware statistics block. */
4795	bnx_stats_update(sc);
4796
4797	/* Schedule the next tick. */
4798	timeout_add(&sc->bnx_timeout, hz);
4799
4800	/* If link is up already up then we're done. */
4801	if (sc->bnx_link)
4802		goto bnx_tick_locked_exit;
4803
4804	/* DRC - ToDo: Add SerDes support and check SerDes link here. */
4805
4806	mii = &sc->bnx_mii;
4807	mii_tick(mii);
4808
4809	/* Check if the link has come up. */
4810	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
4811	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4812		sc->bnx_link++;
4813		/* Now that link is up, handle any outstanding TX traffic. */
4814		if (!IFQ_IS_EMPTY(&ifp->if_snd))
4815			bnx_start(ifp);
4816	}
4817
4818bnx_tick_locked_exit:
4819	return;
4820}
4821
4822/****************************************************************************/
4823/* BNX Debug Routines                                                       */
4824/****************************************************************************/
4825#ifdef BNX_DEBUG
4826
4827/****************************************************************************/
4828/* Prints out information about an mbuf.                                    */
4829/*                                                                          */
4830/* Returns:                                                                 */
4831/*   Nothing.                                                               */
4832/****************************************************************************/
4833void
4834bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
4835{
4836	u_int32_t val_hi, val_lo;
4837	struct mbuf *mp = m;
4838
4839	if (m == NULL) {
4840		/* Index out of range. */
4841		printf("mbuf ptr is null!\n");
4842		return;
4843	}
4844
4845	while (mp) {
4846		val_hi = BNX_ADDR_HI(mp);
4847		val_lo = BNX_ADDR_LO(mp);
4848		printf("mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
4849			   val_hi, val_lo, mp->m_len);
4850
4851		if (mp->m_flags & M_EXT)
4852			printf("M_EXT ");
4853		if (mp->m_flags & M_PKTHDR)
4854			printf("M_PKTHDR ");
4855		printf("\n");
4856
4857		if (mp->m_flags & M_EXT) {
4858			val_hi = BNX_ADDR_HI(mp->m_ext.ext_buf);
4859			val_lo = BNX_ADDR_LO(mp->m_ext.ext_buf);
4860			printf("- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
4861				val_hi, val_lo, mp->m_ext.ext_size);
4862		}
4863
4864		mp = mp->m_next;
4865	}
4866
4867
4868}
4869
4870/****************************************************************************/
4871/* Prints out the mbufs in the TX mbuf chain.                               */
4872/*                                                                          */
4873/* Returns:                                                                 */
4874/*   Nothing.                                                               */
4875/****************************************************************************/
4876void
4877bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4878{
4879	struct mbuf *m;
4880	int i;
4881
4882	BNX_PRINTF(sc,
4883		"----------------------------"
4884		"  tx mbuf data  "
4885		"----------------------------\n");
4886
4887	for (i = 0; i < count; i++) {
4888	 	m = sc->tx_mbuf_ptr[chain_prod];
4889		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
4890		bnx_dump_mbuf(sc, m);
4891		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
4892	}
4893
4894	BNX_PRINTF(sc,
4895		"----------------------------"
4896		"----------------"
4897		"----------------------------\n");
4898}
4899
4900/*
4901 * This routine prints the RX mbuf chain.
4902 */
4903void
4904bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4905{
4906	struct mbuf *m;
4907	int i;
4908
4909	BNX_PRINTF(sc,
4910		"----------------------------"
4911		"  rx mbuf data  "
4912		"----------------------------\n");
4913
4914	for (i = 0; i < count; i++) {
4915	 	m = sc->rx_mbuf_ptr[chain_prod];
4916		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
4917		bnx_dump_mbuf(sc, m);
4918		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
4919	}
4920
4921
4922	BNX_PRINTF(sc,
4923		"----------------------------"
4924		"----------------"
4925		"----------------------------\n");
4926}
4927
4928void
4929bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
4930{
4931	if (idx > MAX_TX_BD)
4932		/* Index out of range. */
4933		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
4934	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4935		/* TX Chain page pointer. */
4936		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
4937			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
4938	else
4939		/* Normal tx_bd entry. */
4940		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
4941			"flags = 0x%08X\n", idx,
4942			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
4943			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
4944}
4945
4946void
4947bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
4948{
4949	if (idx > MAX_RX_BD)
4950		/* Index out of range. */
4951		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
4952	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4953		/* TX Chain page pointer. */
4954		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
4955			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
4956	else
4957		/* Normal tx_bd entry. */
4958		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
4959			"flags = 0x%08X\n", idx,
4960			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
4961			rxbd->rx_bd_len, rxbd->rx_bd_flags);
4962}
4963
4964void
4965bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
4966{
4967	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
4968		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
4969		"tcp_udp_xsum = 0x%04X\n", idx,
4970		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
4971		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
4972		l2fhdr->l2_fhdr_tcp_udp_xsum);
4973}
4974
4975/*
4976 * This routine prints the TX chain.
4977 */
4978void
4979bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
4980{
4981	struct tx_bd *txbd;
4982	int i;
4983
4984	/* First some info about the tx_bd chain structure. */
4985	BNX_PRINTF(sc,
4986		"----------------------------"
4987		"  tx_bd  chain  "
4988		"----------------------------\n");
4989
4990	BNX_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
4991		(u_int32_t) BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
4992
4993	BNX_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
4994		(u_int32_t) TOTAL_TX_BD_PER_PAGE, (u_int32_t) USABLE_TX_BD_PER_PAGE);
4995
4996	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t) TOTAL_TX_BD);
4997
4998	BNX_PRINTF(sc, ""
4999		"-----------------------------"
5000		"   tx_bd data   "
5001		"-----------------------------\n");
5002
5003	/* Now print out the tx_bd's themselves. */
5004	for (i = 0; i < count; i++) {
5005	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5006		bnx_dump_txbd(sc, tx_prod, txbd);
5007		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5008	}
5009
5010	BNX_PRINTF(sc,
5011		"-----------------------------"
5012		"--------------"
5013		"-----------------------------\n");
5014}
5015
5016/*
5017 * This routine prints the RX chain.
5018 */
5019void
5020bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5021{
5022	struct rx_bd *rxbd;
5023	int i;
5024
5025	/* First some info about the tx_bd chain structure. */
5026	BNX_PRINTF(sc,
5027		"----------------------------"
5028		"  rx_bd  chain  "
5029		"----------------------------\n");
5030
5031	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5032
5033	BNX_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
5034		(u_int32_t) BCM_PAGE_SIZE, (u_int32_t) RX_PAGES);
5035
5036	BNX_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5037		(u_int32_t) TOTAL_RX_BD_PER_PAGE, (u_int32_t) USABLE_RX_BD_PER_PAGE);
5038
5039	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t) TOTAL_RX_BD);
5040
5041	BNX_PRINTF(sc,
5042		"----------------------------"
5043		"   rx_bd data   "
5044		"----------------------------\n");
5045
5046	/* Now print out the rx_bd's themselves. */
5047	for (i = 0; i < count; i++) {
5048		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5049		bnx_dump_rxbd(sc, rx_prod, rxbd);
5050		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5051	}
5052
5053	BNX_PRINTF(sc,
5054		"----------------------------"
5055		"--------------"
5056		"----------------------------\n");
5057}
5058
5059/*
5060 * This routine prints the status block.
5061 */
5062void
5063bnx_dump_status_block(struct bnx_softc *sc)
5064{
5065	struct status_block *sblk;
5066
5067	sblk = sc->status_block;
5068
5069   	BNX_PRINTF(sc, "----------------------------- Status Block "
5070		"-----------------------------\n");
5071
5072	BNX_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5073		sblk->status_attn_bits, sblk->status_attn_bits_ack,
5074		sblk->status_idx);
5075
5076	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
5077		sblk->status_rx_quick_consumer_index0,
5078		sblk->status_tx_quick_consumer_index0);
5079
5080	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5081
5082	/* Theses indices are not used for normal L2 drivers. */
5083	if (sblk->status_rx_quick_consumer_index1 ||
5084		sblk->status_tx_quick_consumer_index1)
5085		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
5086			sblk->status_rx_quick_consumer_index1,
5087			sblk->status_tx_quick_consumer_index1);
5088
5089	if (sblk->status_rx_quick_consumer_index2 ||
5090		sblk->status_tx_quick_consumer_index2)
5091		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
5092			sblk->status_rx_quick_consumer_index2,
5093			sblk->status_tx_quick_consumer_index2);
5094
5095	if (sblk->status_rx_quick_consumer_index3 ||
5096		sblk->status_tx_quick_consumer_index3)
5097		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
5098			sblk->status_rx_quick_consumer_index3,
5099			sblk->status_tx_quick_consumer_index3);
5100
5101	if (sblk->status_rx_quick_consumer_index4 ||
5102		sblk->status_rx_quick_consumer_index5)
5103		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
5104			sblk->status_rx_quick_consumer_index4,
5105			sblk->status_rx_quick_consumer_index5);
5106
5107	if (sblk->status_rx_quick_consumer_index6 ||
5108		sblk->status_rx_quick_consumer_index7)
5109		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
5110			sblk->status_rx_quick_consumer_index6,
5111			sblk->status_rx_quick_consumer_index7);
5112
5113	if (sblk->status_rx_quick_consumer_index8 ||
5114		sblk->status_rx_quick_consumer_index9)
5115		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
5116			sblk->status_rx_quick_consumer_index8,
5117			sblk->status_rx_quick_consumer_index9);
5118
5119	if (sblk->status_rx_quick_consumer_index10 ||
5120		sblk->status_rx_quick_consumer_index11)
5121		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
5122			sblk->status_rx_quick_consumer_index10,
5123			sblk->status_rx_quick_consumer_index11);
5124
5125	if (sblk->status_rx_quick_consumer_index12 ||
5126		sblk->status_rx_quick_consumer_index13)
5127		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
5128			sblk->status_rx_quick_consumer_index12,
5129			sblk->status_rx_quick_consumer_index13);
5130
5131	if (sblk->status_rx_quick_consumer_index14 ||
5132		sblk->status_rx_quick_consumer_index15)
5133		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
5134			sblk->status_rx_quick_consumer_index14,
5135			sblk->status_rx_quick_consumer_index15);
5136
5137	if (sblk->status_completion_producer_index ||
5138		sblk->status_cmd_consumer_index)
5139		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
5140			sblk->status_completion_producer_index,
5141			sblk->status_cmd_consumer_index);
5142
5143	BNX_PRINTF(sc, "-------------------------------------------"
5144		"-----------------------------\n");
5145}
5146
5147/*
5148 * This routine prints the statistics block.
5149 */
5150void
5151bnx_dump_stats_block(struct bnx_softc *sc)
5152{
5153	struct statistics_block *sblk;
5154
5155	sblk = sc->stats_block;
5156
5157	BNX_PRINTF(sc, ""
5158		"-----------------------------"
5159		" Stats  Block "
5160		"-----------------------------\n");
5161
5162	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
5163		"IfHcInBadOctets      = 0x%08X:%08X\n",
5164		sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5165		sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5166
5167	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
5168		"IfHcOutBadOctets     = 0x%08X:%08X\n",
5169		sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5170		sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5171
5172	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
5173		"IfHcInMulticastPkts  = 0x%08X:%08X\n",
5174		sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5175		sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);
5176
5177	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
5178		"IfHcOutUcastPkts     = 0x%08X:%08X\n",
5179		sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
5180		sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);
5181
5182	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5183		sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
5184		sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);
5185
5186	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5187		BNX_PRINTF(sc, "0x%08X : "
5188		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5189		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5190
5191	if (sblk->stat_Dot3StatsCarrierSenseErrors)
5192		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5193			sblk->stat_Dot3StatsCarrierSenseErrors);
5194
5195	if (sblk->stat_Dot3StatsFCSErrors)
5196		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5197			sblk->stat_Dot3StatsFCSErrors);
5198
5199	if (sblk->stat_Dot3StatsAlignmentErrors)
5200		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5201			sblk->stat_Dot3StatsAlignmentErrors);
5202
5203	if (sblk->stat_Dot3StatsSingleCollisionFrames)
5204		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5205			sblk->stat_Dot3StatsSingleCollisionFrames);
5206
5207	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5208		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5209			sblk->stat_Dot3StatsMultipleCollisionFrames);
5210
5211	if (sblk->stat_Dot3StatsDeferredTransmissions)
5212		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5213			sblk->stat_Dot3StatsDeferredTransmissions);
5214
5215	if (sblk->stat_Dot3StatsExcessiveCollisions)
5216		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5217			sblk->stat_Dot3StatsExcessiveCollisions);
5218
5219	if (sblk->stat_Dot3StatsLateCollisions)
5220		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5221			sblk->stat_Dot3StatsLateCollisions);
5222
5223	if (sblk->stat_EtherStatsCollisions)
5224		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5225			sblk->stat_EtherStatsCollisions);
5226
5227	if (sblk->stat_EtherStatsFragments)
5228		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5229			sblk->stat_EtherStatsFragments);
5230
5231	if (sblk->stat_EtherStatsJabbers)
5232		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5233			sblk->stat_EtherStatsJabbers);
5234
5235	if (sblk->stat_EtherStatsUndersizePkts)
5236		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5237			sblk->stat_EtherStatsUndersizePkts);
5238
5239	if (sblk->stat_EtherStatsOverrsizePkts)
5240		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5241			sblk->stat_EtherStatsOverrsizePkts);
5242
5243	if (sblk->stat_EtherStatsPktsRx64Octets)
5244		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5245			sblk->stat_EtherStatsPktsRx64Octets);
5246
5247	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5248		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5249			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5250
5251	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5252		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
5253			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5254
5255	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5256		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
5257			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5258
5259	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5260		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
5261			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5262
5263	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5264		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
5265			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5266
5267	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5268		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
5269			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5270
5271	if (sblk->stat_EtherStatsPktsTx64Octets)
5272		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5273			sblk->stat_EtherStatsPktsTx64Octets);
5274
5275	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5276		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5277			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
5278
5279	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
5280		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
5281			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
5282
5283	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
5284		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
5285			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
5286
5287	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
5288		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
5289			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
5290
5291	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
5292		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
5293			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
5294
5295	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
5296		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
5297			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
5298
5299	if (sblk->stat_XonPauseFramesReceived)
5300		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
5301			sblk->stat_XonPauseFramesReceived);
5302
5303	if (sblk->stat_XoffPauseFramesReceived)
5304	   BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
5305			sblk->stat_XoffPauseFramesReceived);
5306
5307	if (sblk->stat_OutXonSent)
5308		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
5309			sblk->stat_OutXonSent);
5310
5311	if (sblk->stat_OutXoffSent)
5312		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
5313			sblk->stat_OutXoffSent);
5314
5315	if (sblk->stat_FlowControlDone)
5316		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
5317			sblk->stat_FlowControlDone);
5318
5319	if (sblk->stat_MacControlFramesReceived)
5320		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
5321			sblk->stat_MacControlFramesReceived);
5322
5323	if (sblk->stat_XoffStateEntered)
5324		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
5325			sblk->stat_XoffStateEntered);
5326
5327	if (sblk->stat_IfInFramesL2FilterDiscards)
5328		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
5329			sblk->stat_IfInFramesL2FilterDiscards);
5330
5331	if (sblk->stat_IfInRuleCheckerDiscards)
5332		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
5333			sblk->stat_IfInRuleCheckerDiscards);
5334
5335	if (sblk->stat_IfInFTQDiscards)
5336		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
5337			sblk->stat_IfInFTQDiscards);
5338
5339	if (sblk->stat_IfInMBUFDiscards)
5340		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
5341			sblk->stat_IfInMBUFDiscards);
5342
5343	if (sblk->stat_IfInRuleCheckerP4Hit)
5344		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
5345			sblk->stat_IfInRuleCheckerP4Hit);
5346
5347	if (sblk->stat_CatchupInRuleCheckerDiscards)
5348		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
5349			sblk->stat_CatchupInRuleCheckerDiscards);
5350
5351	if (sblk->stat_CatchupInFTQDiscards)
5352		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
5353			sblk->stat_CatchupInFTQDiscards);
5354
5355	if (sblk->stat_CatchupInMBUFDiscards)
5356		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
5357			sblk->stat_CatchupInMBUFDiscards);
5358
5359	if (sblk->stat_CatchupInRuleCheckerP4Hit)
5360		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
5361			sblk->stat_CatchupInRuleCheckerP4Hit);
5362
5363	BNX_PRINTF(sc,
5364		"-----------------------------"
5365		"--------------"
5366		"-----------------------------\n");
5367}
5368
5369void
5370bnx_dump_driver_state(struct bnx_softc *sc)
5371{
5372	u_int32_t val_hi, val_lo;
5373
5374	BNX_PRINTF(sc,
5375		"-----------------------------"
5376		" Driver State "
5377		"-----------------------------\n");
5378
5379	val_hi = BNX_ADDR_HI(sc);
5380	val_lo = BNX_ADDR_LO(sc);
5381	BNX_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
5382		val_hi, val_lo);
5383
5384	val_hi = BNX_ADDR_HI(sc->status_block);
5385	val_lo = BNX_ADDR_LO(sc->status_block);
5386	BNX_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
5387		val_hi, val_lo);
5388
5389	val_hi = BNX_ADDR_HI(sc->stats_block);
5390	val_lo = BNX_ADDR_LO(sc->stats_block);
5391	BNX_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
5392		val_hi, val_lo);
5393
5394	val_hi = BNX_ADDR_HI(sc->tx_bd_chain);
5395	val_lo = BNX_ADDR_LO(sc->tx_bd_chain);
5396	BNX_PRINTF(sc,
5397		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
5398		val_hi, val_lo);
5399
5400	val_hi = BNX_ADDR_HI(sc->rx_bd_chain);
5401	val_lo = BNX_ADDR_LO(sc->rx_bd_chain);
5402	BNX_PRINTF(sc,
5403		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
5404		val_hi, val_lo);
5405
5406	val_hi = BNX_ADDR_HI(sc->tx_mbuf_ptr);
5407	val_lo = BNX_ADDR_LO(sc->tx_mbuf_ptr);
5408	BNX_PRINTF(sc,
5409		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
5410		val_hi, val_lo);
5411
5412	val_hi = BNX_ADDR_HI(sc->rx_mbuf_ptr);
5413	val_lo = BNX_ADDR_LO(sc->rx_mbuf_ptr);
5414	BNX_PRINTF(sc,
5415		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
5416		val_hi, val_lo);
5417
5418	BNX_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
5419		sc->interrupts_generated);
5420
5421	BNX_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
5422		sc->rx_interrupts);
5423
5424	BNX_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
5425		sc->tx_interrupts);
5426
5427	BNX_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
5428		sc->last_status_idx);
5429
5430	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
5431		sc->tx_prod);
5432
5433	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
5434		sc->tx_cons);
5435
5436	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
5437		sc->tx_prod_bseq);
5438
5439	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
5440		sc->rx_prod);
5441
5442	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
5443		sc->rx_cons);
5444
5445	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
5446		sc->rx_prod_bseq);
5447
5448	BNX_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5449		sc->rx_mbuf_alloc);
5450
5451	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
5452		sc->free_rx_bd);
5453
5454	BNX_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
5455		sc->rx_low_watermark, (u_int32_t) USABLE_RX_BD);
5456
5457	BNX_PRINTF(sc, "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
5458		sc->tx_mbuf_alloc);
5459
5460	BNX_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5461		sc->rx_mbuf_alloc);
5462
5463	BNX_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
5464		sc->used_tx_bd);
5465
5466	BNX_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
5467		sc->tx_hi_watermark, (u_int32_t) USABLE_TX_BD);
5468
5469	BNX_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
5470		sc->mbuf_alloc_failed);
5471
5472	BNX_PRINTF(sc,
5473		"-----------------------------"
5474		"--------------"
5475		"-----------------------------\n");
5476}
5477
5478void
5479bnx_dump_hw_state(struct bnx_softc *sc)
5480{
5481	u_int32_t val1;
5482	int i;
5483
5484	BNX_PRINTF(sc,
5485		"----------------------------"
5486		" Hardware State "
5487		"----------------------------\n");
5488
5489	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
5490
5491	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
5492	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
5493		val1, BNX_MISC_ENABLE_STATUS_BITS);
5494
5495	val1 = REG_RD(sc, BNX_DMA_STATUS);
5496	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
5497
5498	val1 = REG_RD(sc, BNX_CTX_STATUS);
5499	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
5500
5501	val1 = REG_RD(sc, BNX_EMAC_STATUS);
5502	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BNX_EMAC_STATUS);
5503
5504	val1 = REG_RD(sc, BNX_RPM_STATUS);
5505	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
5506
5507	val1 = REG_RD(sc, BNX_TBDR_STATUS);
5508	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BNX_TBDR_STATUS);
5509
5510	val1 = REG_RD(sc, BNX_TDMA_STATUS);
5511	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BNX_TDMA_STATUS);
5512
5513	val1 = REG_RD(sc, BNX_HC_STATUS);
5514	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
5515
5516	BNX_PRINTF(sc,
5517		"----------------------------"
5518		"----------------"
5519		"----------------------------\n");
5520
5521	BNX_PRINTF(sc,
5522		"----------------------------"
5523		" Register  Dump "
5524		"----------------------------\n");
5525
5526	for (i = 0x400; i < 0x8000; i += 0x10)
5527		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
5528			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
5529			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
5530
5531	BNX_PRINTF(sc,
5532		"----------------------------"
5533		"----------------"
5534		"----------------------------\n");
5535}
5536
5537void
5538bnx_breakpoint(struct bnx_softc *sc)
5539{
5540
5541	/* Unreachable code to shut the compiler up about unused functions. */
5542	if (0) {
5543   		bnx_dump_txbd(sc, 0, NULL);
5544		bnx_dump_rxbd(sc, 0, NULL);
5545		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
5546		bnx_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
5547		bnx_dump_l2fhdr(sc, 0, NULL);
5548		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
5549		bnx_dump_rx_chain(sc, 0, USABLE_RX_BD);
5550		bnx_dump_status_block(sc);
5551		bnx_dump_stats_block(sc);
5552		bnx_dump_driver_state(sc);
5553		bnx_dump_hw_state(sc);
5554	}
5555
5556	bnx_dump_driver_state(sc);
5557	/* Print the important status block fields. */
5558	bnx_dump_status_block(sc);
5559
5560#if 0
5561	/* Call the debugger. */
5562	breakpoint();
5563#endif
5564
5565	return;
5566}
5567#endif
5568