/* if_bce.c revision 161299 */
1/*-
2 * Copyright (c) 2006 Broadcom Corporation
3 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 *    may be used to endorse or promote products derived from this software
16 *    without specific prior written consent.
17 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/bce/if_bce.c 161299 2006-08-15 04:56:29Z julian $");
33
34/*
35 * The following controllers are supported by this driver:
36 *   BCM5706C A2, A3
37 *   BCM5708C B1
38 *
39 * The following controllers are not supported by this driver:
40 * (These are not "Production" versions of the controller.)
41 *
42 *   BCM5706C A0, A1
43 *   BCM5706S A0, A1, A2, A3
44 *   BCM5708C A0, B0
45 *   BCM5708S A0, B0, B1
46 */
47
48#include "opt_bce.h"
49
50#include <dev/bce/if_bcereg.h>
51#include <dev/bce/if_bcefw.h>
52
/****************************************************************************/
/* BCE Driver Version                                                       */
/****************************************************************************/
/* Human-readable version string appended to the device description in bce_probe(). */
char bce_driver_version[] = "v0.9.6";
57
58
59/****************************************************************************/
60/* BCE Debug Options                                                        */
61/****************************************************************************/
#ifdef BCE_DEBUG
	/* Default debug message level mask (BCE_WARN and above). */
	u32 bce_debug = BCE_WARN;

	/*
	 * The bce_debug_* variables below control artificial fault
	 * injection.  Each value sets how often the associated failure
	 * is simulated; 0 disables injection entirely.
	 *
	 *          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 =	1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bce_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bce_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bce_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bce_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bce_debug_bootcode_running_failure = 0;
#endif
90
91
92/****************************************************************************/
93/* PCI Device ID Table                                                      */
94/*                                                                          */
95/* Used by bce_probe() to identify the devices supported by this driver.    */
96/****************************************************************************/
/* Maximum length of the device description string built in bce_probe(). */
#define BCE_DEVDESC_MAX		64

/* Probe table: { vendor, device, subvendor, subdevice, description }.
 * Scanned in order by bce_probe(); PCI_ANY_ID entries act as wildcards,
 * so specific OEM (subvendor/subdevice) rows must precede the catch-alls. */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	/*
	 * NOTE(review): this entry repeats the BCM5708 device ID, wildcard
	 * subsystem IDs, and "1000Base-T" description of the BCM5708C row
	 * above, so it can never match on its own (the earlier row wins).
	 * Presumably it should reference a distinct BCM5708S device ID once
	 * one is defined in if_bcereg.h -- confirm against the header.
	 */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ 0, 0, 0, 0, NULL }
};
123
124
125/****************************************************************************/
126/* Supported Flash NVRAM device data.                                       */
127/****************************************************************************/
/*
 * NVRAM device descriptors, selected at runtime by bce_init_nvram() from
 * the controller's flash strapping.  NOTE(review): field order is presumed
 * to follow struct flash_spec in if_bcereg.h (strapping match value, three
 * config/command words, write command word, buffered-flash flag, page
 * geometry, byte address mask, total size, name) -- confirm against the
 * header before editing entries.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
214
215
216/****************************************************************************/
217/* FreeBSD device entry points.                                             */
218/****************************************************************************/
219static int  bce_probe				(device_t);
220static int  bce_attach				(device_t);
221static int  bce_detach				(device_t);
222static void bce_shutdown			(device_t);
223
224
225/****************************************************************************/
226/* BCE Debug Data Structure Dump Routines                                   */
227/****************************************************************************/
228#ifdef BCE_DEBUG
229static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
230static void bce_dump_tx_mbuf_chain	(struct bce_softc *, int, int);
231static void bce_dump_rx_mbuf_chain	(struct bce_softc *, int, int);
232static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
233static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
234static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
235static void bce_dump_tx_chain		(struct bce_softc *, int, int);
236static void bce_dump_rx_chain		(struct bce_softc *, int, int);
237static void bce_dump_status_block	(struct bce_softc *);
238static void bce_dump_stats_block	(struct bce_softc *);
239static void bce_dump_driver_state	(struct bce_softc *);
240static void bce_dump_hw_state		(struct bce_softc *);
241static void bce_breakpoint			(struct bce_softc *);
242#endif
243
244
245/****************************************************************************/
246/* BCE Register/Memory Access Routines                                      */
247/****************************************************************************/
248static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
249static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
250static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
251static int  bce_miibus_read_reg		(device_t, int, int);
252static int  bce_miibus_write_reg	(device_t, int, int, int);
253static void bce_miibus_statchg		(device_t);
254
255
256/****************************************************************************/
257/* BCE NVRAM Access Routines                                                */
258/****************************************************************************/
259static int  bce_acquire_nvram_lock	(struct bce_softc *);
260static int  bce_release_nvram_lock	(struct bce_softc *);
261static void bce_enable_nvram_access	(struct bce_softc *);
262static void	bce_disable_nvram_access(struct bce_softc *);
263static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
264static int  bce_init_nvram			(struct bce_softc *);
265static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
266static int  bce_nvram_test			(struct bce_softc *);
267#ifdef BCE_NVRAM_WRITE_SUPPORT
268static int  bce_enable_nvram_write	(struct bce_softc *);
269static void bce_disable_nvram_write	(struct bce_softc *);
270static int  bce_nvram_erase_page	(struct bce_softc *, u32);
271static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
272static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
273#endif
274
275/****************************************************************************/
276/*                                                                          */
277/****************************************************************************/
278static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
279static void bce_dma_map_tx_desc		(void *, bus_dma_segment_t *, int, bus_size_t, int);
280static int  bce_dma_alloc			(device_t);
281static void bce_dma_free			(struct bce_softc *);
282static void bce_release_resources	(struct bce_softc *);
283
284/****************************************************************************/
285/* BCE Firmware Synchronization and Load                                    */
286/****************************************************************************/
287static int  bce_fw_sync				(struct bce_softc *, u32);
288static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
289static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
290static void bce_init_cpus			(struct bce_softc *);
291
292static void bce_stop				(struct bce_softc *);
293static int  bce_reset				(struct bce_softc *, u32);
294static int  bce_chipinit 			(struct bce_softc *);
295static int  bce_blockinit 			(struct bce_softc *);
296static int  bce_get_buf				(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297
298static int  bce_init_tx_chain		(struct bce_softc *);
299static int  bce_init_rx_chain		(struct bce_softc *);
300static void bce_free_rx_chain		(struct bce_softc *);
301static void bce_free_tx_chain		(struct bce_softc *);
302
303static int  bce_tx_encap			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
304static void bce_start_locked		(struct ifnet *);
305static void bce_start				(struct ifnet *);
306static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
307static void bce_watchdog			(struct ifnet *);
308static int  bce_ifmedia_upd			(struct ifnet *);
309static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
310static void bce_init_locked			(struct bce_softc *);
311static void bce_init				(void *);
312
313static void bce_init_context		(struct bce_softc *);
314static void bce_get_mac_addr		(struct bce_softc *);
315static void bce_set_mac_addr		(struct bce_softc *);
316static void bce_phy_intr			(struct bce_softc *);
317static void bce_rx_intr				(struct bce_softc *);
318static void bce_tx_intr				(struct bce_softc *);
319static void bce_disable_intr		(struct bce_softc *);
320static void bce_enable_intr			(struct bce_softc *);
321
322#ifdef DEVICE_POLLING
323static void bce_poll_locked			(struct ifnet *, enum poll_cmd, int);
324static void bce_poll				(struct ifnet *, enum poll_cmd, int);
325#endif
326static void bce_intr				(void *);
327static void bce_set_rx_mode			(struct bce_softc *);
328static void bce_stats_update		(struct bce_softc *);
329static void bce_tick_locked			(struct bce_softc *);
330static void bce_tick				(void *);
331static void bce_add_sysctls			(struct bce_softc *);
332
333
334/****************************************************************************/
335/* FreeBSD device dispatch table.                                           */
336/****************************************************************************/
/* newbus method dispatch table wiring the driver entry points and the
 * MII register accessors into the device framework. */
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	/* Terminator entry. */
	{ 0, 0 }
};

/* Driver declaration: name, methods, and per-instance softc size. */
static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

/* Module dependencies: PCI bus, Ethernet framework, and MII bus layer. */
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

/* Register bce on the PCI bus and attach a miibus child beneath it. */
DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
370
371
372/****************************************************************************/
373/* Device probe function.                                                   */
374/*                                                                          */
375/* Compares the device to the driver's list of supported devices and        */
376/* reports back to the OS whether this is the right driver for the device.  */
377/*                                                                          */
378/* Returns:                                                                 */
379/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
380/****************************************************************************/
381static int
382bce_probe(device_t dev)
383{
384	struct bce_type *t;
385	struct bce_softc *sc;
386	char *descbuf;
387	u16 vid = 0, did = 0, svid = 0, sdid = 0;
388
389	t = bce_devs;
390
391	sc = device_get_softc(dev);
392	bzero(sc, sizeof(struct bce_softc));
393	sc->bce_unit = device_get_unit(dev);
394	sc->bce_dev = dev;
395
396	/* Get the data for the device to be probed. */
397	vid  = pci_get_vendor(dev);
398	did  = pci_get_device(dev);
399	svid = pci_get_subvendor(dev);
400	sdid = pci_get_subdevice(dev);
401
402	DBPRINT(sc, BCE_VERBOSE_LOAD,
403		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
404		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
405
406	/* Look through the list of known devices for a match. */
407	while(t->bce_name != NULL) {
408
409		if ((vid == t->bce_vid) && (did == t->bce_did) &&
410			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
411			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
412
413			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
414
415			if (descbuf == NULL)
416				return(ENOMEM);
417
418			/* Print out the device identity. */
419			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s",
420				t->bce_name,
421			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
422			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
423			    bce_driver_version);
424
425			device_set_desc_copy(dev, descbuf);
426			free(descbuf, M_TEMP);
427			return(BUS_PROBE_DEFAULT);
428		}
429		t++;
430	}
431
432	DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No IOCTL match found!\n",
433		__FILE__, __LINE__);
434
435	return(ENXIO);
436}
437
438
439/****************************************************************************/
440/* Device attach function.                                                  */
441/*                                                                          */
442/* Allocates device resources, performs secondary chip identification,      */
443/* resets and initializes the hardware, and initializes driver instance     */
444/* variables.                                                               */
445/*                                                                          */
446/* Returns:                                                                 */
447/*   0 on success, positive value on failure.                               */
448/****************************************************************************/
449static int
450bce_attach(device_t dev)
451{
452	struct bce_softc *sc;
453	struct ifnet *ifp;
454	u32 val;
455	int mbuf, rid, rc = 0;
456
457	sc = device_get_softc(dev);
458	sc->bce_dev = dev;
459
460	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
461
462	mbuf = device_get_unit(dev);
463	sc->bce_unit = mbuf;
464
465	pci_enable_busmaster(dev);
466
467	/* Allocate PCI memory resources. */
468	rid = PCIR_BAR(0);
469	sc->bce_res = bus_alloc_resource_any(
470		dev, 							/* dev */
471		SYS_RES_MEMORY, 				/* type */
472		&rid,							/* rid */
473	    RF_ACTIVE | PCI_RF_DENSE);		/* flags */
474
475	if (sc->bce_res == NULL) {
476		BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n",
477			__FILE__, __LINE__);
478		rc = ENXIO;
479		goto bce_attach_fail;
480	}
481
482	/* Get various resource handles. */
483	sc->bce_btag    = rman_get_bustag(sc->bce_res);
484	sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
485	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
486
487	/* Allocate PCI IRQ resources. */
488	rid = 0;
489	sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
490	    RF_SHAREABLE | RF_ACTIVE);
491
492	if (sc->bce_irq == NULL) {
493		BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n",
494			__FILE__, __LINE__);
495		rc = ENXIO;
496		goto bce_attach_fail;
497	}
498
499	/* Initialize mutex for the current device instance. */
500	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
501
502	/*
503	 * Configure byte swap and enable indirect register access.
504	 * Rely on CPU to do target byte swapping on big endian systems.
505	 * Access to registers outside of PCI configurtion space are not
506	 * valid until this is done.
507	 */
508	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
509			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
510			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
511
512	/* Save ASIC revsion info. */
513	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
514
515	/* Weed out any non-production controller revisions. */
516	switch(BCE_CHIP_ID(sc)) {
517		case BCE_CHIP_ID_5706_A0:
518		case BCE_CHIP_ID_5706_A1:
519		case BCE_CHIP_ID_5708_A0:
520		case BCE_CHIP_ID_5708_B0:
521			BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
522				__FILE__, __LINE__,
523				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
524			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
525			rc = ENODEV;
526			goto bce_attach_fail;
527	}
528
529	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
530		BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
531			__FILE__, __LINE__);
532		rc = ENODEV;
533		goto bce_attach_fail;
534	}
535
536	/*
537	 * The embedded PCIe to PCI-X bridge (EPB)
538	 * in the 5708 cannot address memory above
539	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
540	 */
541	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
542		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
543	else
544		sc->max_bus_addr = BUS_SPACE_MAXADDR;
545
546	/*
547	 * Find the base address for shared memory access.
548	 * Newer versions of bootcode use a signature and offset
549	 * while older versions use a fixed address.
550	 */
551	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
552	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
553		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
554	else
555		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
556
557	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
558
559	/* Set initial device and PHY flags */
560	sc->bce_flags = 0;
561	sc->bce_phy_flags = 0;
562
563	/* Get PCI bus information (speed and type). */
564	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
565	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
566		u32 clkreg;
567
568		sc->bce_flags |= BCE_PCIX_FLAG;
569
570		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
571
572		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
573		switch (clkreg) {
574		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
575			sc->bus_speed_mhz = 133;
576			break;
577
578		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
579			sc->bus_speed_mhz = 100;
580			break;
581
582		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
583		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
584			sc->bus_speed_mhz = 66;
585			break;
586
587		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
588		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
589			sc->bus_speed_mhz = 50;
590			break;
591
592		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
593		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
594		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
595			sc->bus_speed_mhz = 33;
596			break;
597		}
598	} else {
599		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
600			sc->bus_speed_mhz = 66;
601		else
602			sc->bus_speed_mhz = 33;
603	}
604
605	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
606		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
607
608	BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
609		sc->bce_chipid,
610		((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
611		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
612		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
613		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
614		sc->bus_speed_mhz);
615
616	/* Reset the controller. */
617	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
618		rc = ENXIO;
619		goto bce_attach_fail;
620	}
621
622	/* Initialize the controller. */
623	if (bce_chipinit(sc)) {
624		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
625			__FILE__, __LINE__);
626		rc = ENXIO;
627		goto bce_attach_fail;
628	}
629
630	/* Perform NVRAM test. */
631	if (bce_nvram_test(sc)) {
632		BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
633			__FILE__, __LINE__);
634		rc = ENXIO;
635		goto bce_attach_fail;
636	}
637
638	/* Fetch the permanent Ethernet MAC address. */
639	bce_get_mac_addr(sc);
640
641	/*
642	 * Trip points control how many BDs
643	 * should be ready before generating an
644	 * interrupt while ticks control how long
645	 * a BD can sit in the chain before
646	 * generating an interrupt.  Set the default
647	 * values for the RX and TX rings.
648	 */
649
650#ifdef BCE_DRBUG
651	/* Force more frequent interrupts. */
652	sc->bce_tx_quick_cons_trip_int = 1;
653	sc->bce_tx_quick_cons_trip     = 1;
654	sc->bce_tx_ticks_int           = 0;
655	sc->bce_tx_ticks               = 0;
656
657	sc->bce_rx_quick_cons_trip_int = 1;
658	sc->bce_rx_quick_cons_trip     = 1;
659	sc->bce_rx_ticks_int           = 0;
660	sc->bce_rx_ticks               = 0;
661#else
662	sc->bce_tx_quick_cons_trip_int = 20;
663	sc->bce_tx_quick_cons_trip     = 20;
664	sc->bce_tx_ticks_int           = 80;
665	sc->bce_tx_ticks               = 80;
666
667	sc->bce_rx_quick_cons_trip_int = 6;
668	sc->bce_rx_quick_cons_trip     = 6;
669	sc->bce_rx_ticks_int           = 18;
670	sc->bce_rx_ticks               = 18;
671#endif
672
673	/* Update statistics once every second. */
674	sc->bce_stats_ticks = 1000000 & 0xffff00;
675
676	/*
677	 * The copper based NetXtreme II controllers
678	 * use an integrated PHY at address 1 while
679	 * the SerDes controllers use a PHY at
680	 * address 2.
681	 */
682	sc->bce_phy_addr = 1;
683
684	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
685		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
686		sc->bce_flags |= BCE_NO_WOL_FLAG;
687		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
688			sc->bce_phy_addr = 2;
689			val = REG_RD_IND(sc, sc->bce_shmem_base +
690					 BCE_SHARED_HW_CFG_CONFIG);
691			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
692				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
693		}
694	}
695
696	/* Allocate DMA memory resources. */
697	if (bce_dma_alloc(dev)) {
698		BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
699		    __FILE__, __LINE__);
700		rc = ENXIO;
701		goto bce_attach_fail;
702	}
703
704	/* Allocate an ifnet structure. */
705	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
706	if (ifp == NULL) {
707		BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n",
708			__FILE__, __LINE__);
709		rc = ENXIO;
710		goto bce_attach_fail;
711	}
712
713	/* Initialize the ifnet interface. */
714	ifp->if_softc        = sc;
715	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
716	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
717	ifp->if_ioctl        = bce_ioctl;
718	ifp->if_start        = bce_start;
719	ifp->if_timer        = 0;
720	ifp->if_watchdog     = bce_watchdog;
721	ifp->if_init         = bce_init;
722	ifp->if_mtu          = ETHERMTU;
723	ifp->if_hwassist     = BCE_IF_HWASSIST;
724	ifp->if_capabilities = BCE_IF_CAPABILITIES;
725	ifp->if_capenable    = ifp->if_capabilities;
726
727	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
728	sc->mbuf_alloc_size  = MCLBYTES;
729#ifdef DEVICE_POLLING
730	ifp->if_capabilities |= IFCAP_POLLING;
731#endif
732
733	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
734	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
735		ifp->if_baudrate = IF_Gbps(2.5);
736	else
737		ifp->if_baudrate = IF_Gbps(1);
738
739	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
740	IFQ_SET_READY(&ifp->if_snd);
741
742	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
743		BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n",
744			__FILE__, __LINE__);
745		rc = ENODEV;
746		goto bce_attach_fail;
747	} else {
748		/* Look for our PHY. */
749		if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
750			bce_ifmedia_sts)) {
751			BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n",
752				__FILE__, __LINE__);
753			rc = ENXIO;
754			goto bce_attach_fail;
755		}
756	}
757
758	/* Attach to the Ethernet interface list. */
759	ether_ifattach(ifp, sc->eaddr);
760
761#if __FreeBSD_version < 500000
762	callout_init(&sc->bce_stat_ch);
763#else
764	callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
765#endif
766
767	/* Hookup IRQ last. */
768	rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
769	   bce_intr, sc, &sc->bce_intrhand);
770
771	if (rc) {
772		BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n",
773			__FILE__, __LINE__);
774		bce_detach(dev);
775		goto bce_attach_exit;
776	}
777
778	/* Print some important debugging info. */
779	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
780
781	/* Add the supported sysctls to the kernel. */
782	bce_add_sysctls(sc);
783
784	goto bce_attach_exit;
785
786bce_attach_fail:
787	bce_release_resources(sc);
788
789bce_attach_exit:
790
791	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
792
793	return(rc);
794}
795
796
797/****************************************************************************/
798/* Device detach function.                                                  */
799/*                                                                          */
800/* Stops the controller, resets the controller, and releases resources.     */
801/*                                                                          */
802/* Returns:                                                                 */
803/*   0 on success, positive value on failure.                               */
804/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

#ifdef DEVICE_POLLING
	/* Polling must be turned off before the interface goes away. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Stop and reset the controller. */
	BCE_LOCK(sc);
	bce_stop(sc);
	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	BCE_UNLOCK(sc);

	/* Detach from the network stack before freeing anything it uses. */
	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		ifmedia_removeall(&sc->bce_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bce_miibus);
	}

	/* Release all remaining resources. */
	bce_release_resources(sc);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
845
846
847/****************************************************************************/
848/* Device shutdown function.                                                */
849/*                                                                          */
850/* Stops and resets the controller.                                         */
851/*                                                                          */
852/* Returns:                                                                 */
853/*   Nothing                                                                */
854/****************************************************************************/
855static void
856bce_shutdown(device_t dev)
857{
858	struct bce_softc *sc = device_get_softc(dev);
859
860	BCE_LOCK(sc);
861	bce_stop(sc);
862	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
863	BCE_UNLOCK(sc);
864}
865
866
867/****************************************************************************/
868/* Indirect register read.                                                  */
869/*                                                                          */
870/* Reads NetXtreme II registers using an index/data register pair in PCI    */
871/* configuration space.  Using this mechanism avoids issues with posted     */
872/* reads but is much slower than memory-mapped I/O.                         */
873/*                                                                          */
874/* Returns:                                                                 */
875/*   The value of the register.                                             */
876/****************************************************************************/
877static u32
878bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
879{
880	device_t dev;
881	dev = sc->bce_dev;
882
883	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
884#ifdef BCE_DEBUG
885	{
886		u32 val;
887		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
888		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
889			__FUNCTION__, offset, val);
890		return val;
891	}
892#else
893	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
894#endif
895}
896
897
898/****************************************************************************/
899/* Indirect register write.                                                 */
900/*                                                                          */
901/* Writes NetXtreme II registers using an index/data register pair in PCI   */
902/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
904/*                                                                          */
905/* Returns:                                                                 */
906/*   Nothing.                                                               */
907/****************************************************************************/
908static void
909bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
910{
911	device_t dev;
912	dev = sc->bce_dev;
913
914	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
915		__FUNCTION__, offset, val);
916
917	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
918	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
919}
920
921
922/****************************************************************************/
923/* Context memory write.                                                    */
924/*                                                                          */
925/* The NetXtreme II controller uses context memory to track connection      */
926/* information for L2 and higher network protocols.                         */
927/*                                                                          */
928/* Returns:                                                                 */
929/*   Nothing.                                                               */
930/****************************************************************************/
931static void
932bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
933{
934
935	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
936		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
937
938	offset += cid_addr;
939	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
940	REG_WR(sc, BCE_CTX_DATA, val);
941}
942
943
944/****************************************************************************/
945/* PHY register read.                                                       */
946/*                                                                          */
947/* Implements register reads on the MII bus.                                */
948/*                                                                          */
949/* Returns:                                                                 */
950/*   The value of the register.                                             */
951/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/*
	 * Temporarily disable hardware auto-polling of the PHY so it
	 * cannot race with this manual MDIO transaction.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back after writing — presumably flushes the posted write. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build the MDIO read command and start the transaction. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until START_BUSY clears, then capture the masked data bits. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/*
	 * START_BUSY still set here means the loop timed out (the masked
	 * value captured on success can never carry this bit).
	 */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		/* NOTE(review): re-reads COMM, discarding the masked value
		 * captured in the loop above — looks redundant; verify. */
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore hardware auto-polling if it was in use. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Return only the 16-bit register value (0 on timeout). */
	return (val & 0xffff);

}
1020
1021
1022/****************************************************************************/
1023/* PHY register write.                                                      */
1024/*                                                                          */
1025/* Implements register writes on the MII bus.                               */
1026/*                                                                          */
1027/* Returns:                                                                 */
/*   0 (the write always reports success; timeouts are only logged).        */
1029/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/*
	 * Temporarily disable hardware auto-polling of the PHY so it
	 * cannot race with this manual MDIO transaction.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back after writing — presumably flushes the posted write. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build the MDIO write command (data in the low bits) and start it. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the controller clears START_BUSY. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is only logged; the write still reports success below. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore hardware auto-polling if it was in use. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Always returns 0, even on timeout. */
	return 0;
}
1089
1090
1091/****************************************************************************/
1092/* MII bus status change.                                                   */
1093/*                                                                          */
1094/* Called by the MII bus driver when the PHY establishes link to set the    */
1095/* MAC interface registers.                                                 */
1096/*                                                                          */
1097/* Returns:                                                                 */
1098/*   Nothing.                                                               */
1099/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Clear the current port-mode bits before setting the new mode. */
	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
1130
1131
1132/****************************************************************************/
1133/* Acquire NVRAM lock.                                                      */
1134/*                                                                          */
1135/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* The driver uses arbitration lock 2; locks 0 and 1 are reserved for use   */
/* by other agents (such as the bootcode/firmware).                         */
1138/*                                                                          */
1139/* Returns:                                                                 */
1140/*   0 on success, positive value on failure.                               */
1141/****************************************************************************/
1142static int
1143bce_acquire_nvram_lock(struct bce_softc *sc)
1144{
1145	u32 val;
1146	int j;
1147
1148	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1149
1150	/* Request access to the flash interface. */
1151	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1152	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1153		val = REG_RD(sc, BCE_NVM_SW_ARB);
1154		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1155			break;
1156
1157		DELAY(5);
1158	}
1159
1160	if (j >= NVRAM_TIMEOUT_COUNT) {
1161		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1162		return EBUSY;
1163	}
1164
1165	return 0;
1166}
1167
1168
1169/****************************************************************************/
1170/* Release NVRAM lock.                                                      */
1171/*                                                                          */
1172/* When the caller is finished accessing NVRAM the lock must be released.   */
/* The driver uses arbitration lock 2; locks 0 and 1 are reserved for use   */
/* by other agents (such as the bootcode/firmware).                         */
1175/*                                                                          */
1176/* Returns:                                                                 */
1177/*   0 on success, positive value on failure.                               */
1178/****************************************************************************/
1179static int
1180bce_release_nvram_lock(struct bce_softc *sc)
1181{
1182	int j;
1183	u32 val;
1184
1185	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1186
1187	/*
1188	 * Relinquish nvram interface.
1189	 */
1190	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1191
1192	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1193		val = REG_RD(sc, BCE_NVM_SW_ARB);
1194		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1195			break;
1196
1197		DELAY(5);
1198	}
1199
1200	if (j >= NVRAM_TIMEOUT_COUNT) {
1201		DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1202		return EBUSY;
1203	}
1204
1205	return 0;
1206}
1207
1208
1209#ifdef BCE_NVRAM_WRITE_SUPPORT
1210/****************************************************************************/
1211/* Enable NVRAM write access.                                               */
1212/*                                                                          */
1213/* Before writing to NVRAM the caller must enable NVRAM writes.             */
1214/*                                                                          */
1215/* Returns:                                                                 */
1216/*   0 on success, positive value on failure.                               */
1217/****************************************************************************/
1218static int
1219bce_enable_nvram_write(struct bce_softc *sc)
1220{
1221	u32 val;
1222
1223	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1224
1225	val = REG_RD(sc, BCE_MISC_CFG);
1226	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1227
1228	if (!sc->bce_flash_info->buffered) {
1229		int j;
1230
1231		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1232		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1233
1234		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1235			DELAY(5);
1236
1237			val = REG_RD(sc, BCE_NVM_COMMAND);
1238			if (val & BCE_NVM_COMMAND_DONE)
1239				break;
1240		}
1241
1242		if (j >= NVRAM_TIMEOUT_COUNT) {
1243			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1244			return EBUSY;
1245		}
1246	}
1247	return 0;
1248}
1249
1250
1251/****************************************************************************/
1252/* Disable NVRAM write access.                                              */
1253/*                                                                          */
1254/* When the caller is finished writing to NVRAM write access must be        */
1255/* disabled.                                                                */
1256/*                                                                          */
1257/* Returns:                                                                 */
1258/*   Nothing.                                                               */
1259/****************************************************************************/
1260static void
1261bce_disable_nvram_write(struct bce_softc *sc)
1262{
1263	u32 val;
1264
1265	DBPRINT(sc, BCE_VERBOSE,  "Disabling NVRAM write.\n");
1266
1267	val = REG_RD(sc, BCE_MISC_CFG);
1268	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1269}
1270#endif
1271
1272
1273/****************************************************************************/
1274/* Enable NVRAM access.                                                     */
1275/*                                                                          */
1276/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1278/*                                                                          */
1279/* Returns:                                                                 */
1280/*   Nothing.                                                               */
1281/****************************************************************************/
1282static void
1283bce_enable_nvram_access(struct bce_softc *sc)
1284{
1285	u32 val;
1286
1287	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1288
1289	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1290	/* Enable both bits, even on read. */
1291	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1292	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1293}
1294
1295
1296/****************************************************************************/
1297/* Disable NVRAM access.                                                    */
1298/*                                                                          */
1299/* When the caller is finished accessing NVRAM access must be disabled.     */
1300/*                                                                          */
1301/* Returns:                                                                 */
1302/*   Nothing.                                                               */
1303/****************************************************************************/
1304static void
1305bce_disable_nvram_access(struct bce_softc *sc)
1306{
1307	u32 val;
1308
1309	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1310
1311	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1312
1313	/* Disable both bits, even after read. */
1314	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1315		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1316			BCE_NVM_ACCESS_ENABLE_WR_EN));
1317}
1318
1319
1320#ifdef BCE_NVRAM_WRITE_SUPPORT
1321/****************************************************************************/
1322/* Erase NVRAM page before writing.                                         */
1323/*                                                                          */
1324/* Non-buffered flash parts require that a page be erased before it is      */
1325/* written.                                                                 */
1326/*                                                                          */
1327/* Returns:                                                                 */
1328/*   0 on success, positive value on failure.                               */
1329/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	      BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	/* DONE never set: report the erase as failed. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
1372#endif /* BCE_NVRAM_WRITE_SUPPORT */
1373
1374
1375/****************************************************************************/
1376/* Read a dword (32 bits) from NVRAM.                                       */
1377/*                                                                          */
1378/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1379/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1380/*                                                                          */
1381/* Returns:                                                                 */
1382/*   0 on success and the 32 bit value read, positive value on failure.     */
1383/****************************************************************************/
1384static int
1385bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1386							u32 cmd_flags)
1387{
1388	u32 cmd;
1389	int i, rc = 0;
1390
1391	/* Build the command word. */
1392	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1393
1394	/* Calculate the offset for buffered flash. */
1395	if (sc->bce_flash_info->buffered) {
1396		offset = ((offset / sc->bce_flash_info->page_size) <<
1397			   sc->bce_flash_info->page_bits) +
1398			  (offset % sc->bce_flash_info->page_size);
1399	}
1400
1401	/*
1402	 * Clear the DONE bit separately, set the address to read,
1403	 * and issue the read.
1404	 */
1405	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1406	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1407	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1408
1409	/* Wait for completion. */
1410	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1411		u32 val;
1412
1413		DELAY(5);
1414
1415		val = REG_RD(sc, BCE_NVM_COMMAND);
1416		if (val & BCE_NVM_COMMAND_DONE) {
1417			val = REG_RD(sc, BCE_NVM_READ);
1418
1419			val = bce_be32toh(val);
1420			memcpy(ret_val, &val, 4);
1421			break;
1422		}
1423	}
1424
1425	/* Check for errors. */
1426	if (i >= NVRAM_TIMEOUT_COUNT) {
1427		BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1428			__FILE__, __LINE__, offset);
1429		rc = EBUSY;
1430	}
1431
1432	return(rc);
1433}
1434
1435
1436#ifdef BCE_NVRAM_WRITE_SUPPORT
1437/****************************************************************************/
1438/* Write a dword (32 bits) to NVRAM.                                        */
1439/*                                                                          */
1440/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1441/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1442/* enabled NVRAM write access.                                              */
1443/*                                                                          */
1444/* Returns:                                                                 */
1445/*   0 on success, positive value on failure.                               */
1446/****************************************************************************/
1447static int
1448bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1449	u32 cmd_flags)
1450{
1451	u32 cmd, val32;
1452	int j;
1453
1454	/* Build the command word. */
1455	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1456
1457	/* Calculate the offset for buffered flash. */
1458	if (sc->bce_flash_info->buffered) {
1459		offset = ((offset / sc->bce_flash_info->page_size) <<
1460			  sc->bce_flash_info->page_bits) +
1461			 (offset % sc->bce_flash_info->page_size);
1462	}
1463
1464	/*
1465	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1466	 * set the NVRAM address to write, and issue the write command
1467	 */
1468	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1469	memcpy(&val32, val, 4);
1470	val32 = htobe32(val32);
1471	REG_WR(sc, BCE_NVM_WRITE, val32);
1472	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1473	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1474
1475	/* Wait for completion. */
1476	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1477		DELAY(5);
1478
1479		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1480			break;
1481	}
1482	if (j >= NVRAM_TIMEOUT_COUNT) {
1483		BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1484			__FILE__, __LINE__, offset);
1485		return EBUSY;
1486	}
1487
1488	return 0;
1489}
1490#endif /* BCE_NVRAM_WRITE_SUPPORT */
1491
1492
1493/****************************************************************************/
1494/* Initialize NVRAM access.                                                 */
1495/*                                                                          */
1496/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1497/* access that device.                                                      */
1498/*                                                                          */
1499/* Returns:                                                                 */
1500/*   0 on success, positive value on failure.                               */
1501/****************************************************************************/
1502static int
1503bce_init_nvram(struct bce_softc *sc)
1504{
1505	u32 val;
1506	int j, entry_count, rc;
1507	struct flash_spec *flash;
1508
1509	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1510
1511	/* Determine the selected interface. */
1512	val = REG_RD(sc, BCE_NVM_CFG1);
1513
1514	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1515
1516	rc = 0;
1517
1518	/*
1519	 * Flash reconfiguration is required to support additional
1520	 * NVRAM devices not directly supported in hardware.
1521	 * Check if the flash interface was reconfigured
1522	 * by the bootcode.
1523	 */
1524
1525	if (val & 0x40000000) {
1526		/* Flash interface reconfigured by bootcode. */
1527
1528		DBPRINT(sc,BCE_INFO_LOAD,
1529			"bce_init_nvram(): Flash WAS reconfigured.\n");
1530
1531		for (j = 0, flash = &flash_table[0]; j < entry_count;
1532		     j++, flash++) {
1533			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1534			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1535				sc->bce_flash_info = flash;
1536				break;
1537			}
1538		}
1539	} else {
1540		/* Flash interface not yet reconfigured. */
1541		u32 mask;
1542
1543		DBPRINT(sc,BCE_INFO_LOAD,
1544			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1545
1546		if (val & (1 << 23))
1547			mask = FLASH_BACKUP_STRAP_MASK;
1548		else
1549			mask = FLASH_STRAP_MASK;
1550
1551		/* Look for the matching NVRAM device configuration data. */
1552		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1553
1554			/* Check if the device matches any of the known devices. */
1555			if ((val & mask) == (flash->strapping & mask)) {
1556				/* Found a device match. */
1557				sc->bce_flash_info = flash;
1558
1559				/* Request access to the flash interface. */
1560				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1561					return rc;
1562
1563				/* Reconfigure the flash interface. */
1564				bce_enable_nvram_access(sc);
1565				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1566				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1567				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1568				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1569				bce_disable_nvram_access(sc);
1570				bce_release_nvram_lock(sc);
1571
1572				break;
1573			}
1574		}
1575	}
1576
1577	/* Check if a matching device was found. */
1578	if (j == entry_count) {
1579		sc->bce_flash_info = NULL;
1580		BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1581			__FILE__, __LINE__);
1582		rc = ENODEV;
1583	}
1584
1585	/* Write the flash config data to the shared memory interface. */
1586	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1587	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1588	if (val)
1589		sc->bce_flash_size = val;
1590	else
1591		sc->bce_flash_size = sc->bce_flash_info->total_size;
1592
1593	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1594		sc->bce_flash_info->total_size);
1595
1596	DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1597
1598	return rc;
1599}
1600
1601
1602/****************************************************************************/
1603/* Read an arbitrary range of data from NVRAM.                              */
1604/*                                                                          */
1605/* Prepares the NVRAM interface for access and reads the requested data     */
1606/* into the supplied buffer.                                                */
1607/*                                                                          */
1608/* Returns:                                                                 */
1609/*   0 on success and the data read, positive value on failure.             */
1610/****************************************************************************/
1611static int
1612bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1613	int buf_size)
1614{
1615	int rc = 0;
1616	u32 cmd_flags, offset32, len32, extra;
1617
1618	if (buf_size == 0)
1619		return 0;
1620
1621	/* Request access to the flash interface. */
1622	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1623		return rc;
1624
1625	/* Enable access to flash interface */
1626	bce_enable_nvram_access(sc);
1627
1628	len32 = buf_size;
1629	offset32 = offset;
1630	extra = 0;
1631
1632	cmd_flags = 0;
1633
1634	if (offset32 & 3) {
1635		u8 buf[4];
1636		u32 pre_len;
1637
1638		offset32 &= ~3;
1639		pre_len = 4 - (offset & 3);
1640
1641		if (pre_len >= len32) {
1642			pre_len = len32;
1643			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1644		}
1645		else {
1646			cmd_flags = BCE_NVM_COMMAND_FIRST;
1647		}
1648
1649		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1650
1651		if (rc)
1652			return rc;
1653
1654		memcpy(ret_buf, buf + (offset & 3), pre_len);
1655
1656		offset32 += 4;
1657		ret_buf += pre_len;
1658		len32 -= pre_len;
1659	}
1660
1661	if (len32 & 3) {
1662		extra = 4 - (len32 & 3);
1663		len32 = (len32 + 4) & ~3;
1664	}
1665
1666	if (len32 == 4) {
1667		u8 buf[4];
1668
1669		if (cmd_flags)
1670			cmd_flags = BCE_NVM_COMMAND_LAST;
1671		else
1672			cmd_flags = BCE_NVM_COMMAND_FIRST |
1673				    BCE_NVM_COMMAND_LAST;
1674
1675		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1676
1677		memcpy(ret_buf, buf, 4 - extra);
1678	}
1679	else if (len32 > 0) {
1680		u8 buf[4];
1681
1682		/* Read the first word. */
1683		if (cmd_flags)
1684			cmd_flags = 0;
1685		else
1686			cmd_flags = BCE_NVM_COMMAND_FIRST;
1687
1688		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1689
1690		/* Advance to the next dword. */
1691		offset32 += 4;
1692		ret_buf += 4;
1693		len32 -= 4;
1694
1695		while (len32 > 4 && rc == 0) {
1696			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1697
1698			/* Advance to the next dword. */
1699			offset32 += 4;
1700			ret_buf += 4;
1701			len32 -= 4;
1702		}
1703
1704		if (rc)
1705			return rc;
1706
1707		cmd_flags = BCE_NVM_COMMAND_LAST;
1708		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1709
1710		memcpy(ret_buf, buf, 4 - extra);
1711	}
1712
1713	/* Disable access to flash interface and release the lock. */
1714	bce_disable_nvram_access(sc);
1715	bce_release_nvram_lock(sc);
1716
1717	return rc;
1718}
1719
1720
1721#ifdef BCE_NVRAM_WRITE_SUPPORT
1722/****************************************************************************/
1723/* Write an arbitrary range of data from NVRAM.                             */
1724/*                                                                          */
1725/* Prepares the NVRAM interface for write access and writes the requested   */
1726/* data from the supplied buffer.  The caller is responsible for            */
1727/* calculating any appropriate CRCs.                                        */
1728/*                                                                          */
1729/* Returns:                                                                 */
1730/*   0 on success, positive value on failure.                               */
1731/****************************************************************************/
1732static int
1733bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1734	int buf_size)
1735{
1736	u32 written, offset32, len32;
1737	u8 *buf, start[4], end[4];
1738	int rc = 0;
1739	int align_start, align_end;
1740
1741	buf = data_buf;
1742	offset32 = offset;
1743	len32 = buf_size;
1744	align_start = align_end = 0;
1745
1746	if ((align_start = (offset32 & 3))) {
1747		offset32 &= ~3;
1748		len32 += align_start;
1749		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1750			return rc;
1751	}
1752
1753	if (len32 & 3) {
1754	       	if ((len32 > 4) || !align_start) {
1755			align_end = 4 - (len32 & 3);
1756			len32 += align_end;
1757			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1758				end, 4))) {
1759				return rc;
1760			}
1761		}
1762	}
1763
1764	if (align_start || align_end) {
1765		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1766		if (buf == 0)
1767			return ENOMEM;
1768		if (align_start) {
1769			memcpy(buf, start, 4);
1770		}
1771		if (align_end) {
1772			memcpy(buf + len32 - 4, end, 4);
1773		}
1774		memcpy(buf + align_start, data_buf, buf_size);
1775	}
1776
1777	written = 0;
1778	while ((written < len32) && (rc == 0)) {
1779		u32 page_start, page_end, data_start, data_end;
1780		u32 addr, cmd_flags;
1781		int i;
1782		u8 flash_buffer[264];
1783
1784	    /* Find the page_start addr */
1785		page_start = offset32 + written;
1786		page_start -= (page_start % sc->bce_flash_info->page_size);
1787		/* Find the page_end addr */
1788		page_end = page_start + sc->bce_flash_info->page_size;
1789		/* Find the data_start addr */
1790		data_start = (written == 0) ? offset32 : page_start;
1791		/* Find the data_end addr */
1792		data_end = (page_end > offset32 + len32) ?
1793			(offset32 + len32) : page_end;
1794
1795		/* Request access to the flash interface. */
1796		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1797			goto nvram_write_end;
1798
1799		/* Enable access to flash interface */
1800		bce_enable_nvram_access(sc);
1801
1802		cmd_flags = BCE_NVM_COMMAND_FIRST;
1803		if (sc->bce_flash_info->buffered == 0) {
1804			int j;
1805
1806			/* Read the whole page into the buffer
1807			 * (non-buffer flash only) */
1808			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1809				if (j == (sc->bce_flash_info->page_size - 4)) {
1810					cmd_flags |= BCE_NVM_COMMAND_LAST;
1811				}
1812				rc = bce_nvram_read_dword(sc,
1813					page_start + j,
1814					&flash_buffer[j],
1815					cmd_flags);
1816
1817				if (rc)
1818					goto nvram_write_end;
1819
1820				cmd_flags = 0;
1821			}
1822		}
1823
1824		/* Enable writes to flash interface (unlock write-protect) */
1825		if ((rc = bce_enable_nvram_write(sc)) != 0)
1826			goto nvram_write_end;
1827
1828		/* Erase the page */
1829		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1830			goto nvram_write_end;
1831
1832		/* Re-enable the write again for the actual write */
1833		bce_enable_nvram_write(sc);
1834
1835		/* Loop to write back the buffer data from page_start to
1836		 * data_start */
1837		i = 0;
1838		if (sc->bce_flash_info->buffered == 0) {
1839			for (addr = page_start; addr < data_start;
1840				addr += 4, i += 4) {
1841
1842				rc = bce_nvram_write_dword(sc, addr,
1843					&flash_buffer[i], cmd_flags);
1844
1845				if (rc != 0)
1846					goto nvram_write_end;
1847
1848				cmd_flags = 0;
1849			}
1850		}
1851
1852		/* Loop to write the new data from data_start to data_end */
1853		for (addr = data_start; addr < data_end; addr += 4, i++) {
1854			if ((addr == page_end - 4) ||
1855				((sc->bce_flash_info->buffered) &&
1856				 (addr == data_end - 4))) {
1857
1858				cmd_flags |= BCE_NVM_COMMAND_LAST;
1859			}
1860			rc = bce_nvram_write_dword(sc, addr, buf,
1861				cmd_flags);
1862
1863			if (rc != 0)
1864				goto nvram_write_end;
1865
1866			cmd_flags = 0;
1867			buf += 4;
1868		}
1869
1870		/* Loop to write back the buffer data from data_end
1871		 * to page_end */
1872		if (sc->bce_flash_info->buffered == 0) {
1873			for (addr = data_end; addr < page_end;
1874				addr += 4, i += 4) {
1875
1876				if (addr == page_end-4) {
1877					cmd_flags = BCE_NVM_COMMAND_LAST;
1878                		}
1879				rc = bce_nvram_write_dword(sc, addr,
1880					&flash_buffer[i], cmd_flags);
1881
1882				if (rc != 0)
1883					goto nvram_write_end;
1884
1885				cmd_flags = 0;
1886			}
1887		}
1888
1889		/* Disable writes to flash interface (lock write-protect) */
1890		bce_disable_nvram_write(sc);
1891
1892		/* Disable access to flash interface */
1893		bce_disable_nvram_access(sc);
1894		bce_release_nvram_lock(sc);
1895
1896		/* Increment written */
1897		written += data_end - data_start;
1898	}
1899
1900nvram_write_end:
1901	if (align_start || align_end)
1902		free(buf, M_DEVBUF);
1903
1904	return rc;
1905}
1906#endif /* BCE_NVRAM_WRITE_SUPPORT */
1907
1908
1909/****************************************************************************/
1910/* Verifies that NVRAM is accessible and contains valid data.               */
1911/*                                                                          */
1912/* Reads the configuration data from NVRAM and verifies that the CRC is     */
1913/* correct.                                                                 */
1914/*                                                                          */
1915/* Returns:                                                                 */
1916/*   0 on success, positive value on failure.                               */
1917/****************************************************************************/
1918static int
1919bce_nvram_test(struct bce_softc *sc)
1920{
1921	u32 buf[BCE_NVRAM_SIZE / 4];
1922	u8 *data = (u8 *) buf;
1923	int rc = 0;
1924	u32 magic, csum;
1925
1926
1927	/*
1928	 * Check that the device NVRAM is valid by reading
1929	 * the magic value at offset 0.
1930	 */
1931	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1932		goto bce_nvram_test_done;
1933
1934
1935    magic = bce_be32toh(buf[0]);
1936	if (magic != BCE_NVRAM_MAGIC) {
1937		rc = ENODEV;
1938		BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1939			"Found: 0x%08X\n",
1940			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1941		goto bce_nvram_test_done;
1942	}
1943
1944	/*
1945	 * Verify that the device NVRAM includes valid
1946	 * configuration data.
1947	 */
1948	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1949		goto bce_nvram_test_done;
1950
1951	csum = ether_crc32_le(data, 0x100);
1952	if (csum != BCE_CRC32_RESIDUAL) {
1953		rc = ENODEV;
1954		BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1955			"Expected: 0x%08X, Found: 0x%08X\n",
1956			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1957		goto bce_nvram_test_done;
1958	}
1959
1960	csum = ether_crc32_le(data + 0x100, 0x100);
1961	if (csum != BCE_CRC32_RESIDUAL) {
1962		BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1963			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1964			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1965		rc = ENODEV;
1966	}
1967
1968bce_nvram_test_done:
1969	return rc;
1970}
1971
1972
1973/****************************************************************************/
1974/* Free any DMA memory owned by the driver.                                 */
1975/*                                                                          */
1976/* Scans through each data structre that requires DMA memory and frees      */
1977/* the memory if allocated.                                                 */
1978/*                                                                          */
1979/* Returns:                                                                 */
1980/*   Nothing.                                                               */
1981/****************************************************************************/
1982static void
1983bce_dma_free(struct bce_softc *sc)
1984{
1985	int i;
1986
1987	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1988
1989	/* Destroy the status block. */
1990	if (sc->status_block != NULL)
1991		bus_dmamem_free(
1992			sc->status_tag,
1993		    sc->status_block,
1994		    sc->status_map);
1995
1996	if (sc->status_map != NULL) {
1997		bus_dmamap_unload(
1998			sc->status_tag,
1999		    sc->status_map);
2000		bus_dmamap_destroy(sc->status_tag,
2001		    sc->status_map);
2002	}
2003
2004	if (sc->status_tag != NULL)
2005		bus_dma_tag_destroy(sc->status_tag);
2006
2007
2008	/* Destroy the statistics block. */
2009	if (sc->stats_block != NULL)
2010		bus_dmamem_free(
2011			sc->stats_tag,
2012		    sc->stats_block,
2013		    sc->stats_map);
2014
2015	if (sc->stats_map != NULL) {
2016		bus_dmamap_unload(
2017			sc->stats_tag,
2018		    sc->stats_map);
2019		bus_dmamap_destroy(sc->stats_tag,
2020		    sc->stats_map);
2021	}
2022
2023	if (sc->stats_tag != NULL)
2024		bus_dma_tag_destroy(sc->stats_tag);
2025
2026
2027	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2028	for (i = 0; i < TX_PAGES; i++ ) {
2029		if (sc->tx_bd_chain[i] != NULL)
2030			bus_dmamem_free(
2031				sc->tx_bd_chain_tag,
2032			    sc->tx_bd_chain[i],
2033			    sc->tx_bd_chain_map[i]);
2034
2035		if (sc->tx_bd_chain_map[i] != NULL) {
2036			bus_dmamap_unload(
2037				sc->tx_bd_chain_tag,
2038		    	sc->tx_bd_chain_map[i]);
2039			bus_dmamap_destroy(
2040				sc->tx_bd_chain_tag,
2041			    sc->tx_bd_chain_map[i]);
2042		}
2043
2044	}
2045
2046	/* Destroy the TX buffer descriptor tag. */
2047	if (sc->tx_bd_chain_tag != NULL)
2048		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2049
2050
2051	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2052	for (i = 0; i < RX_PAGES; i++ ) {
2053		if (sc->rx_bd_chain[i] != NULL)
2054			bus_dmamem_free(
2055				sc->rx_bd_chain_tag,
2056			    sc->rx_bd_chain[i],
2057			    sc->rx_bd_chain_map[i]);
2058
2059		if (sc->rx_bd_chain_map[i] != NULL) {
2060			bus_dmamap_unload(
2061				sc->rx_bd_chain_tag,
2062		    	sc->rx_bd_chain_map[i]);
2063			bus_dmamap_destroy(
2064				sc->rx_bd_chain_tag,
2065			    sc->rx_bd_chain_map[i]);
2066		}
2067	}
2068
2069	/* Destroy the RX buffer descriptor tag. */
2070	if (sc->rx_bd_chain_tag != NULL)
2071		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2072
2073
2074	/* Unload and destroy the TX mbuf maps. */
2075	for (i = 0; i < TOTAL_TX_BD; i++) {
2076		if (sc->tx_mbuf_map[i] != NULL) {
2077			bus_dmamap_unload(sc->tx_mbuf_tag,
2078				sc->tx_mbuf_map[i]);
2079			bus_dmamap_destroy(sc->tx_mbuf_tag,
2080	 			sc->tx_mbuf_map[i]);
2081		}
2082	}
2083
2084	/* Destroy the TX mbuf tag. */
2085	if (sc->tx_mbuf_tag != NULL)
2086		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2087
2088
2089	/* Unload and destroy the RX mbuf maps. */
2090	for (i = 0; i < TOTAL_RX_BD; i++) {
2091		if (sc->rx_mbuf_map[i] != NULL) {
2092			bus_dmamap_unload(sc->rx_mbuf_tag,
2093				sc->rx_mbuf_map[i]);
2094			bus_dmamap_destroy(sc->rx_mbuf_tag,
2095	 			sc->rx_mbuf_map[i]);
2096		}
2097	}
2098
2099	/* Destroy the RX mbuf tag. */
2100	if (sc->rx_mbuf_tag != NULL)
2101		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2102
2103
2104	/* Destroy the parent tag */
2105	if (sc->parent_tag != NULL)
2106		bus_dma_tag_destroy(sc->parent_tag);
2107
2108	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2109
2110}
2111
2112
2113/****************************************************************************/
2114/* Get DMA memory from the OS.                                              */
2115/*                                                                          */
2116/* Validates that the OS has provided DMA buffers in response to a          */
2117/* bus_dmamap_load() call and saves the physical address of those buffers.  */
2118/* When the callback is used the OS will return 0 for the mapping function  */
2119/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2120/* failures back to the caller.                                             */
2121/*                                                                          */
2122/* Returns:                                                                 */
2123/*   Nothing.                                                               */
2124/****************************************************************************/
2125static void
2126bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2127{
2128	struct bce_dmamap_arg *map_arg = arg;
2129	struct bce_softc *sc = map_arg->sc;
2130
2131	/* Simulate a mapping failure. */
2132	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2133		BCE_PRINTF(sc, "%s(%d): Simulating DMA mapping error.\n",
2134			__FILE__, __LINE__);
2135		error = ENOMEM);
2136
2137	/* Check for an error and signal the caller that an error occurred. */
2138	if (error || (nseg > map_arg->maxsegs)) {
2139		BCE_PRINTF(sc, "%s(%d): DMA mapping error! error = %d, "
2140		"nseg = %d, maxsegs = %d\n",
2141			__FILE__, __LINE__, error, nseg, map_arg->maxsegs);
2142		map_arg->maxsegs = 0;
2143		goto bce_dma_map_addr_exit;
2144	}
2145
2146	map_arg->busaddr = segs->ds_addr;
2147
2148bce_dma_map_addr_exit:
2149	return;
2150}
2151
2152
2153/****************************************************************************/
2154/* Map TX buffers into TX buffer descriptors.                               */
2155/*                                                                          */
2156/* Given a series of DMA memory containting an outgoing frame, map the      */
2157/* segments into the tx_bd structure used by the hardware.                  */
2158/*                                                                          */
2159/* Returns:                                                                 */
2160/*   Nothing.                                                               */
2161/****************************************************************************/
2162static void
2163bce_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs,
2164	int nseg, bus_size_t mapsize, int error)
2165{
2166	struct bce_dmamap_arg *map_arg;
2167	struct bce_softc *sc;
2168	struct tx_bd *txbd = NULL;
2169	int i = 0;
2170	u16 prod, chain_prod;
2171	u32	prod_bseq;
2172#ifdef BCE_DEBUG
2173	u16 debug_prod;
2174#endif
2175
2176	map_arg = arg;
2177	sc = map_arg->sc;
2178
2179	if (error) {
2180		DBPRINT(sc, BCE_WARN, "%s(): Called with error = %d\n",
2181			__FUNCTION__, error);
2182		return;
2183	}
2184
2185	/* Signal error to caller if there's too many segments */
2186	if (nseg > map_arg->maxsegs) {
2187		DBPRINT(sc, BCE_WARN,
2188			"%s(): Mapped TX descriptors: max segs = %d, "
2189			"actual segs = %d\n",
2190			__FUNCTION__, map_arg->maxsegs, nseg);
2191
2192		map_arg->maxsegs = 0;
2193		return;
2194	}
2195
2196	/* prod points to an empty tx_bd at this point. */
2197	prod       = map_arg->prod;
2198	chain_prod = map_arg->chain_prod;
2199	prod_bseq  = map_arg->prod_bseq;
2200
2201#ifdef BCE_DEBUG
2202	debug_prod = chain_prod;
2203#endif
2204
2205	DBPRINT(sc, BCE_INFO_SEND,
2206		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
2207		"prod_bseq = 0x%08X\n",
2208		__FUNCTION__, prod, chain_prod, prod_bseq);
2209
2210	/*
2211	 * Cycle through each mbuf segment that makes up
2212	 * the outgoing frame, gathering the mapping info
2213	 * for that segment and creating a tx_bd to for
2214	 * the mbuf.
2215	 */
2216
2217	txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2218
2219	/* Setup the first tx_bd for the first segment. */
2220	txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2221	txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2222	txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
2223	txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
2224			TX_BD_FLAGS_START);
2225	prod_bseq += segs[i].ds_len;
2226
2227	/* Setup any remaing segments. */
2228	for (i = 1; i < nseg; i++) {
2229		prod       = NEXT_TX_BD(prod);
2230		chain_prod = TX_CHAIN_IDX(prod);
2231
2232		txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2233
2234		txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2235		txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2236		txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
2237		txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);
2238
2239		prod_bseq += segs[i].ds_len;
2240	}
2241
2242	/* Set the END flag on the last TX buffer descriptor. */
2243	txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);
2244
2245	DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nseg));
2246
2247	DBPRINT(sc, BCE_INFO_SEND,
2248		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
2249		"prod_bseq = 0x%08X\n",
2250		__FUNCTION__, prod, chain_prod, prod_bseq);
2251
2252	/* prod points to the last tx_bd at this point. */
2253	map_arg->maxsegs    = nseg;
2254	map_arg->prod       = prod;
2255	map_arg->chain_prod = chain_prod;
2256	map_arg->prod_bseq  = prod_bseq;
2257}
2258
2259
2260/****************************************************************************/
2261/* Allocate any DMA memory needed by the driver.                            */
2262/*                                                                          */
2263/* Allocates DMA memory needed for the various global structures needed by  */
2264/* hardware.                                                                */
2265/*                                                                          */
2266/* Returns:                                                                 */
2267/*   0 for success, positive value for failure.                             */
2268/****************************************************************************/
2269static int
2270bce_dma_alloc(device_t dev)
2271{
2272	struct bce_softc *sc;
2273	int i, error, rc = 0;
2274	struct bce_dmamap_arg map_arg;
2275
2276	sc = device_get_softc(dev);
2277
2278	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2279
2280	/*
2281	 * Allocate the parent bus DMA tag appropriate for PCI.
2282	 */
2283	if (bus_dma_tag_create(NULL,		/* parent     */
2284			BCE_DMA_ALIGN,				/* alignment  */
2285			BCE_DMA_BOUNDARY,			/* boundary   */
2286			sc->max_bus_addr,			/* lowaddr    */
2287			BUS_SPACE_MAXADDR,			/* highaddr   */
2288			NULL, 						/* filterfunc */
2289			NULL,						/* filterarg  */
2290			MAXBSIZE, 					/* maxsize    */
2291			BUS_SPACE_UNRESTRICTED,		/* nsegments  */
2292			BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2293			0,							/* flags      */
2294			NULL, 						/* locfunc    */
2295			NULL,						/* lockarg    */
2296			&sc->parent_tag)) {
2297		BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2298			__FILE__, __LINE__);
2299		rc = ENOMEM;
2300		goto bce_dma_alloc_exit;
2301	}
2302
2303	/*
2304	 * Create a DMA tag for the status block, allocate and clear the
2305	 * memory, map the memory into DMA space, and fetch the physical
2306	 * address of the block.
2307	 */
2308	if (bus_dma_tag_create(
2309			sc->parent_tag,			/* parent      */
2310	    	BCE_DMA_ALIGN,			/* alignment   */
2311	    	BCE_DMA_BOUNDARY,		/* boundary    */
2312	    	sc->max_bus_addr,		/* lowaddr     */
2313	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2314	    	NULL, 					/* filterfunc  */
2315	    	NULL, 					/* filterarg   */
2316	    	BCE_STATUS_BLK_SZ, 		/* maxsize     */
2317	    	1,						/* nsegments   */
2318	    	BCE_STATUS_BLK_SZ, 		/* maxsegsize  */
2319	    	0,						/* flags       */
2320	    	NULL, 					/* lockfunc    */
2321	    	NULL,					/* lockarg     */
2322	    	&sc->status_tag)) {
2323		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2324			__FILE__, __LINE__);
2325		rc = ENOMEM;
2326		goto bce_dma_alloc_exit;
2327	}
2328
2329	if(bus_dmamem_alloc(
2330			sc->status_tag,				/* dmat        */
2331	    	(void **)&sc->status_block,	/* vaddr       */
2332	    	BUS_DMA_NOWAIT,					/* flags       */
2333	    	&sc->status_map)) {
2334		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2335			__FILE__, __LINE__);
2336		rc = ENOMEM;
2337		goto bce_dma_alloc_exit;
2338	}
2339
2340	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2341
2342	map_arg.sc = sc;
2343	map_arg.maxsegs = 1;
2344
2345	error = bus_dmamap_load(
2346			sc->status_tag,	   		/* dmat        */
2347	    	sc->status_map,	   		/* map         */
2348	    	sc->status_block,	 	/* buf         */
2349	    	BCE_STATUS_BLK_SZ,	 	/* buflen      */
2350	    	bce_dma_map_addr, 	 	/* callback    */
2351	    	&map_arg,			 	/* callbackarg */
2352	    	BUS_DMA_NOWAIT);		/* flags       */
2353
2354	if(error || (map_arg.maxsegs == 0)) {
2355		BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2356			__FILE__, __LINE__);
2357		rc = ENOMEM;
2358		goto bce_dma_alloc_exit;
2359	}
2360
2361	sc->status_block_paddr = map_arg.busaddr;
2362	/* DRC - Fix for 64 bit addresses. */
2363	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2364		(u32) sc->status_block_paddr);
2365
2366	/*
2367	 * Create a DMA tag for the statistics block, allocate and clear the
2368	 * memory, map the memory into DMA space, and fetch the physical
2369	 * address of the block.
2370	 */
2371	if (bus_dma_tag_create(
2372			sc->parent_tag,			/* parent      */
2373	    	BCE_DMA_ALIGN,	 		/* alignment   */
2374	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2375	    	sc->max_bus_addr,		/* lowaddr     */
2376	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2377	    	NULL,		 	  		/* filterfunc  */
2378	    	NULL, 			  		/* filterarg   */
2379	    	BCE_STATS_BLK_SZ, 		/* maxsize     */
2380	    	1,				  		/* nsegments   */
2381	    	BCE_STATS_BLK_SZ, 		/* maxsegsize  */
2382	    	0, 				  		/* flags       */
2383	    	NULL, 			  		/* lockfunc    */
2384	    	NULL, 			  		/* lockarg     */
2385	    	&sc->stats_tag)) {
2386		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2387			__FILE__, __LINE__);
2388		rc = ENOMEM;
2389		goto bce_dma_alloc_exit;
2390	}
2391
2392	if (bus_dmamem_alloc(
2393			sc->stats_tag,				/* dmat        */
2394	    	(void **)&sc->stats_block,	/* vaddr       */
2395	    	BUS_DMA_NOWAIT,	 			/* flags       */
2396	    	&sc->stats_map)) {
2397		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2398			__FILE__, __LINE__);
2399		rc = ENOMEM;
2400		goto bce_dma_alloc_exit;
2401	}
2402
2403	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2404
2405	map_arg.sc = sc;
2406	map_arg.maxsegs = 1;
2407
2408	error = bus_dmamap_load(
2409			sc->stats_tag,	 	/* dmat        */
2410	    	sc->stats_map,	 	/* map         */
2411	    	sc->stats_block, 	/* buf         */
2412	    	BCE_STATS_BLK_SZ,	/* buflen      */
2413	    	bce_dma_map_addr,	/* callback    */
2414	    	&map_arg, 		 	/* callbackarg */
2415	    	BUS_DMA_NOWAIT);	/* flags       */
2416
2417	if(error || (map_arg.maxsegs == 0)) {
2418		BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2419			__FILE__, __LINE__);
2420		rc = ENOMEM;
2421		goto bce_dma_alloc_exit;
2422	}
2423
2424	sc->stats_block_paddr = map_arg.busaddr;
2425	/* DRC - Fix for 64 bit address. */
2426	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2427		(u32) sc->stats_block_paddr);
2428
2429	/*
2430	 * Create a DMA tag for the TX buffer descriptor chain,
2431	 * allocate and clear the  memory, and fetch the
2432	 * physical address of the block.
2433	 */
2434	if(bus_dma_tag_create(
2435			sc->parent_tag,		  /* parent      */
2436	    	BCM_PAGE_SIZE,		  /* alignment   */
2437	    	BCE_DMA_BOUNDARY,	  /* boundary    */
2438			sc->max_bus_addr,	  /* lowaddr     */
2439			BUS_SPACE_MAXADDR, 	  /* highaddr    */
2440			NULL, 				  /* filterfunc  */
2441			NULL, 				  /* filterarg   */
2442			BCE_TX_CHAIN_PAGE_SZ, /* maxsize     */
2443			1,			  		  /* nsegments   */
2444			BCE_TX_CHAIN_PAGE_SZ, /* maxsegsize  */
2445			0,				 	  /* flags       */
2446			NULL, 				  /* lockfunc    */
2447			NULL,				  /* lockarg     */
2448			&sc->tx_bd_chain_tag)) {
2449		BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2450			__FILE__, __LINE__);
2451		rc = ENOMEM;
2452		goto bce_dma_alloc_exit;
2453	}
2454
2455	for (i = 0; i < TX_PAGES; i++) {
2456
2457		if(bus_dmamem_alloc(
2458				sc->tx_bd_chain_tag,			/* tag   */
2459	    		(void **)&sc->tx_bd_chain[i],	/* vaddr */
2460	    		BUS_DMA_NOWAIT,					/* flags */
2461		    	&sc->tx_bd_chain_map[i])) {
2462			BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2463				"chain DMA memory!\n", __FILE__, __LINE__);
2464			rc = ENOMEM;
2465			goto bce_dma_alloc_exit;
2466		}
2467
2468		map_arg.maxsegs = 1;
2469		map_arg.sc = sc;
2470
2471		error = bus_dmamap_load(
2472				sc->tx_bd_chain_tag,	 /* dmat        */
2473	    		sc->tx_bd_chain_map[i],	 /* map         */
2474	    		sc->tx_bd_chain[i],		 /* buf         */
2475		    	BCE_TX_CHAIN_PAGE_SZ,  	 /* buflen      */
2476		    	bce_dma_map_addr, 	   	 /* callback    */
2477	    		&map_arg, 			   	 /* callbackarg */
2478	    		BUS_DMA_NOWAIT);	   	 /* flags       */
2479
2480		if(error || (map_arg.maxsegs == 0)) {
2481			BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2482				__FILE__, __LINE__);
2483			rc = ENOMEM;
2484			goto bce_dma_alloc_exit;
2485		}
2486
2487		sc->tx_bd_chain_paddr[i] = map_arg.busaddr;
2488		/* DRC - Fix for 64 bit systems. */
2489		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2490			i, (u32) sc->tx_bd_chain_paddr[i]);
2491	}
2492
2493	/* Create a DMA tag for TX mbufs. */
2494	if (bus_dma_tag_create(
2495			sc->parent_tag,	 	 	/* parent      */
2496	    	BCE_DMA_ALIGN,	 		/* alignment   */
2497	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2498			sc->max_bus_addr,		/* lowaddr     */
2499			BUS_SPACE_MAXADDR,		/* highaddr    */
2500			NULL, 			  		/* filterfunc  */
2501			NULL, 			  		/* filterarg   */
2502			MCLBYTES * BCE_MAX_SEGMENTS,	/* maxsize     */
2503			BCE_MAX_SEGMENTS,  		/* nsegments   */
2504			MCLBYTES,				/* maxsegsize  */
2505			0,				 		/* flags       */
2506			NULL, 			  		/* lockfunc    */
2507			NULL,			  		/* lockarg     */
2508	    	&sc->tx_mbuf_tag)) {
2509		BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2510			__FILE__, __LINE__);
2511		rc = ENOMEM;
2512		goto bce_dma_alloc_exit;
2513	}
2514
2515	/* Create DMA maps for the TX mbufs clusters. */
2516	for (i = 0; i < TOTAL_TX_BD; i++) {
2517		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2518			&sc->tx_mbuf_map[i])) {
2519			BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2520				__FILE__, __LINE__);
2521			rc = ENOMEM;
2522			goto bce_dma_alloc_exit;
2523		}
2524	}
2525
2526	/*
2527	 * Create a DMA tag for the RX buffer descriptor chain,
2528	 * allocate and clear the  memory, and fetch the physical
2529	 * address of the blocks.
2530	 */
2531	if (bus_dma_tag_create(
2532			sc->parent_tag,			/* parent      */
2533	    	BCM_PAGE_SIZE,			/* alignment   */
2534	    	BCE_DMA_BOUNDARY,		/* boundary    */
2535			BUS_SPACE_MAXADDR,		/* lowaddr     */
2536			sc->max_bus_addr,		/* lowaddr     */
2537			NULL,					/* filter      */
2538			NULL, 					/* filterarg   */
2539			BCE_RX_CHAIN_PAGE_SZ,	/* maxsize     */
2540			1, 						/* nsegments   */
2541			BCE_RX_CHAIN_PAGE_SZ,	/* maxsegsize  */
2542			0,				 		/* flags       */
2543			NULL,					/* lockfunc    */
2544			NULL,					/* lockarg     */
2545			&sc->rx_bd_chain_tag)) {
2546		BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2547			__FILE__, __LINE__);
2548		rc = ENOMEM;
2549		goto bce_dma_alloc_exit;
2550	}
2551
2552	for (i = 0; i < RX_PAGES; i++) {
2553
2554		if (bus_dmamem_alloc(
2555				sc->rx_bd_chain_tag,			/* tag   */
2556	    		(void **)&sc->rx_bd_chain[i], 	/* vaddr */
2557	    		BUS_DMA_NOWAIT,				  	/* flags */
2558		    	&sc->rx_bd_chain_map[i])) {
2559			BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2560				"DMA memory!\n", __FILE__, __LINE__);
2561			rc = ENOMEM;
2562			goto bce_dma_alloc_exit;
2563		}
2564
2565		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2566
2567		map_arg.maxsegs = 1;
2568		map_arg.sc = sc;
2569
2570		error = bus_dmamap_load(
2571				sc->rx_bd_chain_tag,	/* dmat        */
2572	    		sc->rx_bd_chain_map[i],	/* map         */
2573	    		sc->rx_bd_chain[i],		/* buf         */
2574		    	BCE_RX_CHAIN_PAGE_SZ,  	/* buflen      */
2575		    	bce_dma_map_addr,	   	/* callback    */
2576	    		&map_arg,			   	/* callbackarg */
2577	    		BUS_DMA_NOWAIT);		/* flags       */
2578
2579		if(error || (map_arg.maxsegs == 0)) {
2580			BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2581				__FILE__, __LINE__);
2582			rc = ENOMEM;
2583			goto bce_dma_alloc_exit;
2584		}
2585
2586		sc->rx_bd_chain_paddr[i] = map_arg.busaddr;
2587		/* DRC - Fix for 64 bit systems. */
2588		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2589			i, (u32) sc->rx_bd_chain_paddr[i]);
2590	}
2591
2592	/*
2593	 * Create a DMA tag for RX mbufs.
2594	 */
2595	if (bus_dma_tag_create(
2596			sc->parent_tag,			/* parent      */
2597	    	BCE_DMA_ALIGN,		  	/* alignment   */
2598	    	BCE_DMA_BOUNDARY,	  	/* boundary    */
2599			sc->max_bus_addr,	  	/* lowaddr     */
2600			BUS_SPACE_MAXADDR, 	  	/* highaddr    */
2601			NULL, 				  	/* filterfunc  */
2602			NULL, 				  	/* filterarg   */
2603			MJUM9BYTES,				/* maxsize     */
2604			BCE_MAX_SEGMENTS,  		/* nsegments   */
2605			MJUM9BYTES,				/* maxsegsize  */
2606			0,				 	  	/* flags       */
2607			NULL, 				  	/* lockfunc    */
2608			NULL,				  	/* lockarg     */
2609	    	&sc->rx_mbuf_tag)) {
2610		BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2611			__FILE__, __LINE__);
2612		rc = ENOMEM;
2613		goto bce_dma_alloc_exit;
2614	}
2615
2616	/* Create DMA maps for the RX mbuf clusters. */
2617	for (i = 0; i < TOTAL_RX_BD; i++) {
2618		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2619				&sc->rx_mbuf_map[i])) {
2620			BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2621				__FILE__, __LINE__);
2622			rc = ENOMEM;
2623			goto bce_dma_alloc_exit;
2624		}
2625	}
2626
2627bce_dma_alloc_exit:
2628	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2629
2630	return(rc);
2631}
2632
2633
2634/****************************************************************************/
2635/* Release all resources used by the driver.                                */
2636/*                                                                          */
2637/* Releases all resources acquired by the driver including interrupts,      */
2638/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2639/*                                                                          */
2640/* Returns:                                                                 */
2641/*   Nothing.                                                               */
2642/****************************************************************************/
2643static void
2644bce_release_resources(struct bce_softc *sc)
2645{
2646	device_t dev;
2647
2648	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2649
2650	dev = sc->bce_dev;
2651
2652	bce_dma_free(sc);
2653
2654	if (sc->bce_intrhand != NULL)
2655		bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2656
2657	if (sc->bce_irq != NULL)
2658		bus_release_resource(dev,
2659			SYS_RES_IRQ,
2660			0,
2661			sc->bce_irq);
2662
2663	if (sc->bce_res != NULL)
2664		bus_release_resource(dev,
2665			SYS_RES_MEMORY,
2666		    PCIR_BAR(0),
2667		    sc->bce_res);
2668
2669	if (sc->bce_ifp != NULL)
2670		if_free(sc->bce_ifp);
2671
2672
2673	if (mtx_initialized(&sc->bce_mtx))
2674		BCE_LOCK_DESTROY(sc);
2675
2676	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2677
2678}
2679
2680
2681/****************************************************************************/
2682/* Firmware synchronization.                                                */
2683/*                                                                          */
2684/* Before performing certain events such as a chip reset, synchronize with  */
2685/* the firmware first.                                                      */
2686/*                                                                          */
2687/* Returns:                                                                 */
2688/*   0 for success, positive value for failure.                             */
2689/****************************************************************************/
2690static int
2691bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2692{
2693	int i, rc = 0;
2694	u32 val;
2695
2696	/* Don't waste any time if we've timed out before. */
2697	if (sc->bce_fw_timed_out) {
2698		rc = EBUSY;
2699		goto bce_fw_sync_exit;
2700	}
2701
2702	/* Increment the message sequence number. */
2703	sc->bce_fw_wr_seq++;
2704	msg_data |= sc->bce_fw_wr_seq;
2705
2706 	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2707
2708	/* Send the message to the bootcode driver mailbox. */
2709	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2710
2711	/* Wait for the bootcode to acknowledge the message. */
2712	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2713		/* Check for a response in the bootcode firmware mailbox. */
2714		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2715		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2716			break;
2717		DELAY(1000);
2718	}
2719
2720	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2721	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2722		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2723
2724		BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2725			"msg_data = 0x%08X\n",
2726			__FILE__, __LINE__, msg_data);
2727
2728		msg_data &= ~BCE_DRV_MSG_CODE;
2729		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2730
2731		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2732
2733		sc->bce_fw_timed_out = 1;
2734		rc = EBUSY;
2735	}
2736
2737bce_fw_sync_exit:
2738	return (rc);
2739}
2740
2741
2742/****************************************************************************/
2743/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2744/*                                                                          */
2745/* Returns:                                                                 */
2746/*   Nothing.                                                               */
2747/****************************************************************************/
2748static void
2749bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2750	u32 rv2p_code_len, u32 rv2p_proc)
2751{
2752	int i;
2753	u32 val;
2754
2755	for (i = 0; i < rv2p_code_len; i += 8) {
2756		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2757		rv2p_code++;
2758		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2759		rv2p_code++;
2760
2761		if (rv2p_proc == RV2P_PROC1) {
2762			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2763			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2764		}
2765		else {
2766			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2767			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2768		}
2769	}
2770
2771	/* Reset the processor, un-stall is done later. */
2772	if (rv2p_proc == RV2P_PROC1) {
2773		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2774	}
2775	else {
2776		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2777	}
2778}
2779
2780
2781/****************************************************************************/
2782/* Load RISC processor firmware.                                            */
2783/*                                                                          */
2784/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2785/* associated with a particular processor.                                  */
2786/*                                                                          */
2787/* Returns:                                                                 */
2788/*   Nothing.                                                               */
2789/****************************************************************************/
2790static void
2791bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2792	struct fw_info *fw)
2793{
2794	u32 offset;
2795	u32 val;
2796
2797	/* Halt the CPU. */
2798	val = REG_RD_IND(sc, cpu_reg->mode);
2799	val |= cpu_reg->mode_value_halt;
2800	REG_WR_IND(sc, cpu_reg->mode, val);
2801	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2802
2803	/* Load the Text area. */
2804	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2805	if (fw->text) {
2806		int j;
2807
2808		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2809			REG_WR_IND(sc, offset, fw->text[j]);
2810	        }
2811	}
2812
2813	/* Load the Data area. */
2814	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2815	if (fw->data) {
2816		int j;
2817
2818		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2819			REG_WR_IND(sc, offset, fw->data[j]);
2820		}
2821	}
2822
2823	/* Load the SBSS area. */
2824	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2825	if (fw->sbss) {
2826		int j;
2827
2828		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2829			REG_WR_IND(sc, offset, fw->sbss[j]);
2830		}
2831	}
2832
2833	/* Load the BSS area. */
2834	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2835	if (fw->bss) {
2836		int j;
2837
2838		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2839			REG_WR_IND(sc, offset, fw->bss[j]);
2840		}
2841	}
2842
2843	/* Load the Read-Only area. */
2844	offset = cpu_reg->spad_base +
2845		(fw->rodata_addr - cpu_reg->mips_view_base);
2846	if (fw->rodata) {
2847		int j;
2848
2849		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2850			REG_WR_IND(sc, offset, fw->rodata[j]);
2851		}
2852	}
2853
2854	/* Clear the pre-fetch instruction. */
2855	REG_WR_IND(sc, cpu_reg->inst, 0);
2856	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2857
2858	/* Start the CPU. */
2859	val = REG_RD_IND(sc, cpu_reg->mode);
2860	val &= ~cpu_reg->mode_value_halt;
2861	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2862	REG_WR_IND(sc, cpu_reg->mode, val);
2863}
2864
2865
2866/****************************************************************************/
2867/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2868/*                                                                          */
2869/* Loads the firmware for each CPU and starts the CPU.                      */
2870/*                                                                          */
2871/* Returns:                                                                 */
2872/*   Nothing.                                                               */
2873/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Each on-chip processor is brought up the same way: cpu_reg
	 * describes that processor's control registers and scratchpad,
	 * fw describes its firmware image (from if_bcefw.h), and
	 * bce_load_cpu_fw() halts the CPU, copies the image in, and
	 * restarts it.  cpu_reg and fw are reused for every processor,
	 * with every field reassigned before each load.
	 */

	/* Initialize the RV2P processor. */
	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_RXP_b06FwReleaseMajor;
	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
	fw.ver_fix = bce_RXP_b06FwReleaseFix;
	fw.start_addr = bce_RXP_b06FwStartAddr;

	fw.text_addr = bce_RXP_b06FwTextAddr;
	fw.text_len = bce_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_RXP_b06FwText;

	fw.data_addr = bce_RXP_b06FwDataAddr;
	fw.data_len = bce_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_RXP_b06FwData;

	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
	fw.sbss_len = bce_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_RXP_b06FwSbss;

	fw.bss_addr = bce_RXP_b06FwBssAddr;
	fw.bss_len = bce_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_RXP_b06FwBss;

	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
	fw.rodata_len = bce_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_RXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TXP_b06FwReleaseMajor;
	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
	fw.ver_fix = bce_TXP_b06FwReleaseFix;
	fw.start_addr = bce_TXP_b06FwStartAddr;

	fw.text_addr = bce_TXP_b06FwTextAddr;
	fw.text_len = bce_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TXP_b06FwText;

	fw.data_addr = bce_TXP_b06FwDataAddr;
	fw.data_len = bce_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TXP_b06FwData;

	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
	fw.sbss_len = bce_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TXP_b06FwSbss;

	fw.bss_addr = bce_TXP_b06FwBssAddr;
	fw.bss_len = bce_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TXP_b06FwBss;

	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
	fw.rodata_len = bce_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
	fw.start_addr = bce_TPAT_b06FwStartAddr;

	fw.text_addr = bce_TPAT_b06FwTextAddr;
	fw.text_len = bce_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TPAT_b06FwText;

	fw.data_addr = bce_TPAT_b06FwDataAddr;
	fw.data_len = bce_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TPAT_b06FwData;

	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
	fw.sbss_len = bce_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TPAT_b06FwSbss;

	fw.bss_addr = bce_TPAT_b06FwBssAddr;
	fw.bss_len = bce_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TPAT_b06FwBss;

	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
	fw.rodata_len = bce_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TPAT_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_COM_b06FwReleaseMajor;
	fw.ver_minor = bce_COM_b06FwReleaseMinor;
	fw.ver_fix = bce_COM_b06FwReleaseFix;
	fw.start_addr = bce_COM_b06FwStartAddr;

	fw.text_addr = bce_COM_b06FwTextAddr;
	fw.text_len = bce_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_COM_b06FwText;

	fw.data_addr = bce_COM_b06FwDataAddr;
	fw.data_len = bce_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_COM_b06FwData;

	fw.sbss_addr = bce_COM_b06FwSbssAddr;
	fw.sbss_len = bce_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_COM_b06FwSbss;

	fw.bss_addr = bce_COM_b06FwBssAddr;
	fw.bss_len = bce_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_COM_b06FwBss;

	fw.rodata_addr = bce_COM_b06FwRodataAddr;
	fw.rodata_len = bce_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_COM_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
}
3072
3073
3074/****************************************************************************/
3075/* Initialize context memory.                                               */
3076/*                                                                          */
3077/* Clears the memory associated with each Context ID (CID).                 */
3078/*                                                                          */
3079/* Returns:                                                                 */
3080/*   Nothing.                                                               */
3081/****************************************************************************/
3082static void
3083bce_init_context(struct bce_softc *sc)
3084{
3085	u32 vcid;
3086
3087	vcid = 96;
3088	while (vcid) {
3089		u32 vcid_addr, pcid_addr, offset;
3090
3091		vcid--;
3092
3093   		vcid_addr = GET_CID_ADDR(vcid);
3094		pcid_addr = vcid_addr;
3095
3096		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3097		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3098
3099		/* Zero out the context. */
3100		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3101			CTX_WR(sc, 0x00, offset, 0);
3102		}
3103
3104		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3105		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3106	}
3107}
3108
3109
3110/****************************************************************************/
3111/* Fetch the permanent MAC address of the controller.                       */
3112/*                                                                          */
3113/* Returns:                                                                 */
3114/*   Nothing.                                                               */
3115/****************************************************************************/
3116static void
3117bce_get_mac_addr(struct bce_softc *sc)
3118{
3119	u32 mac_lo = 0, mac_hi = 0;
3120
3121	/*
3122	 * The NetXtreme II bootcode populates various NIC
3123	 * power-on and runtime configuration items in a
3124	 * shared memory area.  The factory configured MAC
3125	 * address is available from both NVRAM and the
3126	 * shared memory area so we'll read the value from
3127	 * shared memory for speed.
3128	 */
3129
3130	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3131		BCE_PORT_HW_CFG_MAC_UPPER);
3132	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3133		BCE_PORT_HW_CFG_MAC_LOWER);
3134
3135	if ((mac_lo == 0) && (mac_hi == 0)) {
3136		BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3137			__FILE__, __LINE__);
3138	} else {
3139		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3140		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3141		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3142		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3143		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3144		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3145	}
3146
3147	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3148}
3149
3150
3151/****************************************************************************/
3152/* Program the MAC address.                                                 */
3153/*                                                                          */
3154/* Returns:                                                                 */
3155/*   Nothing.                                                               */
3156/****************************************************************************/
3157static void
3158bce_set_mac_addr(struct bce_softc *sc)
3159{
3160	u32 val;
3161	u8 *mac_addr = sc->eaddr;
3162
3163	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3164
3165	val = (mac_addr[0] << 8) | mac_addr[1];
3166
3167	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3168
3169	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3170		(mac_addr[4] << 8) | mac_addr[5];
3171
3172	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3173}
3174
3175
3176/****************************************************************************/
3177/* Stop the controller.                                                     */
3178/*                                                                          */
3179/* Returns:                                                                 */
3180/*   Nothing.                                                               */
3181/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* The caller must hold the softc lock for the whole teardown. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* NOTE(review): the comment below says mii can be NULL when called
	 * from bce_detach(); presumably device_get_softc() returns NULL in
	 * that case rather than sc->bce_miibus itself being NULL -- confirm. */
	mii = device_get_softc(sc->bce_miibus);

	/* Stop the periodic statistics/tick callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	/* The read-back flushes the write before the settle delay. */
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */

	/* Temporarily force IFF_UP so mii_mediachg() acts on the PHY. */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	/*
	 * If we are called from bce_detach(), mii is already NULL.
	 */
	if (mii != NULL) {
		/* Swap in IFM_NONE, apply it, then restore the old media
		 * word so the user's selection survives the down/up cycle. */
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
	}

	/* Restore the saved interface flags and cancel the TX watchdog. */
	ifp->if_flags = itmp;
	ifp->if_timer = 0;

	sc->bce_link = 0;

	/* Mark the interface as no longer running. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

}
3245
3246
/****************************************************************************/
/* Reset the chip.                                                          */
/*                                                                          */
/* Coordinates with the bootcode firmware before and after the reset, then  */
/* issues the core reset request and waits for it to complete.              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read-back flushes the posted write before the delay. */
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	/* (10 polls x 10us delay; the REQ/BSY bits clear when done.) */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BCE_PRINTF(sc, "%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bce_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	/* The diag register must read back as 0x01020304 post-reset. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bce_reset_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3327
3328
3329static int
3330bce_chipinit(struct bce_softc *sc)
3331{
3332	u32 val;
3333	int rc = 0;
3334
3335	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3336
3337	/* Make sure the interrupt is not active. */
3338	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3339
3340	/* Initialize DMA byte/word swapping, configure the number of DMA  */
3341	/* channels and PCI clock compensation delay.                      */
3342	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3343	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3344#if BYTE_ORDER == BIG_ENDIAN
3345	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3346#endif
3347	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3348	      DMA_READ_CHANS << 12 |
3349	      DMA_WRITE_CHANS << 16;
3350
3351	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3352
3353	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3354		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3355
3356	/*
3357	 * This setting resolves a problem observed on certain Intel PCI
3358	 * chipsets that cannot handle multiple outstanding DMA operations.
3359	 * See errata E9_5706A1_65.
3360	 */
3361	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3362	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3363	    !(sc->bce_flags & BCE_PCIX_FLAG))
3364		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3365
3366	REG_WR(sc, BCE_DMA_CONFIG, val);
3367
3368	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3369	if (sc->bce_flags & BCE_PCIX_FLAG) {
3370		u16 val;
3371
3372		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3373		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3374	}
3375
3376	/* Enable the RX_V2P and Context state machines before access. */
3377	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3378	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3379	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3380	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3381
3382	/* Initialize context mapping and zero out the quick contexts. */
3383	bce_init_context(sc);
3384
3385	/* Initialize the on-boards CPUs */
3386	bce_init_cpus(sc);
3387
3388	/* Prepare NVRAM for access. */
3389	if (bce_init_nvram(sc)) {
3390		rc = ENODEV;
3391		goto bce_chipinit_exit;
3392	}
3393
3394	/* Set the kernel bypass block size */
3395	val = REG_RD(sc, BCE_MQ_CONFIG);
3396	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3397	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3398	REG_WR(sc, BCE_MQ_CONFIG, val);
3399
3400	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3401	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3402	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3403
3404	val = (BCM_PAGE_BITS - 8) << 24;
3405	REG_WR(sc, BCE_RV2P_CONFIG, val);
3406
3407	/* Configure page size. */
3408	val = REG_RD(sc, BCE_TBDR_CONFIG);
3409	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3410	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3411	REG_WR(sc, BCE_TBDR_CONFIG, val);
3412
3413bce_chipinit_exit:
3414	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3415
3416	return(rc);
3417}
3418
3419
3420/****************************************************************************/
3421/* Initialize the controller in preparation to send/receive traffic.        */
3422/*                                                                          */
3423/* Returns:                                                                 */
3424/*   0 for success, positive value for failure.                             */
3425/****************************************************************************/
3426static int
3427bce_blockinit(struct bce_softc *sc)
3428{
3429	u32 reg, val;
3430	int rc = 0;
3431
3432	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3433
3434	/* Load the hardware default MAC address. */
3435	bce_set_mac_addr(sc);
3436
3437	/* Set the Ethernet backoff seed value */
3438	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3439	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3440	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3441	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3442
3443	sc->last_status_idx = 0;
3444	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3445
3446	/* Set up link change interrupt generation. */
3447	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3448
3449	/* Program the physical address of the status block. */
3450	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3451		BCE_ADDR_LO(sc->status_block_paddr));
3452	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3453		BCE_ADDR_HI(sc->status_block_paddr));
3454
3455	/* Program the physical address of the statistics block. */
3456	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3457		BCE_ADDR_LO(sc->stats_block_paddr));
3458	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3459		BCE_ADDR_HI(sc->stats_block_paddr));
3460
3461	/* Program various host coalescing parameters. */
3462	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3463		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3464	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3465		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3466	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3467		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3468	REG_WR(sc, BCE_HC_TX_TICKS,
3469		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3470	REG_WR(sc, BCE_HC_RX_TICKS,
3471		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3472	REG_WR(sc, BCE_HC_COM_TICKS,
3473		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3474	REG_WR(sc, BCE_HC_CMD_TICKS,
3475		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3476	REG_WR(sc, BCE_HC_STATS_TICKS,
3477		(sc->bce_stats_ticks & 0xffff00));
3478	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3479		0xbb8);  /* 3ms */
3480	REG_WR(sc, BCE_HC_CONFIG,
3481		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3482		BCE_HC_CONFIG_COLLECT_STATS));
3483
3484	/* Clear the internal statistics counters. */
3485	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3486
3487	/* Verify that bootcode is running. */
3488	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3489
3490	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3491		BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3492			__FILE__, __LINE__);
3493		reg = 0);
3494
3495	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3496	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3497		BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3498			"Expected: 08%08X\n", __FILE__, __LINE__,
3499			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3500			BCE_DEV_INFO_SIGNATURE_MAGIC);
3501		rc = ENODEV;
3502		goto bce_blockinit_exit;
3503	}
3504
3505	/* Check if any management firmware is running. */
3506	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3507	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3508		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3509		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3510	}
3511
3512	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3513	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3514
3515	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3516	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3517
3518	/* Enable link state change interrupt generation. */
3519	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3520
3521	/* Enable all remaining blocks in the MAC. */
3522	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3523	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3524	DELAY(20);
3525
3526bce_blockinit_exit:
3527	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3528
3529	return (rc);
3530}
3531
3532
3533/****************************************************************************/
3534/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3535/*                                                                          */
3536/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3537/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3538/* necessary.                                                               */
3539/*                                                                          */
3540/* Returns:                                                                 */
3541/*   0 for success, positive value for failure.                             */
3542/****************************************************************************/
static int
bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
	u32 *prod_bseq)
{
	bus_dmamap_t		map;
	/* A single cluster maps to at most 4 DMA segments (one rx_bd each). */
	bus_dma_segment_t	segs[4];
	struct mbuf *m_new = NULL;
	struct rx_bd		*rxbd;
	int i, nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	/* No mbuf was passed in; allocate a fresh header plus cluster. */
	if (m == NULL) {

		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
				__FILE__, __LINE__);
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
				__FILE__, __LINE__);

			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);
		/* Attach a cluster; success is indicated by the M_EXT flag. */
		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
		if (!(m_new->m_flags & M_EXT)) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
				__FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
	} else {
		/* Recycle the caller's mbuf: reset its length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * NOTE(review): a caller-supplied (recycled) mbuf is also freed on
	 * mapping failure; current callers treat any failure here as fatal
	 * (bce_rx_intr panics), so the dangling reference is never used.
	 */
	if (error) {
		BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
			__FILE__, __LINE__);

		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_buf_exit;
	}

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n",
			__FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
	*prod_bseq += segs[0].ds_len;

	/* Chain any remaining segments, one rx_bd per DMA segment. */
	for (i = 1; i < nsegs; i++) {

		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		rxbd->rx_bd_len       = htole32(segs[i].ds_len);
		rxbd->rx_bd_flags     = 0;
		*prod_bseq += segs[i].ds_len;
	}

	/* Mark the final rx_bd of the packet. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf (at the last rx_bd index) and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
3678
3679
3680/****************************************************************************/
3681/* Allocate memory and initialize the TX data structures.                   */
3682/*                                                                          */
3683/* Returns:                                                                 */
3684/*   0 for success, positive value for failure.                             */
3685/****************************************************************************/
3686static int
3687bce_init_tx_chain(struct bce_softc *sc)
3688{
3689	struct tx_bd *txbd;
3690	u32 val;
3691	int i, rc = 0;
3692
3693	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3694
3695	/* Set the initial TX producer/consumer indices. */
3696	sc->tx_prod        = 0;
3697	sc->tx_cons        = 0;
3698	sc->tx_prod_bseq   = 0;
3699	sc->used_tx_bd = 0;
3700	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3701
3702	/*
3703	 * The NetXtreme II supports a linked-list structre called
3704	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3705	 * consists of a series of 1 or more chain pages, each of which
3706	 * consists of a fixed number of BD entries.
3707	 * The last BD entry on each page is a pointer to the next page
3708	 * in the chain, and the last pointer in the BD chain
3709	 * points back to the beginning of the chain.
3710	 */
3711
3712	/* Set the TX next pointer chain entries. */
3713	for (i = 0; i < TX_PAGES; i++) {
3714		int j;
3715
3716		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3717
3718		/* Check if we've reached the last page. */
3719		if (i == (TX_PAGES - 1))
3720			j = 0;
3721		else
3722			j = i + 1;
3723
3724		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3725		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3726	}
3727
3728	/*
3729	 * Initialize the context ID for an L2 TX chain.
3730	 */
3731	val = BCE_L2CTX_TYPE_TYPE_L2;
3732	val |= BCE_L2CTX_TYPE_SIZE_L2;
3733	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3734
3735	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3736	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3737
3738	/* Point the hardware to the first page in the chain. */
3739	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3740	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3741	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3742	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3743
3744	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3745
3746	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3747
3748	return(rc);
3749}
3750
3751
3752/****************************************************************************/
3753/* Free memory and clear the TX data structures.                            */
3754/*                                                                          */
3755/* Returns:                                                                 */
3756/*   Nothing.                                                               */
3757/****************************************************************************/
3758static void
3759bce_free_tx_chain(struct bce_softc *sc)
3760{
3761	int i;
3762
3763	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3764
3765	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3766	for (i = 0; i < TOTAL_TX_BD; i++) {
3767		if (sc->tx_mbuf_ptr[i] != NULL) {
3768			if (sc->tx_mbuf_map != NULL)
3769				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3770					BUS_DMASYNC_POSTWRITE);
3771			m_freem(sc->tx_mbuf_ptr[i]);
3772			sc->tx_mbuf_ptr[i] = NULL;
3773			DBRUNIF(1, sc->tx_mbuf_alloc--);
3774		}
3775	}
3776
3777	/* Clear each TX chain page. */
3778	for (i = 0; i < TX_PAGES; i++)
3779		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3780
3781	/* Check if we lost any mbufs in the process. */
3782	DBRUNIF((sc->tx_mbuf_alloc),
3783		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3784			"from tx chain!\n",
3785			__FILE__, __LINE__, sc->tx_mbuf_alloc));
3786
3787	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3788}
3789
3790
3791/****************************************************************************/
3792/* Allocate memory and initialize the RX data structures.                   */
3793/*                                                                          */
3794/* Returns:                                                                 */
3795/*   0 for success, positive value for failure.                             */
3796/****************************************************************************/
3797static int
3798bce_init_rx_chain(struct bce_softc *sc)
3799{
3800	struct rx_bd *rxbd;
3801	int i, rc = 0;
3802	u16 prod, chain_prod;
3803	u32 prod_bseq, val;
3804
3805	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3806
3807	/* Initialize the RX producer and consumer indices. */
3808	sc->rx_prod        = 0;
3809	sc->rx_cons        = 0;
3810	sc->rx_prod_bseq   = 0;
3811	sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
3812	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3813
3814	/* Initialize the RX next pointer chain entries. */
3815	for (i = 0; i < RX_PAGES; i++) {
3816		int j;
3817
3818		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3819
3820		/* Check if we've reached the last page. */
3821		if (i == (RX_PAGES - 1))
3822			j = 0;
3823		else
3824			j = i + 1;
3825
3826		/* Setup the chain page pointers. */
3827		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3828		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3829	}
3830
3831	/* Initialize the context ID for an L2 RX chain. */
3832	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3833	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3834	val |= 0x02 << 8;
3835	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3836
3837	/* Point the hardware to the first page in the chain. */
3838	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3839	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3840	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3841	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3842
3843	/* Allocate mbuf clusters for the rx_bd chain. */
3844	prod = prod_bseq = 0;
3845	while (prod < BCE_RX_SLACK_SPACE) {
3846		chain_prod = RX_CHAIN_IDX(prod);
3847		if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3848			BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
3849				__FILE__, __LINE__, chain_prod);
3850			rc = ENOBUFS;
3851			break;
3852		}
3853		prod = NEXT_RX_BD(prod);
3854	}
3855
3856	/* Save the RX chain producer index. */
3857	sc->rx_prod      = prod;
3858	sc->rx_prod_bseq = prod_bseq;
3859
3860	for (i = 0; i < RX_PAGES; i++) {
3861		bus_dmamap_sync(
3862			sc->rx_bd_chain_tag,
3863	    	sc->rx_bd_chain_map[i],
3864		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3865	}
3866
3867	/* Tell the chip about the waiting rx_bd's. */
3868	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3869	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3870
3871	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3872
3873	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3874
3875	return(rc);
3876}
3877
3878
3879/****************************************************************************/
3880/* Free memory and clear the RX data structures.                            */
3881/*                                                                          */
3882/* Returns:                                                                 */
3883/*   Nothing.                                                               */
3884/****************************************************************************/
3885static void
3886bce_free_rx_chain(struct bce_softc *sc)
3887{
3888	int i;
3889
3890	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3891
3892	/* Free any mbufs still in the RX mbuf chain. */
3893	for (i = 0; i < TOTAL_RX_BD; i++) {
3894		if (sc->rx_mbuf_ptr[i] != NULL) {
3895			if (sc->rx_mbuf_map[i] != NULL)
3896				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3897					BUS_DMASYNC_POSTREAD);
3898			m_freem(sc->rx_mbuf_ptr[i]);
3899			sc->rx_mbuf_ptr[i] = NULL;
3900			DBRUNIF(1, sc->rx_mbuf_alloc--);
3901		}
3902	}
3903
3904	/* Clear each RX chain page. */
3905	for (i = 0; i < RX_PAGES; i++)
3906		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3907
3908	/* Check if we lost any mbufs in the process. */
3909	DBRUNIF((sc->rx_mbuf_alloc),
3910		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3911			__FILE__, __LINE__, sc->rx_mbuf_alloc));
3912
3913	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3914}
3915
3916
3917/****************************************************************************/
3918/* Set media options.                                                       */
3919/*                                                                          */
3920/* Returns:                                                                 */
3921/*   0 for success, positive value for failure.                             */
3922/****************************************************************************/
3923static int
3924bce_ifmedia_upd(struct ifnet *ifp)
3925{
3926	struct bce_softc *sc;
3927	struct mii_data *mii;
3928	struct ifmedia *ifm;
3929	int rc = 0;
3930
3931	sc = ifp->if_softc;
3932	ifm = &sc->bce_ifmedia;
3933
3934	/* DRC - ToDo: Add SerDes support. */
3935
3936	mii = device_get_softc(sc->bce_miibus);
3937	sc->bce_link = 0;
3938	if (mii->mii_instance) {
3939		struct mii_softc *miisc;
3940		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3941		    miisc = LIST_NEXT(miisc, mii_list))
3942			mii_phy_reset(miisc);
3943	}
3944	mii_mediachg(mii);
3945
3946	return(rc);
3947}
3948
3949
3950/****************************************************************************/
3951/* Reports current media status.                                            */
3952/*                                                                          */
3953/* Returns:                                                                 */
3954/*   Nothing.                                                               */
3955/****************************************************************************/
3956static void
3957bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3958{
3959	struct bce_softc *sc;
3960	struct mii_data *mii;
3961
3962	sc = ifp->if_softc;
3963
3964	BCE_LOCK(sc);
3965
3966	mii = device_get_softc(sc->bce_miibus);
3967
3968	/* DRC - ToDo: Add SerDes support. */
3969
3970	mii_pollstat(mii);
3971	ifmr->ifm_active = mii->mii_media_active;
3972	ifmr->ifm_status = mii->mii_media_status;
3973
3974	BCE_UNLOCK(sc);
3975}
3976
3977
3978/****************************************************************************/
3979/* Handles PHY generated interrupt events.                                  */
3980/*                                                                          */
3981/* Returns:                                                                 */
3982/*   Nothing.                                                               */
3983/****************************************************************************/
3984static void
3985bce_phy_intr(struct bce_softc *sc)
3986{
3987	u32 new_link_state, old_link_state;
3988
3989	new_link_state = sc->status_block->status_attn_bits &
3990		STATUS_ATTN_BITS_LINK_STATE;
3991	old_link_state = sc->status_block->status_attn_bits_ack &
3992		STATUS_ATTN_BITS_LINK_STATE;
3993
3994	/* Handle any changes if the link state has changed. */
3995	if (new_link_state != old_link_state) {
3996
3997		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3998
3999		sc->bce_link = 0;
4000		callout_stop(&sc->bce_stat_ch);
4001		bce_tick_locked(sc);
4002
4003		/* Update the status_attn_bits_ack field in the status block. */
4004		if (new_link_state) {
4005			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4006				STATUS_ATTN_BITS_LINK_STATE);
4007			DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
4008		}
4009		else {
4010			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4011				STATUS_ATTN_BITS_LINK_STATE);
4012			DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
4013		}
4014
4015	}
4016
4017	/* Acknowledge the link change interrupt. */
4018	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4019}
4020
4021
4022/****************************************************************************/
4023/* Handles received frame interrupt events.                                 */
4024/*                                                                          */
4025/* Returns:                                                                 */
4026/*   Nothing.                                                               */
4027/****************************************************************************/
4028static void
4029bce_rx_intr(struct bce_softc *sc)
4030{
4031	struct status_block *sblk = sc->status_block;
4032	struct ifnet *ifp = sc->bce_ifp;
4033	u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4034	u32 sw_prod_bseq;
4035	struct l2_fhdr *l2fhdr;
4036
4037	DBRUNIF(1, sc->rx_interrupts++);
4038
4039	/* Prepare the RX chain pages to be accessed by the host CPU. */
4040	for (int i = 0; i < RX_PAGES; i++)
4041		bus_dmamap_sync(sc->rx_bd_chain_tag,
4042		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4043
4044	/* Get the hardware's view of the RX consumer index. */
4045	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4046	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4047		hw_cons++;
4048
4049	/* Get working copies of the driver's view of the RX indices. */
4050	sw_cons = sc->rx_cons;
4051	sw_prod = sc->rx_prod;
4052	sw_prod_bseq = sc->rx_prod_bseq;
4053
4054	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4055		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4056		__FUNCTION__, sw_prod, sw_cons,
4057		sw_prod_bseq);
4058
4059	/* Prevent speculative reads from getting ahead of the status block. */
4060	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4061		BUS_SPACE_BARRIER_READ);
4062
4063	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4064		sc->rx_low_watermark = sc->free_rx_bd);
4065
4066	/*
4067	 * Scan through the receive chain as long
4068	 * as there is work to do.
4069	 */
4070	while (sw_cons != hw_cons) {
4071		struct mbuf *m;
4072		struct rx_bd *rxbd;
4073		unsigned int len;
4074		u32 status;
4075
4076		/* Convert the producer/consumer indices to an actual rx_bd index. */
4077		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4078		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4079
4080		/* Get the used rx_bd. */
4081		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4082		sc->free_rx_bd++;
4083
4084		DBRUN(BCE_VERBOSE_RECV,
4085			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
4086			bce_dump_rxbd(sc, sw_chain_cons, rxbd));
4087
4088#ifdef DEVICE_POLLING
4089		if (ifp->if_capenable & IFCAP_POLLING) {
4090			if (sc->bce_rxcycles <= 0)
4091				break;
4092			sc->bce_rxcycles--;
4093		}
4094#endif
4095
4096		/* The mbuf is stored with the last rx_bd entry of a packet. */
4097		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4098
4099			/* Validate that this is the last rx_bd. */
4100			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4101				BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
4102				__FILE__, __LINE__, sw_chain_cons);
4103				bce_breakpoint(sc));
4104
4105			/* DRC - ToDo: If the received packet is small, say less */
4106			/*             than 128 bytes, allocate a new mbuf here, */
4107			/*             copy the data to that mbuf, and recycle   */
4108			/*             the mapped jumbo frame.                   */
4109
4110			/* Unmap the mbuf from DMA space. */
4111			bus_dmamap_sync(sc->rx_mbuf_tag,
4112			    sc->rx_mbuf_map[sw_chain_cons],
4113		    	BUS_DMASYNC_POSTREAD);
4114			bus_dmamap_unload(sc->rx_mbuf_tag,
4115			    sc->rx_mbuf_map[sw_chain_cons]);
4116
4117			/* Remove the mbuf from the driver's chain. */
4118			m = sc->rx_mbuf_ptr[sw_chain_cons];
4119			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4120
4121			/*
4122			 * Frames received on the NetXteme II are prepended
4123			 * with the l2_fhdr structure which provides status
4124			 * information about the received frame (including
4125			 * VLAN tags and checksum info) and are also
4126			 * automatically adjusted to align the IP header
4127			 * (i.e. two null bytes are inserted before the
4128			 * Ethernet header).
4129			 */
4130			l2fhdr = mtod(m, struct l2_fhdr *);
4131
4132			len    = l2fhdr->l2_fhdr_pkt_len;
4133			status = l2fhdr->l2_fhdr_status;
4134
4135			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4136				BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
4137				status = status | L2_FHDR_ERRORS_PHY_DECODE);
4138
4139			/* Watch for unusual sized frames. */
4140			DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
4141				BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
4142					"Min(%d), Actual(%d), Max(%d)\n",
4143					__FILE__, __LINE__, (int) BCE_MIN_MTU,
4144					len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
4145				bce_dump_mbuf(sc, m);
4146		 		bce_breakpoint(sc));
4147
4148			len -= ETHER_CRC_LEN;
4149
4150			/* Check the received frame for errors. */
4151			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
4152				L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
4153				L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
4154
4155				ifp->if_ierrors++;
4156				DBRUNIF(1, sc->l2fhdr_status_errors++);
4157
4158				/* Reuse the mbuf for a new frame. */
4159				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4160
4161					DBRUNIF(1, bce_breakpoint(sc));
4162					panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);
4163
4164				}
4165				goto bce_rx_int_next_rx;
4166			}
4167
4168			/*
4169			 * Get a new mbuf for the rx_bd.   If no new
4170			 * mbufs are available then reuse the current mbuf,
4171			 * log an ierror on the interface, and generate
4172			 * an error in the system log.
4173			 */
4174			if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4175
4176				DBRUN(BCE_WARN,
4177					BCE_PRINTF(sc, "%s(%d): Failed to allocate "
4178					"new mbuf, incoming frame dropped!\n",
4179					__FILE__, __LINE__));
4180
4181				ifp->if_ierrors++;
4182
4183				/* Try and reuse the exisitng mbuf. */
4184				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4185
4186					DBRUNIF(1, bce_breakpoint(sc));
4187					panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);
4188
4189				}
4190				goto bce_rx_int_next_rx;
4191			}
4192
4193			/* Skip over the l2_fhdr when passing the data up the stack. */
4194			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4195
4196			/* Adjust the packet length to match the received data. */
4197			m->m_pkthdr.len = m->m_len = len;
4198
4199			/* Send the packet to the appropriate interface. */
4200			m->m_pkthdr.rcvif = ifp;
4201
4202			DBRUN(BCE_VERBOSE_RECV,
4203				struct ether_header *eh;
4204				eh = mtod(m, struct ether_header *);
4205				BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
4206					__FUNCTION__, eh->ether_dhost, ":",
4207					eh->ether_shost, ":", htons(eh->ether_type)));
4208
4209			/* Validate the checksum if offload enabled. */
4210			if (ifp->if_capenable & IFCAP_RXCSUM) {
4211
4212				/* Check for an IP datagram. */
4213				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4214					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4215
4216					/* Check if the IP checksum is valid. */
4217					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4218						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4219					else
4220						DBPRINT(sc, BCE_WARN_SEND,
4221							"%s(): Invalid IP checksum = 0x%04X!\n",
4222							__FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
4223				}
4224
4225				/* Check for a valid TCP/UDP frame. */
4226				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4227					L2_FHDR_STATUS_UDP_DATAGRAM)) {
4228
4229					/* Check for a good TCP/UDP checksum. */
4230					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4231						      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4232						m->m_pkthdr.csum_data =
4233						    l2fhdr->l2_fhdr_tcp_udp_xsum;
4234						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
4235							| CSUM_PSEUDO_HDR);
4236					} else
4237						DBPRINT(sc, BCE_WARN_SEND,
4238							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
4239							__FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
4240				}
4241			}
4242
4243
4244			/*
4245			 * If we received a packet with a vlan tag,
4246			 * attach that information to the packet.
4247			 */
4248			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4249				DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
4250					__FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
4251#if __FreeBSD_version < 700000
4252				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
4253#else
4254				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag);
4255				if (m == NULL)
4256					continue;
4257#endif
4258			}
4259
4260			/* Pass the mbuf off to the upper layers. */
4261			ifp->if_ipackets++;
4262			DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
4263				__FUNCTION__);
4264			BCE_UNLOCK(sc);
4265			(*ifp->if_input)(ifp, m);
4266			DBRUNIF(1, sc->rx_mbuf_alloc--);
4267			BCE_LOCK(sc);
4268
4269bce_rx_int_next_rx:
4270			sw_prod = NEXT_RX_BD(sw_prod);
4271		}
4272
4273		sw_cons = NEXT_RX_BD(sw_cons);
4274
4275		/* Refresh hw_cons to see if there's new work */
4276		if (sw_cons == hw_cons) {
4277			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4278			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4279				hw_cons++;
4280		}
4281
4282		/* Prevent speculative reads from getting ahead of the status block. */
4283		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4284			BUS_SPACE_BARRIER_READ);
4285	}
4286
4287	for (int i = 0; i < RX_PAGES; i++)
4288		bus_dmamap_sync(sc->rx_bd_chain_tag,
4289		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4290
4291	sc->rx_cons = sw_cons;
4292	sc->rx_prod = sw_prod;
4293	sc->rx_prod_bseq = sw_prod_bseq;
4294
4295	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4296	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4297
4298	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4299		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4300		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4301}
4302
4303
4304/****************************************************************************/
4305/* Handles transmit completion interrupt events.                            */
4306/*                                                                          */
4307/* Returns:                                                                 */
4308/*   Nothing.                                                               */
4309/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	BCE_LOCK_ASSERT(sc);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
				" 0x%04X > 0x%04X\n",
				__FILE__, __LINE__, sw_tx_chain_cons,
				(int) MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1,
			txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				[TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
				"txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
				BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		/* Reclaim this tx_bd whether or not it carried an mbuf. */
		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	/* Re-open the send path once enough slack space exists again. */
	if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
			BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n",
				__FUNCTION__, sc->used_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	/* Publish the updated consumer index. */
	sc->tx_cons = sw_tx_cons;
}
4421
4422
4423/****************************************************************************/
4424/* Disables interrupt generation.                                           */
4425/*                                                                          */
4426/* Returns:                                                                 */
4427/*   Nothing.                                                               */
4428/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Set the mask bit to stop further interrupt generation. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted PCI write before returning. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
}
4436
4437
4438/****************************************************************************/
4439/* Enables interrupt generation.                                            */
4440/*                                                                          */
4441/* Returns:                                                                 */
4442/*   Nothing.                                                               */
4443/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	u32 val;

	/* Ack up to the last status block index seen, keeping the mask set. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* Write again without the mask bit to re-enable interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/*
	 * Request an immediate host coalescing pass (COAL_NOW) so any
	 * work completed while interrupts were masked is not missed.
	 */
	val = REG_RD(sc, BCE_HC_COMMAND);
	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
}
4459
4460
4461/****************************************************************************/
4462/* Handles controller initialization.                                       */
4463/*                                                                          */
4464/* Must be called from a locked routine.                                    */
4465/*                                                                          */
4466/* Returns:                                                                 */
4467/*   Nothing.                                                               */
4468/****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	/* Bring the chip to a known quiescent state before reprogramming it. */
	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
		ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu |
			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/*
	 * NOTE(review): the "+ 2" presumably aligns the IP header and the
	 * "+ 8" covers trailing frame data beyond the l2_fhdr — confirm
	 * against the RX interrupt path before relying on these constants.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		bce_disable_intr(sc);

		/*
		 * NOTE(review): the high 16 bits of the quick consumer trip
		 * registers appear to set the interrupt trip point to 1 while
		 * polling — confirm against the register layout.
		 */
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	bce_ifmedia_upd(ifp);

	/* Mark the interface as up and ready to transmit. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Schedule the periodic statistics/link tick. */
	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);

bce_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4572
4573
4574/****************************************************************************/
4575/* Handles controller initialization when called from an unlocked routine.  */
4576/*                                                                          */
4577/* Returns:                                                                 */
4578/*   Nothing.                                                               */
4579/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc;

	/* Serialize with the interrupt handler and other entry points. */
	sc = xsc;
	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);
}
4589
4590
4591/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4593/* memory visible to the controller.                                        */
4594/*                                                                          */
4595/* Returns:                                                                 */
4596/*   0 for success, positive value for failure.                             */
4597/****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf *m_head, u16 *prod,
	u16 *chain_prod, u32 *prod_bseq)
{
	u32 vlan_tag_flags = 0;
	struct m_tag *mtag;
	struct bce_dmamap_arg map_arg;
	bus_dmamap_t map;
	int i, error, rc = 0;

	/* Transfer any checksum offload flags to the bd. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	mtag = VLAN_OUTPUT_TAG(sc->bce_ifp, m_head);
	if (mtag != NULL)
		vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
			(VLAN_TAG_VALUE(mtag) << 16));

	/* Map the mbuf into DMAable memory. */
	map = sc->tx_mbuf_map[*chain_prod];
	map_arg.sc         = sc;
	map_arg.prod       = *prod;
	map_arg.chain_prod = *chain_prod;
	map_arg.prod_bseq  = *prod_bseq;
	map_arg.tx_flags   = vlan_tag_flags;
	/* Limit the segment count to the free ring entries less the slack. */
	map_arg.maxsegs    = USABLE_TX_BD - sc->used_tx_bd -
		BCE_TX_SLACK_SPACE;

	KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));

	for (i = 0; i < TX_PAGES; i++)
		map_arg.tx_chain[i] = sc->tx_bd_chain[i];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m_head,
	    bce_dma_map_tx_desc, &map_arg, BUS_DMA_NOWAIT);

	if (error || map_arg.maxsegs == 0) {

            /* Try to defrag the mbuf if there are too many segments. */
            if (error == EFBIG && map_arg.maxsegs != 0) {
                struct mbuf *m0;

	        DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
                    __FUNCTION__, map_arg.maxsegs);

                /*
                 * NOTE(review): m_defrag() may free the original chain and
                 * return a new one.  m_head is passed by value, so on the
                 * ENOBUFS path below the caller can re-queue a stale mbuf
                 * pointer — fixing this requires a struct mbuf ** parameter;
                 * verify against the bce_start_locked() error path.
                 */
                m0 = m_defrag(m_head, M_DONTWAIT);
                if (m0 != NULL) {
                    m_head = m0;
                    error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag,
                        map, m_head, bce_dma_map_tx_desc, &map_arg,
                        BUS_DMA_NOWAIT);
                }
            }

            /* Still getting an error after a defrag. */
            if (error) {
                BCE_PRINTF(sc,
                    "%s(%d): Error mapping mbuf into TX chain!\n",
                    __FILE__, __LINE__);
                rc = ENOBUFS;
                goto bce_tx_encap_exit;
            }

	}

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * delete the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_map[*chain_prod] =
		sc->tx_mbuf_map[map_arg.chain_prod];
	sc->tx_mbuf_map[map_arg.chain_prod] = map;
	sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
	sc->used_tx_bd += map_arg.maxsegs;

	/* Track the high watermark of ring usage (debug builds only). */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, *chain_prod,
		map_arg.maxsegs));

	/* prod still points the last used tx_bd at this point. */
	*prod       = map_arg.prod;
	*chain_prod = map_arg.chain_prod;
	*prod_bseq  = map_arg.prod_bseq;

bce_tx_encap_exit:

	return(rc);
}
4702
4703
4704/****************************************************************************/
4705/* Main transmit routine when called from another routine with a lock.      */
4706/*                                                                          */
4707/* Returns:                                                                 */
4708/*   Nothing.                                                               */
4709/****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;
	u32	tx_prod_bseq;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
	tx_prod_bseq = sc->tx_prod_bseq;

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);

	/* Keep adding entries while there is space in the ring. */
	/* A NULL tx_mbuf_ptr[] slot marks a free producer position. */
	while(sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		/*
		 * NOTE(review): if bce_tx_encap() defragmented the chain
		 * internally, m_head here may be stale — see the note in
		 * bce_tx_encap(); verify before trusting this prepend.
		 */
		if (bce_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
				"TX chain is closed for business! Total tx_bd used = %d\n",
				sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		BPF_MTAP(ifp, m_head);

		tx_prod = NEXT_TX_BD(tx_prod);
		tx_chain_prod = TX_CHAIN_IDX(tx_prod);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* Update the driver's counters. */
	sc->tx_prod      = tx_prod;
	sc->tx_prod_bseq = tx_prod_bseq;

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);

	/* Start the transmit by notifying the chip of the new producer. */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BCE_TX_TIMEOUT;

bce_start_locked_exit:
	return;
}
4794
4795
4796/****************************************************************************/
4797/* Main transmit routine when called from another routine without a lock.   */
4798/*                                                                          */
4799/* Returns:                                                                 */
4800/*   Nothing.                                                               */
4801/****************************************************************************/
4802static void
4803bce_start(struct ifnet *ifp)
4804{
4805	struct bce_softc *sc = ifp->if_softc;
4806
4807	BCE_LOCK(sc);
4808	bce_start_locked(ifp);
4809	BCE_UNLOCK(sc);
4810}
4811
4812
4813/****************************************************************************/
4814/* Handles any IOCTL calls from the operating system.                       */
4815/*                                                                          */
4816/* Returns:                                                                 */
4817/*   0 for success, positive value for failure.                             */
4818/****************************************************************************/
4819static int
4820bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4821{
4822	struct bce_softc *sc = ifp->if_softc;
4823	struct ifreq *ifr = (struct ifreq *) data;
4824	struct mii_data *mii;
4825	int mask, error = 0;
4826
4827	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4828
4829	switch(command) {
4830
4831		/* Set the MTU. */
4832		case SIOCSIFMTU:
4833			/* Check that the MTU setting is supported. */
4834			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
4835				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4836				error = EINVAL;
4837				break;
4838			}
4839
4840			DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4841
4842			BCE_LOCK(sc);
4843			ifp->if_mtu = ifr->ifr_mtu;
4844			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4845			bce_init_locked(sc);
4846			BCE_UNLOCK(sc);
4847			break;
4848
4849		/* Set interface. */
4850		case SIOCSIFFLAGS:
4851			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4852
4853			BCE_LOCK(sc);
4854
4855			/* Check if the interface is up. */
4856			if (ifp->if_flags & IFF_UP) {
4857				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4858					/* Change the promiscuous/multicast flags as necessary. */
4859					bce_set_rx_mode(sc);
4860				} else {
4861					/* Start the HW */
4862					bce_init_locked(sc);
4863				}
4864			} else {
4865				/* The interface is down.  Check if the driver is running. */
4866				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4867					bce_stop(sc);
4868				}
4869			}
4870
4871			BCE_UNLOCK(sc);
4872			error = 0;
4873
4874			break;
4875
4876		/* Add/Delete multicast address */
4877		case SIOCADDMULTI:
4878		case SIOCDELMULTI:
4879			DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4880
4881			BCE_LOCK(sc);
4882			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4883				bce_set_rx_mode(sc);
4884				error = 0;
4885			}
4886			BCE_UNLOCK(sc);
4887
4888			break;
4889
4890		/* Set/Get Interface media */
4891		case SIOCSIFMEDIA:
4892		case SIOCGIFMEDIA:
4893			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4894
4895			DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4896				sc->bce_phy_flags);
4897
4898			if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
4899				DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");
4900
4901				error = ifmedia_ioctl(ifp, ifr,
4902				    &sc->bce_ifmedia, command);
4903			} else {
4904				DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4905				mii = device_get_softc(sc->bce_miibus);
4906				error = ifmedia_ioctl(ifp, ifr,
4907				    &mii->mii_media, command);
4908			}
4909			break;
4910
4911		/* Set interface capability */
4912		case SIOCSIFCAP:
4913			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4914			DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4915
4916#ifdef DEVICE_POLLING
4917			if (mask & IFCAP_POLLING) {
4918				if (ifr->ifr_reqcap & IFCAP_POLLING) {
4919
4920					/* Setup the poll routine to call. */
4921					error = ether_poll_register(bce_poll, ifp);
4922					if (error) {
4923						BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4924							__FILE__, __LINE__);
4925						goto bce_ioctl_exit;
4926					}
4927
4928					/* Clear the interrupt. */
4929					BCE_LOCK(sc);
4930					bce_disable_intr(sc);
4931
4932					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4933						(1 << 16) | sc->bce_rx_quick_cons_trip);
4934					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4935						(1 << 16) | sc->bce_tx_quick_cons_trip);
4936
4937					ifp->if_capenable |= IFCAP_POLLING;
4938					BCE_UNLOCK(sc);
4939				} else {
4940					/* Clear the poll routine. */
4941					error = ether_poll_deregister(ifp);
4942
4943					/* Enable interrupt even in error case */
4944					BCE_LOCK(sc);
4945					bce_enable_intr(sc);
4946
4947					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4948						(sc->bce_tx_quick_cons_trip_int << 16) |
4949						sc->bce_tx_quick_cons_trip);
4950					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4951						(sc->bce_rx_quick_cons_trip_int << 16) |
4952						sc->bce_rx_quick_cons_trip);
4953
4954					ifp->if_capenable &= ~IFCAP_POLLING;
4955					BCE_UNLOCK(sc);
4956				}
4957			}
4958#endif /*DEVICE_POLLING */
4959
4960			/* Toggle the TX checksum capabilites enable flag. */
4961			if (mask & IFCAP_TXCSUM) {
4962				ifp->if_capenable ^= IFCAP_TXCSUM;
4963				if (IFCAP_TXCSUM & ifp->if_capenable)
4964					ifp->if_hwassist = BCE_IF_HWASSIST;
4965				else
4966					ifp->if_hwassist = 0;
4967			}
4968
4969			/* Toggle the RX checksum capabilities enable flag. */
4970			if (mask & IFCAP_RXCSUM) {
4971				ifp->if_capenable ^= IFCAP_RXCSUM;
4972				if (IFCAP_RXCSUM & ifp->if_capenable)
4973					ifp->if_hwassist = BCE_IF_HWASSIST;
4974				else
4975					ifp->if_hwassist = 0;
4976			}
4977
4978			/* Toggle VLAN_MTU capabilities enable flag. */
4979			if (mask & IFCAP_VLAN_MTU) {
4980				BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
4981					__FILE__, __LINE__);
4982			}
4983
4984			/* Toggle VLANHWTAG capabilities enabled flag. */
4985			if (mask & IFCAP_VLAN_HWTAGGING) {
4986				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
4987					BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
4988						"management firmware (ASF/IPMI/UMP) is running!\n",
4989						__FILE__, __LINE__);
4990				else
4991					BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
4992						__FILE__, __LINE__);
4993			}
4994
4995			break;
4996		default:
4997			DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
4998				(u32) command);
4999
5000			/* We don't know how to handle the IOCTL, pass it on. */
5001			error = ether_ioctl(ifp, command, data);
5002			break;
5003	}
5004
5005#ifdef DEVICE_POLLING
5006bce_ioctl_exit:
5007#endif
5008
5009	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5010
5011	return(error);
5012}
5013
5014
5015/****************************************************************************/
5016/* Transmit timeout handler.                                                */
5017/*                                                                          */
5018/* Returns:                                                                 */
5019/*   Nothing.                                                               */
5020/****************************************************************************/
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	/* Dump driver and status block state in debug builds. */
	DBRUN(BCE_WARN_SEND,
		bce_dump_driver_state(sc);
		bce_dump_status_block(sc));

	BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n",
		__FILE__, __LINE__);

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	BCE_LOCK(sc);
	/* Clear RUNNING so bce_init_locked() performs a full reinitialization. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init_locked(sc);
	ifp->if_oerrors++;
	BCE_UNLOCK(sc);

}
5043
5044
5045#ifdef DEVICE_POLLING
static void
bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bce_softc *sc = ifp->if_softc;

	BCE_LOCK_ASSERT(sc);

	/* Bound the amount of RX work bce_rx_intr() may do this pass. */
	sc->bce_rxcycles = count;

	/*
	 * NOTE(review): POSTWRITE before reading a device-written status
	 * block looks odd; POSTREAD is the usual idiom — confirm against
	 * the status block's bus_dma setup.
	 */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/* Check for any completed RX frames. */
	if (sc->status_block->status_rx_quick_consumer_index0 !=
		sc->hw_rx_cons)
		bce_rx_intr(sc);

	/* Check for any completed TX frames. */
	if (sc->status_block->status_tx_quick_consumer_index0 !=
		sc->hw_tx_cons)
		bce_tx_intr(sc);

	/* Check for new frames to transmit. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

}
5073
5074
5075static void
5076bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5077{
5078	struct bce_softc *sc = ifp->if_softc;
5079
5080	BCE_LOCK(sc);
5081	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5082		bce_poll_locked(ifp, cmd, count);
5083	BCE_UNLOCK(sc);
5084}
5085#endif /* DEVICE_POLLING */
5086
5087
5088#if 0
5089static inline int
5090bce_has_work(struct bce_softc *sc)
5091{
5092	struct status_block *stat = sc->status_block;
5093
5094	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5095	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5096		return 1;
5097
5098	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5099	    bp->link_up)
5100		return 1;
5101
5102	return 0;
5103}
5104#endif
5105
5106
5107/*
5108 * Interrupt handler.
5109 */
5110/****************************************************************************/
5111/* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
5113/* interrupt causes (PHY, TX, RX).                                          */
5114/*                                                                          */
5115/* Returns:                                                                 */
5116/*   0 for success, positive value for failure.                             */
5117/****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;

	sc = xsc;
	ifp = sc->bce_ifp;

	BCE_LOCK(sc);

	DBRUNIF(1, sc->interrupts_generated++);

#ifdef DEVICE_POLLING
	/* While polling is active, the poll routine does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
		goto bce_intr_exit;
	}
#endif

	/*
	 * NOTE(review): POSTWRITE before reading a device-written status
	 * block looks odd; POSTREAD is the usual idiom — confirm.
	 */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		/* Debug builds can randomly inject a fake attention bit. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF(sc, "Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(1, sc->unexpected_attentions++);

			BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUN(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Reinitialize the controller to recover. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag,	sc->status_map,
	    BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	/* Write twice: first with the mask bit to ack, then without to unmask. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}
5228
5229
5230/****************************************************************************/
5231/* Programs the various packet receive modes (broadcast and multicast).     */
5232/*                                                                          */
5233/* Returns:                                                                 */
5234/*   Nothing.                                                               */
5235/****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[4] = { 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	/* NOTE(review): the low bit presumably enables the sort entry — confirm. */
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
       	}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");

		/*
		 * Hash each address into a 7-bit value (0-127) selecting one
		 * bit of the 128-bit filter spread over four 32-bit registers.
		 */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    	ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
			hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < 4; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}
5314
5315
5316/****************************************************************************/
/* Called periodically to update statistics from the controller's          */
5318/* statistics block.                                                        */
5319/*                                                                          */
5320/* Returns:                                                                 */
5321/*   Nothing.                                                               */
5322/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	/* The controller DMAs its counters into this host-memory block. */
	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.  These are absolute counter snapshots
	 * (assignments, not increments), so the hardware block is the
	 * single source of truth for these values.
	 */
	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;

	/* Input errors: runts, giants, buffer exhaustion, and PHY-level errors. */
	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
				      (u_long) stats->stat_EtherStatsOverrsizePkts +
					  (u_long) stats->stat_IfInMBUFDiscards +
					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
					  (u_long) stats->stat_Dot3StatsFCSErrors;

	/* Output errors: internal MAC errors and collision failures. */
	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
					  (u_long) stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  The 64-bit counters are exported by the
	 * hardware as separate high/low 32-bit halves and merged here.
	 */
	sc->stat_IfHCInOctets =
		((u64) stats->stat_IfHCInOctets_hi << 32) +
		 (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((u64) stats->stat_IfHCOutOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are 32-bit and copied straight through. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5538
5539
5540static void
5541bce_tick_locked(struct bce_softc *sc)
5542{
5543	struct mii_data *mii = NULL;
5544	struct ifnet *ifp;
5545	u32 msg;
5546
5547	ifp = sc->bce_ifp;
5548
5549	BCE_LOCK_ASSERT(sc);
5550
5551	/* Tell the firmware that the driver is still running. */
5552#ifdef BCE_DEBUG
5553	msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5554#else
5555	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5556#endif
5557	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5558
5559	/* Update the statistics from the hardware statistics block. */
5560	bce_stats_update(sc);
5561
5562	/* Schedule the next tick. */
5563	callout_reset(
5564		&sc->bce_stat_ch,		/* callout */
5565		hz, 					/* ticks */
5566		bce_tick, 				/* function */
5567		sc);					/* function argument */
5568
5569	/* If link is up already up then we're done. */
5570	if (sc->bce_link)
5571		goto bce_tick_locked_exit;
5572
5573	/* DRC - ToDo: Add SerDes support and check SerDes link here. */
5574
5575	mii = device_get_softc(sc->bce_miibus);
5576	mii_tick(mii);
5577
5578	/* Check if the link has come up. */
5579	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5580	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5581		sc->bce_link++;
5582		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5583		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5584		    bootverbose)
5585			BCE_PRINTF(sc, "Gigabit link up\n");
5586		/* Now that link is up, handle any outstanding TX traffic. */
5587		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5588			bce_start_locked(ifp);
5589	}
5590
5591bce_tick_locked_exit:
5592	return;
5593}
5594
5595
/****************************************************************************/
/* Callout wrapper: acquires the softc lock around the locked tick body.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;

	BCE_LOCK(sc);
	bce_tick_locked(sc);
	BCE_UNLOCK(sc);
}
5607
5608
5609#ifdef BCE_DEBUG
5610/****************************************************************************/
5611/* Allows the driver state to be dumped through the sysctl interface.       */
5612/*                                                                          */
5613/* Returns:                                                                 */
5614/*   0 for success, positive value for failure.                             */
5615/****************************************************************************/
5616static int
5617bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5618{
5619        int error;
5620        int result;
5621        struct bce_softc *sc;
5622
5623        result = -1;
5624        error = sysctl_handle_int(oidp, &result, 0, req);
5625
5626        if (error || !req->newptr)
5627                return (error);
5628
5629        if (result == 1) {
5630                sc = (struct bce_softc *)arg1;
5631                bce_dump_driver_state(sc);
5632        }
5633
5634        return error;
5635}
5636
5637
5638/****************************************************************************/
5639/* Allows the hardware state to be dumped through the sysctl interface.     */
5640/*                                                                          */
5641/* Returns:                                                                 */
5642/*   0 for success, positive value for failure.                             */
5643/****************************************************************************/
5644static int
5645bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5646{
5647        int error;
5648        int result;
5649        struct bce_softc *sc;
5650
5651        result = -1;
5652        error = sysctl_handle_int(oidp, &result, 0, req);
5653
5654        if (error || !req->newptr)
5655                return (error);
5656
5657        if (result == 1) {
5658                sc = (struct bce_softc *)arg1;
5659                bce_dump_hw_state(sc);
5660        }
5661
5662        return error;
5663}
5664
5665
5666/****************************************************************************/
5667/*                                                                          */
5668/*                                                                          */
5669/* Returns:                                                                 */
5670/*   0 for success, positive value for failure.                             */
5671/****************************************************************************/
5672static int
5673bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5674{
5675        int error;
5676        int result;
5677        struct bce_softc *sc;
5678
5679        result = -1;
5680        error = sysctl_handle_int(oidp, &result, 0, req);
5681
5682        if (error || !req->newptr)
5683                return (error);
5684
5685        if (result == 1) {
5686                sc = (struct bce_softc *)arg1;
5687                bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5688        }
5689
5690        return error;
5691}
5692
5693
5694/****************************************************************************/
5695/*                                                                          */
5696/*                                                                          */
5697/* Returns:                                                                 */
5698/*   0 for success, positive value for failure.                             */
5699/****************************************************************************/
5700static int
5701bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5702{
5703        int error;
5704        int result;
5705        struct bce_softc *sc;
5706
5707        result = -1;
5708        error = sysctl_handle_int(oidp, &result, 0, req);
5709
5710        if (error || !req->newptr)
5711                return (error);
5712
5713        if (result == 1) {
5714                sc = (struct bce_softc *)arg1;
5715                bce_breakpoint(sc);
5716        }
5717
5718        return error;
5719}
5720#endif
5721
5722
5723/****************************************************************************/
5724/* Adds any sysctl parameters for tuning or debugging purposes.             */
5725/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
5728/****************************************************************************/
5729static void
5730bce_add_sysctls(struct bce_softc *sc)
5731{
5732	struct sysctl_ctx_list *ctx;
5733	struct sysctl_oid_list *children;
5734
5735	ctx = device_get_sysctl_ctx(sc->bce_dev);
5736	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5737
5738	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5739		"driver_version",
5740		CTLFLAG_RD, &bce_driver_version,
5741		0, "bce driver version");
5742
5743#ifdef BCE_DEBUG
5744	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5745		"rx_low_watermark",
5746		CTLFLAG_RD, &sc->rx_low_watermark,
5747		0, "Lowest level of free rx_bd's");
5748
5749	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5750		"tx_hi_watermark",
5751		CTLFLAG_RD, &sc->tx_hi_watermark,
5752		0, "Highest level of used tx_bd's");
5753
5754	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5755		"l2fhdr_status_errors",
5756		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5757		0, "l2_fhdr status errors");
5758
5759	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5760		"unexpected_attentions",
5761		CTLFLAG_RD, &sc->unexpected_attentions,
5762		0, "unexpected attentions");
5763
5764	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5765		"lost_status_block_updates",
5766		CTLFLAG_RD, &sc->lost_status_block_updates,
5767		0, "lost status block updates");
5768
5769	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5770		"mbuf_alloc_failed",
5771		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5772		0, "mbuf cluster allocation failures");
5773#endif
5774
5775	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5776		"stat_IfHcInOctets",
5777		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5778		"Bytes received");
5779
5780	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5781		"stat_IfHCInBadOctets",
5782		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5783		"Bad bytes received");
5784
5785	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5786		"stat_IfHCOutOctets",
5787		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5788		"Bytes sent");
5789
5790	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5791		"stat_IfHCOutBadOctets",
5792		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5793		"Bad bytes sent");
5794
5795	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5796		"stat_IfHCInUcastPkts",
5797		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5798		"Unicast packets received");
5799
5800	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5801		"stat_IfHCInMulticastPkts",
5802		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5803		"Multicast packets received");
5804
5805	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5806		"stat_IfHCInBroadcastPkts",
5807		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5808		"Broadcast packets received");
5809
5810	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5811		"stat_IfHCOutUcastPkts",
5812		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5813		"Unicast packets sent");
5814
5815	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5816		"stat_IfHCOutMulticastPkts",
5817		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5818		"Multicast packets sent");
5819
5820	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5821		"stat_IfHCOutBroadcastPkts",
5822		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5823		"Broadcast packets sent");
5824
5825	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5826		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5827		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5828		0, "Internal MAC transmit errors");
5829
5830	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5831		"stat_Dot3StatsCarrierSenseErrors",
5832		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5833		0, "Carrier sense errors");
5834
5835	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5836		"stat_Dot3StatsFCSErrors",
5837		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5838		0, "Frame check sequence errors");
5839
5840	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5841		"stat_Dot3StatsAlignmentErrors",
5842		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5843		0, "Alignment errors");
5844
5845	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5846		"stat_Dot3StatsSingleCollisionFrames",
5847		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5848		0, "Single Collision Frames");
5849
5850	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5851		"stat_Dot3StatsMultipleCollisionFrames",
5852		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5853		0, "Multiple Collision Frames");
5854
5855	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5856		"stat_Dot3StatsDeferredTransmissions",
5857		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5858		0, "Deferred Transmissions");
5859
5860	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5861		"stat_Dot3StatsExcessiveCollisions",
5862		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5863		0, "Excessive Collisions");
5864
5865	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5866		"stat_Dot3StatsLateCollisions",
5867		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5868		0, "Late Collisions");
5869
5870	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5871		"stat_EtherStatsCollisions",
5872		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5873		0, "Collisions");
5874
5875	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5876		"stat_EtherStatsFragments",
5877		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5878		0, "Fragments");
5879
5880	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5881		"stat_EtherStatsJabbers",
5882		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5883		0, "Jabbers");
5884
5885	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5886		"stat_EtherStatsUndersizePkts",
5887		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5888		0, "Undersize packets");
5889
5890	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5891		"stat_EtherStatsOverrsizePkts",
5892		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5893		0, "stat_EtherStatsOverrsizePkts");
5894
5895	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5896		"stat_EtherStatsPktsRx64Octets",
5897		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5898		0, "Bytes received in 64 byte packets");
5899
5900	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5901		"stat_EtherStatsPktsRx65Octetsto127Octets",
5902		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5903		0, "Bytes received in 65 to 127 byte packets");
5904
5905	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5906		"stat_EtherStatsPktsRx128Octetsto255Octets",
5907		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5908		0, "Bytes received in 128 to 255 byte packets");
5909
5910	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5911		"stat_EtherStatsPktsRx256Octetsto511Octets",
5912		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5913		0, "Bytes received in 256 to 511 byte packets");
5914
5915	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5916		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5917		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5918		0, "Bytes received in 512 to 1023 byte packets");
5919
5920	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5921		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5922		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5923		0, "Bytes received in 1024 t0 1522 byte packets");
5924
5925	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5926		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5927		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5928		0, "Bytes received in 1523 to 9022 byte packets");
5929
5930	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5931		"stat_EtherStatsPktsTx64Octets",
5932		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5933		0, "Bytes sent in 64 byte packets");
5934
5935	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5936		"stat_EtherStatsPktsTx65Octetsto127Octets",
5937		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5938		0, "Bytes sent in 65 to 127 byte packets");
5939
5940	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5941		"stat_EtherStatsPktsTx128Octetsto255Octets",
5942		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5943		0, "Bytes sent in 128 to 255 byte packets");
5944
5945	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5946		"stat_EtherStatsPktsTx256Octetsto511Octets",
5947		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5948		0, "Bytes sent in 256 to 511 byte packets");
5949
5950	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5951		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5952		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5953		0, "Bytes sent in 512 to 1023 byte packets");
5954
5955	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5956		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5957		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5958		0, "Bytes sent in 1024 to 1522 byte packets");
5959
5960	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5961		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5962		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5963		0, "Bytes sent in 1523 to 9022 byte packets");
5964
5965	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5966		"stat_XonPauseFramesReceived",
5967		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5968		0, "XON pause frames receved");
5969
5970	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5971		"stat_XoffPauseFramesReceived",
5972		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5973		0, "XOFF pause frames received");
5974
5975	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5976		"stat_OutXonSent",
5977		CTLFLAG_RD, &sc->stat_OutXonSent,
5978		0, "XON pause frames sent");
5979
5980	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5981		"stat_OutXoffSent",
5982		CTLFLAG_RD, &sc->stat_OutXoffSent,
5983		0, "XOFF pause frames sent");
5984
5985	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5986		"stat_FlowControlDone",
5987		CTLFLAG_RD, &sc->stat_FlowControlDone,
5988		0, "Flow control done");
5989
5990	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5991		"stat_MacControlFramesReceived",
5992		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5993		0, "MAC control frames received");
5994
5995	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5996		"stat_XoffStateEntered",
5997		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5998		0, "XOFF state entered");
5999
6000	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6001		"stat_IfInFramesL2FilterDiscards",
6002		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6003		0, "Received L2 packets discarded");
6004
6005	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6006		"stat_IfInRuleCheckerDiscards",
6007		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6008		0, "Received packets discarded by rule");
6009
6010	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6011		"stat_IfInFTQDiscards",
6012		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6013		0, "Received packet FTQ discards");
6014
6015	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6016		"stat_IfInMBUFDiscards",
6017		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6018		0, "Received packets discarded due to lack of controller buffer memory");
6019
6020	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6021		"stat_IfInRuleCheckerP4Hit",
6022		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6023		0, "Received packets rule checker hits");
6024
6025	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6026		"stat_CatchupInRuleCheckerDiscards",
6027		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6028		0, "Received packets discarded in Catchup path");
6029
6030	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6031		"stat_CatchupInFTQDiscards",
6032		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6033		0, "Received packets discarded in FTQ in Catchup path");
6034
6035	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6036		"stat_CatchupInMBUFDiscards",
6037		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6038		0, "Received packets discarded in controller buffer memory in Catchup path");
6039
6040	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6041		"stat_CatchupInRuleCheckerP4Hit",
6042		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6043		0, "Received packets rule checker hits in Catchup path");
6044
6045#ifdef BCE_DEBUG
6046	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6047		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6048		(void *)sc, 0,
6049		bce_sysctl_driver_state, "I", "Drive state information");
6050
6051	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6052		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
6053		(void *)sc, 0,
6054		bce_sysctl_hw_state, "I", "Hardware state information");
6055
6056	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6057		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6058		(void *)sc, 0,
6059		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6060
6061	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6062		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6063		(void *)sc, 0,
6064		bce_sysctl_breakpoint, "I", "Driver breakpoint");
6065#endif
6066
6067}
6068
6069
6070/****************************************************************************/
6071/* BCE Debug Routines                                                       */
6072/****************************************************************************/
6073#ifdef BCE_DEBUG
6074
6075/****************************************************************************/
6076/* Prints out information about an mbuf.                                    */
6077/*                                                                          */
6078/* Returns:                                                                 */
6079/*   Nothing.                                                               */
6080/****************************************************************************/
static void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
	u32 val_hi, val_lo;
	struct mbuf *mp = m;

	if (m == NULL) {
		/* Nothing to dump for a NULL mbuf pointer. */
		printf("mbuf ptr is null!\n");
		return;
	}

	/* Walk the mbuf chain, printing one line per link. */
	while (mp) {
		val_hi = BCE_ADDR_HI(mp);
		val_lo = BCE_ADDR_LO(mp);
		BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
			   val_hi, val_lo, mp->m_len);

		if (mp->m_flags & M_EXT)
			printf("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			printf("M_PKTHDR ");
		printf("\n");

		/* For cluster mbufs, also show the external buffer. */
		if (mp->m_flags & M_EXT) {
			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
			BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
				val_hi, val_lo, mp->m_ext.ext_size);
		}

		mp = mp->m_next;
	}


}
6117
6118
6119/****************************************************************************/
6120/* Prints out the mbufs in the TX mbuf chain.                               */
6121/*                                                                          */
6122/* Returns:                                                                 */
6123/*   Nothing.                                                               */
6124/****************************************************************************/
6125static void
6126bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6127{
6128	struct mbuf *m;
6129
6130	BCE_PRINTF(sc,
6131		"----------------------------"
6132		"  tx mbuf data  "
6133		"----------------------------\n");
6134
6135	for (int i = 0; i < count; i++) {
6136	 	m = sc->tx_mbuf_ptr[chain_prod];
6137		BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6138		bce_dump_mbuf(sc, m);
6139		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6140	}
6141
6142	BCE_PRINTF(sc,
6143		"----------------------------"
6144		"----------------"
6145		"----------------------------\n");
6146}
6147
6148
6149/*
6150 * This routine prints the RX mbuf chain.
6151 */
6152static void
6153bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6154{
6155	struct mbuf *m;
6156
6157	BCE_PRINTF(sc,
6158		"----------------------------"
6159		"  rx mbuf data  "
6160		"----------------------------\n");
6161
6162	for (int i = 0; i < count; i++) {
6163	 	m = sc->rx_mbuf_ptr[chain_prod];
6164		BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6165		bce_dump_mbuf(sc, m);
6166		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6167	}
6168
6169
6170	BCE_PRINTF(sc,
6171		"----------------------------"
6172		"----------------"
6173		"----------------------------\n");
6174}
6175
6176
6177static void
6178bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6179{
6180	if (idx > MAX_TX_BD)
6181		/* Index out of range. */
6182		BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6183	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6184		/* TX Chain page pointer. */
6185		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6186			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6187	else
6188		/* Normal tx_bd entry. */
6189		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6190			"flags = 0x%08X\n", idx,
6191			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6192			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
6193}
6194
6195
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
6213
6214
6215static void
6216bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6217{
6218	BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6219		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6220		"tcp_udp_xsum = 0x%04X\n", idx,
6221		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6222		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6223		l2fhdr->l2_fhdr_tcp_udp_xsum);
6224}
6225
6226
6227/*
6228 * This routine prints the TX chain.
6229 */
6230static void
6231bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6232{
6233	struct tx_bd *txbd;
6234
6235	/* First some info about the tx_bd chain structure. */
6236	BCE_PRINTF(sc,
6237		"----------------------------"
6238		"  tx_bd  chain  "
6239		"----------------------------\n");
6240
6241	BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6242		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6243
6244	BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6245		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6246
6247	BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6248
6249	BCE_PRINTF(sc, ""
6250		"-----------------------------"
6251		"   tx_bd data   "
6252		"-----------------------------\n");
6253
6254	/* Now print out the tx_bd's themselves. */
6255	for (int i = 0; i < count; i++) {
6256	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6257		bce_dump_txbd(sc, tx_prod, txbd);
6258		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6259	}
6260
6261	BCE_PRINTF(sc,
6262		"-----------------------------"
6263		"--------------"
6264		"-----------------------------\n");
6265}
6266
6267
6268/*
6269 * This routine prints the RX chain.
6270 */
static void
bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
{
	struct rx_bd *rxbd;

	/* First some info about the rx_bd chain structure. */
	BCE_PRINTF(sc,
		"----------------------------"
		"  rx_bd  chain  "
		"----------------------------\n");

	BCE_PRINTF(sc, "----- RX_BD Chain -----\n");

	BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);

	BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);

	BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);

	BCE_PRINTF(sc,
		"----------------------------"
		"   rx_bd data   "
		"----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (int i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bce_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}


	BCE_PRINTF(sc,
		"----------------------------"
		"--------------"
		"----------------------------\n");
}
6309
6310
6311/*
6312 * This routine prints the status block.
6313 */
6314static void
6315bce_dump_status_block(struct bce_softc *sc)
6316{
6317	struct status_block *sblk;
6318
6319	sblk = sc->status_block;
6320
6321   	BCE_PRINTF(sc, "----------------------------- Status Block "
6322		"-----------------------------\n");
6323
6324	BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6325		sblk->status_attn_bits, sblk->status_attn_bits_ack,
6326		sblk->status_idx);
6327
6328	BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
6329		sblk->status_rx_quick_consumer_index0,
6330		sblk->status_tx_quick_consumer_index0);
6331
6332	BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6333
6334	/* Theses indices are not used for normal L2 drivers. */
6335	if (sblk->status_rx_quick_consumer_index1 ||
6336		sblk->status_tx_quick_consumer_index1)
6337		BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
6338			sblk->status_rx_quick_consumer_index1,
6339			sblk->status_tx_quick_consumer_index1);
6340
6341	if (sblk->status_rx_quick_consumer_index2 ||
6342		sblk->status_tx_quick_consumer_index2)
6343		BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
6344			sblk->status_rx_quick_consumer_index2,
6345			sblk->status_tx_quick_consumer_index2);
6346
6347	if (sblk->status_rx_quick_consumer_index3 ||
6348		sblk->status_tx_quick_consumer_index3)
6349		BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
6350			sblk->status_rx_quick_consumer_index3,
6351			sblk->status_tx_quick_consumer_index3);
6352
6353	if (sblk->status_rx_quick_consumer_index4 ||
6354		sblk->status_rx_quick_consumer_index5)
6355		BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6356			sblk->status_rx_quick_consumer_index4,
6357			sblk->status_rx_quick_consumer_index5);
6358
6359	if (sblk->status_rx_quick_consumer_index6 ||
6360		sblk->status_rx_quick_consumer_index7)
6361		BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6362			sblk->status_rx_quick_consumer_index6,
6363			sblk->status_rx_quick_consumer_index7);
6364
6365	if (sblk->status_rx_quick_consumer_index8 ||
6366		sblk->status_rx_quick_consumer_index9)
6367		BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6368			sblk->status_rx_quick_consumer_index8,
6369			sblk->status_rx_quick_consumer_index9);
6370
6371	if (sblk->status_rx_quick_consumer_index10 ||
6372		sblk->status_rx_quick_consumer_index11)
6373		BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6374			sblk->status_rx_quick_consumer_index10,
6375			sblk->status_rx_quick_consumer_index11);
6376
6377	if (sblk->status_rx_quick_consumer_index12 ||
6378		sblk->status_rx_quick_consumer_index13)
6379		BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6380			sblk->status_rx_quick_consumer_index12,
6381			sblk->status_rx_quick_consumer_index13);
6382
6383	if (sblk->status_rx_quick_consumer_index14 ||
6384		sblk->status_rx_quick_consumer_index15)
6385		BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6386			sblk->status_rx_quick_consumer_index14,
6387			sblk->status_rx_quick_consumer_index15);
6388
6389	if (sblk->status_completion_producer_index ||
6390		sblk->status_cmd_consumer_index)
6391		BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6392			sblk->status_completion_producer_index,
6393			sblk->status_cmd_consumer_index);
6394
6395	BCE_PRINTF(sc, "-------------------------------------------"
6396		"-----------------------------\n");
6397}
6398
6399
6400/*
6401 * This routine prints the statistics block.
6402 */
static void
bce_dump_stats_block(struct bce_softc *sc)
{
	struct statistics_block *sblk;

	/* Statistics block as DMA'd by the controller into host memory. */
	sblk = sc->stats_block;

	BCE_PRINTF(sc, ""
		"-----------------------------"
		" Stats  Block "
		"-----------------------------\n");

	/* The 64-bit octet/packet counters are always printed, each as a
	 * high:low 32-bit pair. */
	BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
		"IfHcInBadOctets      = 0x%08X:%08X\n",
		sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
		sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
		"IfHcOutBadOctets     = 0x%08X:%08X\n",
		sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
		sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
		"IfHcInMulticastPkts  = 0x%08X:%08X\n",
		sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
		sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);

	BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
		"IfHcOutUcastPkts     = 0x%08X:%08X\n",
		sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
		sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);

	BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
		sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
		sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);

	/* The remaining 32-bit counters are only printed when non-zero,
	 * which keeps the dump compact on a healthy interface. */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BCE_PRINTF(sc, "0x%08X : "
		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
			sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
			sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
			sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
			sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
			sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
			sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
			sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
			sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
			sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
			sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
			sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
			sblk->stat_EtherStatsUndersizePkts);

	/* NOTE: "Overrsize" matches the field spelling in the hardware
	 * statistics block structure. */
	if (sblk->stat_EtherStatsOverrsizePkts)
		BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
			sblk->stat_EtherStatsOverrsizePkts);

	/* Receive-side packet size histogram. */
	if (sblk->stat_EtherStatsPktsRx64Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
			sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	/* Transmit-side packet size histogram. */
	if (sblk->stat_EtherStatsPktsTx64Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
			sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	/* Flow control / pause frame counters. */
	if (sblk->stat_XonPauseFramesReceived)
		BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
			sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
			sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
			sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
			sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
			sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
			sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
			sblk->stat_XoffStateEntered);

	/* Frame discard counters. */
	if (sblk->stat_IfInFramesL2FilterDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
			sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
			sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
			sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
			sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
			sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
			sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
			sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
			sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
			sblk->stat_CatchupInRuleCheckerP4Hit);

	BCE_PRINTF(sc,
		"-----------------------------"
		"--------------"
		"-----------------------------\n");
}
6621
6622
6623static void
6624bce_dump_driver_state(struct bce_softc *sc)
6625{
6626	u32 val_hi, val_lo;
6627
6628	BCE_PRINTF(sc,
6629		"-----------------------------"
6630		" Driver State "
6631		"-----------------------------\n");
6632
6633	val_hi = BCE_ADDR_HI(sc);
6634	val_lo = BCE_ADDR_LO(sc);
6635	BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6636		val_hi, val_lo);
6637
6638	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6639	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6640	BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6641		val_hi, val_lo);
6642
6643	val_hi = BCE_ADDR_HI(sc->status_block);
6644	val_lo = BCE_ADDR_LO(sc->status_block);
6645	BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6646		val_hi, val_lo);
6647
6648	val_hi = BCE_ADDR_HI(sc->stats_block);
6649	val_lo = BCE_ADDR_LO(sc->stats_block);
6650	BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6651		val_hi, val_lo);
6652
6653	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6654	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6655	BCE_PRINTF(sc,
6656		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
6657		val_hi, val_lo);
6658
6659	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6660	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6661	BCE_PRINTF(sc,
6662		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6663		val_hi, val_lo);
6664
6665	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6666	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6667	BCE_PRINTF(sc,
6668		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6669		val_hi, val_lo);
6670
6671	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6672	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6673	BCE_PRINTF(sc,
6674		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6675		val_hi, val_lo);
6676
6677	BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6678		sc->interrupts_generated);
6679
6680	BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6681		sc->rx_interrupts);
6682
6683	BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6684		sc->tx_interrupts);
6685
6686	BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6687		sc->last_status_idx);
6688
6689	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6690		sc->tx_prod);
6691
6692	BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6693		sc->tx_cons);
6694
6695	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6696		sc->tx_prod_bseq);
6697
6698	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6699		sc->rx_prod);
6700
6701	BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6702		sc->rx_cons);
6703
6704	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6705		sc->rx_prod_bseq);
6706
6707	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6708		sc->rx_mbuf_alloc);
6709
6710	BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6711		sc->free_rx_bd);
6712
6713	BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6714		sc->rx_low_watermark, (u32) USABLE_RX_BD);
6715
6716	BCE_PRINTF(sc, "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
6717		sc->tx_mbuf_alloc);
6718
6719	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6720		sc->rx_mbuf_alloc);
6721
6722	BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6723		sc->used_tx_bd);
6724
6725	BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6726		sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6727
6728	BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6729		sc->mbuf_alloc_failed);
6730
6731	BCE_PRINTF(sc,
6732		"-----------------------------"
6733		"--------------"
6734		"-----------------------------\n");
6735}
6736
6737
6738static void
6739bce_dump_hw_state(struct bce_softc *sc)
6740{
6741	u32 val1;
6742
6743	BCE_PRINTF(sc,
6744		"----------------------------"
6745		" Hardware State "
6746		"----------------------------\n");
6747
6748	BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6749
6750	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6751	BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6752		val1, BCE_MISC_ENABLE_STATUS_BITS);
6753
6754	val1 = REG_RD(sc, BCE_DMA_STATUS);
6755	BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6756
6757	val1 = REG_RD(sc, BCE_CTX_STATUS);
6758	BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6759
6760	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6761	BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6762
6763	val1 = REG_RD(sc, BCE_RPM_STATUS);
6764	BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6765
6766	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6767	BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6768
6769	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6770	BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6771
6772	val1 = REG_RD(sc, BCE_HC_STATUS);
6773	BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6774
6775	BCE_PRINTF(sc,
6776		"----------------------------"
6777		"----------------"
6778		"----------------------------\n");
6779
6780	BCE_PRINTF(sc,
6781		"----------------------------"
6782		" Register  Dump "
6783		"----------------------------\n");
6784
6785	for (int i = 0x400; i < 0x8000; i += 0x10)
6786		BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6787			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6788			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6789
6790	BCE_PRINTF(sc,
6791		"----------------------------"
6792		"----------------"
6793		"----------------------------\n");
6794}
6795
6796
6797static void
6798bce_breakpoint(struct bce_softc *sc)
6799{
6800
6801	/* Unreachable code to shut the compiler up about unused functions. */
6802	if (0) {
6803   		bce_dump_txbd(sc, 0, NULL);
6804		bce_dump_rxbd(sc, 0, NULL);
6805		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6806		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6807		bce_dump_l2fhdr(sc, 0, NULL);
6808		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6809		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6810		bce_dump_status_block(sc);
6811		bce_dump_stats_block(sc);
6812		bce_dump_driver_state(sc);
6813		bce_dump_hw_state(sc);
6814	}
6815
6816	bce_dump_driver_state(sc);
6817	/* Print the important status block fields. */
6818	bce_dump_status_block(sc);
6819
6820	/* Call the debugger. */
6821	breakpoint();
6822
6823	return;
6824}
6825#endif
6826