if_bge.c revision 135784
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 135784 2004-09-25 05:07:20Z ps $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#include <sys/param.h>
70#include <sys/endian.h>
71#include <sys/systm.h>
72#include <sys/sockio.h>
73#include <sys/mbuf.h>
74#include <sys/malloc.h>
75#include <sys/kernel.h>
76#include <sys/module.h>
77#include <sys/socket.h>
78#include <sys/queue.h>
79
80#include <net/if.h>
81#include <net/if_arp.h>
82#include <net/ethernet.h>
83#include <net/if_dl.h>
84#include <net/if_media.h>
85
86#include <net/bpf.h>
87
88#include <net/if_types.h>
89#include <net/if_vlan_var.h>
90
91#include <netinet/in_systm.h>
92#include <netinet/in.h>
93#include <netinet/ip.h>
94
95#include <machine/clock.h>      /* for DELAY */
96#include <machine/bus_memio.h>
97#include <machine/bus.h>
98#include <machine/resource.h>
99#include <sys/bus.h>
100#include <sys/rman.h>
101
102#include <dev/mii/mii.h>
103#include <dev/mii/miivar.h>
104#include "miidevs.h"
105#include <dev/mii/brgphyreg.h>
106
107#include <dev/pci/pcireg.h>
108#include <dev/pci/pcivar.h>
109
110#include <dev/bge/if_bgereg.h>
111
112#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
113
114MODULE_DEPEND(bge, pci, 1, 1, 1);
115MODULE_DEPEND(bge, ether, 1, 1, 1);
116MODULE_DEPEND(bge, miibus, 1, 1, 1);
117
118/* "controller miibus0" required.  See GENERIC if you get errors here. */
119#include "miibus_if.h"
120
121/*
122 * Various supported device vendors/types and their names. Note: the
123 * spec seems to indicate that the hardware still has Alteon's vendor
124 * ID burned into it, though it will always be overridden by the vendor
125 * ID in the EEPROM. Just to be safe, we cover all possibilities.
126 */
127#define BGE_DEVDESC_MAX		64	/* Maximum device description length */
128
129static struct bge_type bge_devs[] = {
130	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
131		"Broadcom BCM5700 Gigabit Ethernet" },
132	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
133		"Broadcom BCM5701 Gigabit Ethernet" },
134	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
135		"Broadcom BCM5700 Gigabit Ethernet" },
136	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
137		"Broadcom BCM5701 Gigabit Ethernet" },
138	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
139		"Broadcom BCM5702 Gigabit Ethernet" },
140	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
141		"Broadcom BCM5702X Gigabit Ethernet" },
142	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
143		"Broadcom BCM5703 Gigabit Ethernet" },
144	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
145		"Broadcom BCM5703X Gigabit Ethernet" },
146	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
147		"Broadcom BCM5704C Dual Gigabit Ethernet" },
148	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
149		"Broadcom BCM5704S Dual Gigabit Ethernet" },
150	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
151		"Broadcom BCM5705 Gigabit Ethernet" },
152	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
153		"Broadcom BCM5705K Gigabit Ethernet" },
154	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
155		"Broadcom BCM5705M Gigabit Ethernet" },
156	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
157		"Broadcom BCM5705M Gigabit Ethernet" },
158	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
159		"Broadcom BCM5750 Gigabit Ethernet" },
160	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
161		"Broadcom BCM5750M Gigabit Ethernet" },
162	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
163		"Broadcom BCM5751 Gigabit Ethernet" },
164	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
165		"Broadcom BCM5782 Gigabit Ethernet" },
166	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
167		"Broadcom BCM5788 Gigabit Ethernet" },
168	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
169		"Broadcom BCM5901 Fast Ethernet" },
170	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
171		"Broadcom BCM5901A2 Fast Ethernet" },
172	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
173		"SysKonnect Gigabit Ethernet" },
174	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
175		"Altima AC1000 Gigabit Ethernet" },
176	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
177		"Altima AC1002 Gigabit Ethernet" },
178	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
179		"Altima AC9100 Gigabit Ethernet" },
180	{ 0, 0, NULL }
181};
182
183static int bge_probe		(device_t);
184static int bge_attach		(device_t);
185static int bge_detach		(device_t);
186static void bge_release_resources
187				(struct bge_softc *);
188static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
189static void bge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
190				    bus_size_t, int);
191static int bge_dma_alloc	(device_t);
192static void bge_dma_free	(struct bge_softc *);
193
194static void bge_txeof		(struct bge_softc *);
195static void bge_rxeof		(struct bge_softc *);
196
197static void bge_tick_locked	(struct bge_softc *);
198static void bge_tick		(void *);
199static void bge_stats_update	(struct bge_softc *);
200static void bge_stats_update_regs
201				(struct bge_softc *);
202static int bge_encap		(struct bge_softc *, struct mbuf *,
203					u_int32_t *);
204
205static void bge_intr		(void *);
206static void bge_start_locked	(struct ifnet *);
207static void bge_start		(struct ifnet *);
208static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
209static void bge_init_locked	(struct bge_softc *);
210static void bge_init		(void *);
211static void bge_stop		(struct bge_softc *);
212static void bge_watchdog		(struct ifnet *);
213static void bge_shutdown		(device_t);
214static int bge_ifmedia_upd	(struct ifnet *);
215static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
216
217static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
218static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);
219
220static void bge_setmulti	(struct bge_softc *);
221
222static void bge_handle_events	(struct bge_softc *);
223static int bge_alloc_jumbo_mem	(struct bge_softc *);
224static void bge_free_jumbo_mem	(struct bge_softc *);
225static void *bge_jalloc		(struct bge_softc *);
226static void bge_jfree		(void *, void *);
227static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
228static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
229static int bge_init_rx_ring_std	(struct bge_softc *);
230static void bge_free_rx_ring_std	(struct bge_softc *);
231static int bge_init_rx_ring_jumbo	(struct bge_softc *);
232static void bge_free_rx_ring_jumbo	(struct bge_softc *);
233static void bge_free_tx_ring	(struct bge_softc *);
234static int bge_init_tx_ring	(struct bge_softc *);
235
236static int bge_chipinit		(struct bge_softc *);
237static int bge_blockinit	(struct bge_softc *);
238
239#ifdef notdef
240static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
241static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
242static void bge_vpd_read	(struct bge_softc *);
243#endif
244
245static u_int32_t bge_readmem_ind
246				(struct bge_softc *, int);
247static void bge_writemem_ind	(struct bge_softc *, int, int);
248#ifdef notdef
249static u_int32_t bge_readreg_ind
250				(struct bge_softc *, int);
251#endif
252static void bge_writereg_ind	(struct bge_softc *, int, int);
253
254static int bge_miibus_readreg	(device_t, int, int);
255static int bge_miibus_writereg	(device_t, int, int, int);
256static void bge_miibus_statchg	(device_t);
257
258static void bge_reset		(struct bge_softc *);
259
260static device_method_t bge_methods[] = {
261	/* Device interface */
262	DEVMETHOD(device_probe,		bge_probe),
263	DEVMETHOD(device_attach,	bge_attach),
264	DEVMETHOD(device_detach,	bge_detach),
265	DEVMETHOD(device_shutdown,	bge_shutdown),
266
267	/* bus interface */
268	DEVMETHOD(bus_print_child,	bus_generic_print_child),
269	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
270
271	/* MII interface */
272	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
273	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
274	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
275
276	{ 0, 0 }
277};
278
279static driver_t bge_driver = {
280	"bge",
281	bge_methods,
282	sizeof(struct bge_softc)
283};
284
285static devclass_t bge_devclass;
286
287DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
288DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
289
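/*
 * The chip's registers and internal memory can also be reached
 * indirectly through a window in PCI configuration space: write the
 * target offset to the window base-address register, then access the
 * corresponding data register. The helpers below wrap that two-step
 * sequence.
 */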
290static u_int32_t
291bge_readmem_ind(sc, off)
292	struct bge_softc *sc;
293	int off;
294{
295	device_t dev;
296
297	dev = sc->bge_dev;
298
299	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
300	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
301}
302
303static void
304bge_writemem_ind(sc, off, val)
305	struct bge_softc *sc;
306	int off, val;
307{
308	device_t dev;
309
310	dev = sc->bge_dev;
311
312	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
313	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
314
315	return;
316}
317
318#ifdef notdef
319static u_int32_t
320bge_readreg_ind(sc, off)
321	struct bge_softc *sc;
322	int off;
323{
324	device_t dev;
325
326	dev = sc->bge_dev;
327
328	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
329	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
330}
331#endif
332
333static void
334bge_writereg_ind(sc, off, val)
335	struct bge_softc *sc;
336	int off, val;
337{
338	device_t dev;
339
340	dev = sc->bge_dev;
341
342	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
343	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
344
345	return;
346}
347
348/*
349 * Map a single buffer address.
350 */
351
352static void
353bge_dma_map_addr(arg, segs, nseg, error)
354	void *arg;
355	bus_dma_segment_t *segs;
356	int nseg;
357	int error;
358{
359	struct bge_dmamap_arg *ctx;
360
361	if (error)
362		return;
363
364	ctx = arg;
365
366	if (nseg > ctx->bge_maxsegs) {
367		ctx->bge_maxsegs = 0;
368		return;
369	}
370
371	ctx->bge_busaddr = segs->ds_addr;
372
373	return;
374}
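/*
 * Note that bge_maxsegs doubles as a status channel: the caller loads
 * it with the segment limit, and a value of 0 on return means the
 * mapping did not fit (a bus_dmamap_load() callback has no way to
 * return an error directly).
 */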
375
376/*
377 * Map an mbuf chain into a TX ring.
378 */
379
380static void
381bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
382	void *arg;
383	bus_dma_segment_t *segs;
384	int nseg;
385	bus_size_t mapsize;
386	int error;
387{
388	struct bge_dmamap_arg *ctx;
389	struct bge_tx_bd *d = NULL;
390	int i = 0, idx;
391
392	if (error)
393		return;
394
395	ctx = arg;
396
397	/* Signal error to caller if there are too many segments */
398	if (nseg > ctx->bge_maxsegs) {
399		ctx->bge_maxsegs = 0;
400		return;
401	}
402
403	idx = ctx->bge_idx;
404	while (1) {
405		d = &ctx->bge_ring[idx];
406		d->bge_addr.bge_addr_lo =
407		    htole32(BGE_ADDR_LO(segs[i].ds_addr));
408		d->bge_addr.bge_addr_hi =
409		    htole32(BGE_ADDR_HI(segs[i].ds_addr));
410		d->bge_len = htole16(segs[i].ds_len);
411		d->bge_flags = htole16(ctx->bge_flags);
412		i++;
413		if (i == nseg)
414			break;
415		BGE_INC(idx, BGE_TX_RING_CNT);
416	}
417
418	d->bge_flags |= htole16(BGE_TXBDFLAG_END);
419	ctx->bge_maxsegs = nseg;
420	ctx->bge_idx = idx;
421
422	return;
423}
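/*
 * Illustrative caller (a sketch only; the real invocation lives in
 * bge_encap(), outside this excerpt). The context carries the ring,
 * starting index and checksum flags in, and reports the consumed
 * segment count and final index back out:
 *
 *	struct bge_dmamap_arg ctx;
 *	int error;
 *
 *	ctx.sc = sc;
 *	ctx.bge_idx = idx;
 *	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
 *	ctx.bge_maxsegs = BGE_TX_RING_CNT - sc->bge_txcnt;
 *	ctx.bge_flags = csum_flags;
 *	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
 *	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
 *	if (error || ctx.bge_maxsegs == 0)
 *		... recover: the descriptors were not committed ...
 */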
424
425
426#ifdef notdef
427static u_int8_t
428bge_vpd_readbyte(sc, addr)
429	struct bge_softc *sc;
430	int addr;
431{
432	int i;
433	device_t dev;
434	u_int32_t val;
435
436	dev = sc->bge_dev;
437	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
438	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
439		DELAY(10);
440		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
441			break;
442	}
443
444	if (i == BGE_TIMEOUT * 10) {
445		printf("bge%d: VPD read timed out\n", sc->bge_unit);
446		return(0);
447	}
448
449	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
450
451	return((val >> ((addr % 4) * 8)) & 0xFF);
452}
453
454static void
455bge_vpd_read_res(sc, res, addr)
456	struct bge_softc *sc;
457	struct vpd_res *res;
458	int addr;
459{
460	int i;
461	u_int8_t *ptr;
462
463	ptr = (u_int8_t *)res;
464	for (i = 0; i < sizeof(struct vpd_res); i++)
465		ptr[i] = bge_vpd_readbyte(sc, i + addr);
466
467	return;
468}
469
470static void
471bge_vpd_read(sc)
472	struct bge_softc *sc;
473{
474	int pos = 0, i;
475	struct vpd_res res;
476
477	if (sc->bge_vpd_prodname != NULL)
478		free(sc->bge_vpd_prodname, M_DEVBUF);
479	if (sc->bge_vpd_readonly != NULL)
480		free(sc->bge_vpd_readonly, M_DEVBUF);
481	sc->bge_vpd_prodname = NULL;
482	sc->bge_vpd_readonly = NULL;
483
484	bge_vpd_read_res(sc, &res, pos);
485
486	if (res.vr_id != VPD_RES_ID) {
487		printf("bge%d: bad VPD resource id: expected %x got %x\n",
488			sc->bge_unit, VPD_RES_ID, res.vr_id);
489		return;
490	}
491
492	pos += sizeof(res);
493	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
494	for (i = 0; i < res.vr_len; i++)
495		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
496	sc->bge_vpd_prodname[i] = '\0';
497	pos += i;
498
499	bge_vpd_read_res(sc, &res, pos);
500
501	if (res.vr_id != VPD_RES_READ) {
502		printf("bge%d: bad VPD resource id: expected %x got %x\n",
503		    sc->bge_unit, VPD_RES_READ, res.vr_id);
504		return;
505	}
506
507	pos += sizeof(res);
508	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
509	for (i = 0; i < res.vr_len; i++)
510		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
511
512	return;
513}
514#endif
515
516/*
517 * Read a byte of data stored in the EEPROM at address 'addr.' The
518 * BCM570x supports both the traditional bitbang interface and an
519 * auto access interface for reading the EEPROM. We use the auto
520 * access method.
521 */
522static u_int8_t
523bge_eeprom_getbyte(sc, addr, dest)
524	struct bge_softc *sc;
525	int addr;
526	u_int8_t *dest;
527{
528	int i;
529	u_int32_t byte = 0;
530
531	/*
532	 * Enable use of auto EEPROM access so we can avoid
533	 * having to use the bitbang method.
534	 */
535	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
536
537	/* Reset the EEPROM, load the clock period. */
538	CSR_WRITE_4(sc, BGE_EE_ADDR,
539	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
540	DELAY(20);
541
542	/* Issue the read EEPROM command. */
543	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
544
545	/* Wait for completion */
546	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
547		DELAY(10);
548		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
549			break;
550	}
551
552	if (i == BGE_TIMEOUT * 10) {
553		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
554		return(0);
555	}
556
557	/* Get result. */
558	byte = CSR_READ_4(sc, BGE_EE_DATA);
559
560	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
561
562	return(0);
563}
564
565/*
566 * Read a sequence of bytes from the EEPROM.
567 */
568static int
569bge_read_eeprom(sc, dest, off, cnt)
570	struct bge_softc *sc;
571	caddr_t dest;
572	int off;
573	int cnt;
574{
575	int err = 0, i;
576	u_int8_t byte = 0;
577
578	for (i = 0; i < cnt; i++) {
579		err = bge_eeprom_getbyte(sc, off + i, &byte);
580		if (err)
581			break;
582		*(dest + i) = byte;
583	}
584
585	return(err ? 1 : 0);
586}
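/*
 * Illustrative use (a sketch; the attach path is outside this
 * excerpt): the station address is expected to be fetched this way,
 * assuming the factory MAC address lives at BGE_EE_MAC_OFFSET + 2:
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *		printf("bge%d: failed to read station address\n",
 *		    sc->bge_unit);
 */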
587
588static int
589bge_miibus_readreg(dev, phy, reg)
590	device_t dev;
591	int phy, reg;
592{
593	struct bge_softc *sc;
594	u_int32_t val, autopoll;
595	int i;
596
597	sc = device_get_softc(dev);
598
599	/*
600	 * Broadcom's own driver always assumes the internal
601	 * PHY is at GMII address 1. On some chips, the PHY responds
602	 * to accesses at all addresses, which could cause us to
603	 * bogusly attach the PHY 32 times at probe time. Always
604	 * restricting the lookup to address 1 is simpler than
605	 * trying to figure out which chip revisions should be
606	 * special-cased.
607	 */
608	if (phy != 1)
609		return(0);
610
611	/* Reading with autopolling on may trigger PCI errors */
612	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
613	if (autopoll & BGE_MIMODE_AUTOPOLL) {
614		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
615		DELAY(40);
616	}
617
618	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
619	    BGE_MIPHY(phy)|BGE_MIREG(reg));
620
621	for (i = 0; i < BGE_TIMEOUT; i++) {
622		val = CSR_READ_4(sc, BGE_MI_COMM);
623		if (!(val & BGE_MICOMM_BUSY))
624			break;
625	}
626
627	if (i == BGE_TIMEOUT) {
628		printf("bge%d: PHY read timed out\n", sc->bge_unit);
629		val = 0;
630		goto done;
631	}
632
633	val = CSR_READ_4(sc, BGE_MI_COMM);
634
635done:
636	if (autopoll & BGE_MIMODE_AUTOPOLL) {
637		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
638		DELAY(40);
639	}
640
641	if (val & BGE_MICOMM_READFAIL)
642		return(0);
643
644	return(val & 0xFFFF);
645}
646
647static int
648bge_miibus_writereg(dev, phy, reg, val)
649	device_t dev;
650	int phy, reg, val;
651{
652	struct bge_softc *sc;
653	u_int32_t autopoll;
654	int i;
655
656	sc = device_get_softc(dev);
657
658	/* Writing with autopolling on may trigger PCI errors */
659	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
660	if (autopoll & BGE_MIMODE_AUTOPOLL) {
661		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
662		DELAY(40);
663	}
664
665	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
666	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
667
668	for (i = 0; i < BGE_TIMEOUT; i++) {
669		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
670			break;
671	}
672
673	if (autopoll & BGE_MIMODE_AUTOPOLL) {
674		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
675		DELAY(40);
676	}
677
678	if (i == BGE_TIMEOUT) {
679		printf("bge%d: PHY read timed out\n", sc->bge_unit);
680		return(0);
681	}
682
683	return(0);
684}
685
686static void
687bge_miibus_statchg(dev)
688	device_t dev;
689{
690	struct bge_softc *sc;
691	struct mii_data *mii;
692
693	sc = device_get_softc(dev);
694	mii = device_get_softc(sc->bge_miibus);
695
696	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
697	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
698		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
699	} else {
700		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
701	}
702
703	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
704		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
705	} else {
706		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
707	}
708
709	return;
710}
711
712/*
713 * Handle events that have triggered interrupts.
714 */
715static void
716bge_handle_events(sc)
717	struct bge_softc		*sc;
718{
719
720	return;
721}
722
723/*
724 * Memory management for jumbo frames.
725 */
726
727static int
728bge_alloc_jumbo_mem(sc)
729	struct bge_softc		*sc;
730{
731	caddr_t			ptr;
732	register int		i, error;
733	struct bge_jpool_entry   *entry;
734
735	/* Create tag for jumbo buffer block */
736
737	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
738	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
739	    NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
740	    &sc->bge_cdata.bge_jumbo_tag);
741
742	if (error) {
743		printf("bge%d: could not allocate jumbo dma tag\n",
744		    sc->bge_unit);
745		return (ENOMEM);
746	}
747
748	/* Allocate DMA'able memory for jumbo buffer block */
749
750	error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
751	    (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
752	    &sc->bge_cdata.bge_jumbo_map);
753
754	if (error)
755		return (ENOMEM);
756
757	SLIST_INIT(&sc->bge_jfree_listhead);
758	SLIST_INIT(&sc->bge_jinuse_listhead);
759
760	/*
761	 * Now divide it up into 9K pieces and save the addresses
762	 * in an array.
763	 */
764	ptr = sc->bge_ldata.bge_jumbo_buf;
765	for (i = 0; i < BGE_JSLOTS; i++) {
766		sc->bge_cdata.bge_jslots[i] = ptr;
767		ptr += BGE_JLEN;
768		entry = malloc(sizeof(struct bge_jpool_entry),
769		    M_DEVBUF, M_NOWAIT);
770		if (entry == NULL) {
771			bge_free_jumbo_mem(sc);
772			sc->bge_ldata.bge_jumbo_buf = NULL;
773			printf("bge%d: no memory for jumbo "
774			    "buffer queue!\n", sc->bge_unit);
775			return(ENOBUFS);
776		}
777		entry->slot = i;
778		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
779		    entry, jpool_entries);
780	}
781
782	return(0);
783}
784
785static void
786bge_free_jumbo_mem(sc)
787	struct bge_softc *sc;
788{
789	int i;
790	struct bge_jpool_entry *entry;
791
792	for (i = 0; i < BGE_JSLOTS; i++) {
793		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
794		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
795		free(entry, M_DEVBUF);
796	}
797
798	/* Destroy jumbo buffer block */
799
800	if (sc->bge_ldata.bge_jumbo_buf)
801		bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
802		    sc->bge_ldata.bge_jumbo_buf,
803		    sc->bge_cdata.bge_jumbo_map);
804
805	if (sc->bge_cdata.bge_jumbo_map)
806		bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
807		    sc->bge_cdata.bge_jumbo_map);
808
809	if (sc->bge_cdata.bge_jumbo_tag)
810		bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
811
812	return;
813}
814
815/*
816 * Allocate a jumbo buffer.
817 */
818static void *
819bge_jalloc(sc)
820	struct bge_softc		*sc;
821{
822	struct bge_jpool_entry   *entry;
823
824	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
825
826	if (entry == NULL) {
827		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
828		return(NULL);
829	}
830
831	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
832	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
833	return(sc->bge_cdata.bge_jslots[entry->slot]);
834}
835
836/*
837 * Release a jumbo buffer.
838 */
839static void
840bge_jfree(buf, args)
841	void *buf;
842	void *args;
843{
844	struct bge_jpool_entry *entry;
845	struct bge_softc *sc;
846	int i;
847
848	/* Extract the softc struct pointer. */
849	sc = (struct bge_softc *)args;
850
851	if (sc == NULL)
852		panic("bge_jfree: can't find softc pointer!");
853
854	/* calculate the slot this buffer belongs to */
855
856	i = ((vm_offset_t)buf
857	     - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
858
859	if ((i < 0) || (i >= BGE_JSLOTS))
860		panic("bge_jfree: asked to free buffer that we don't manage!");
861
862	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
863	if (entry == NULL)
864		panic("bge_jfree: buffer not in use!");
865	entry->slot = i;
866	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
867	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
868
869	return;
870}
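/*
 * Worked example of the slot arithmetic above: the pool is carved
 * into BGE_JLEN-sized pieces, so a buffer that starts two pieces past
 * bge_jumbo_buf gives i = (2 * BGE_JLEN) / BGE_JLEN = 2, and the
 * entry goes back on the free list tagged with slot 2.
 */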
871
872
873/*
874 * Initialize a standard receive ring descriptor.
875 */
876static int
877bge_newbuf_std(sc, i, m)
878	struct bge_softc	*sc;
879	int			i;
880	struct mbuf		*m;
881{
882	struct mbuf		*m_new = NULL;
883	struct bge_rx_bd	*r;
884	struct bge_dmamap_arg	ctx;
885	int			error;
886
887	if (m == NULL) {
888		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
889		if (m_new == NULL) {
890			return(ENOBUFS);
891		}
892
893		MCLGET(m_new, M_DONTWAIT);
894		if (!(m_new->m_flags & M_EXT)) {
895			m_freem(m_new);
896			return(ENOBUFS);
897		}
898		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
899	} else {
900		m_new = m;
901		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
902		m_new->m_data = m_new->m_ext.ext_buf;
903	}
904
905	if (!sc->bge_rx_alignment_bug)
906		m_adj(m_new, ETHER_ALIGN);
907	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
908	r = &sc->bge_ldata.bge_rx_std_ring[i];
909	ctx.bge_maxsegs = 1;
910	ctx.sc = sc;
911	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
912	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
913	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
914	if (error || ctx.bge_maxsegs == 0) {
915		if (m == NULL)
916			m_freem(m_new);
917		return(ENOMEM);
918	}
919	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
920	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
921	r->bge_flags = htole16(BGE_RXBDFLAG_END);
922	r->bge_len = htole16(m_new->m_len);
923	r->bge_idx = htole16(i);
924
925	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
926	    sc->bge_cdata.bge_rx_std_dmamap[i],
927	    BUS_DMASYNC_PREREAD);
928
929	return(0);
930}
931
932/*
933 * Initialize a jumbo receive ring descriptor. This allocates
934 * a jumbo buffer from the pool managed internally by the driver.
935 */
936static int
937bge_newbuf_jumbo(sc, i, m)
938	struct bge_softc *sc;
939	int i;
940	struct mbuf *m;
941{
942	struct mbuf *m_new = NULL;
943	struct bge_rx_bd *r;
944	struct bge_dmamap_arg ctx;
945	int error;
946
947	if (m == NULL) {
948		caddr_t			buf = NULL;
949
950		/* Allocate the mbuf. */
951		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
952		if (m_new == NULL) {
953			return(ENOBUFS);
954		}
955
956		/* Allocate the jumbo buffer */
957		buf = bge_jalloc(sc);
958		if (buf == NULL) {
959			m_freem(m_new);
960			printf("bge%d: jumbo allocation failed "
961			    "-- packet dropped!\n", sc->bge_unit);
962			return(ENOBUFS);
963		}
964
965		/* Attach the buffer to the mbuf. */
966		m_new->m_data = (void *) buf;
967		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
968		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
969		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
970	} else {
971		m_new = m;
972		m_new->m_data = m_new->m_ext.ext_buf;
973		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
974	}
975
976	if (!sc->bge_rx_alignment_bug)
977		m_adj(m_new, ETHER_ALIGN);
978	/* Set up the descriptor. */
979	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
980	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
981	ctx.bge_maxsegs = 1;
982	ctx.sc = sc;
983	error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
984	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
985	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
986	if (error || ctx.bge_maxsegs == 0) {
987		if (m == NULL)
988			m_freem(m_new);
989		return(ENOMEM);
990	}
991	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
992	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
993	r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
994	r->bge_len = htole16(m_new->m_len);
995	r->bge_idx = htole16(i);
996
997	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
998	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
999	    BUS_DMASYNC_PREREAD);
1000
1001	return(0);
1002}
1003
1004/*
1005 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1006 * that's 1MB of memory (512 x 2048 bytes), which is a lot. For now, we fill only the first
1007 * 256 ring entries and hope that our CPU is fast enough to keep up with
1008 * the NIC.
1009 */
1010static int
1011bge_init_rx_ring_std(sc)
1012	struct bge_softc *sc;
1013{
1014	int i;
1015
1016	for (i = 0; i < BGE_SSLOTS; i++) {
1017		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1018			return(ENOBUFS);
1019	}
1020
1021	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1022	    sc->bge_cdata.bge_rx_std_ring_map,
1023	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1024
1025	sc->bge_std = i - 1;
1026	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1027
1028	return(0);
1029}
1030
1031static void
1032bge_free_rx_ring_std(sc)
1033	struct bge_softc *sc;
1034{
1035	int i;
1036
1037	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1038		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1039			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1040			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1041			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1042			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1043		}
1044		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1045		    sizeof(struct bge_rx_bd));
1046	}
1047
1048	return;
1049}
1050
1051static int
1052bge_init_rx_ring_jumbo(sc)
1053	struct bge_softc *sc;
1054{
1055	int i;
1056	struct bge_rcb *rcb;
1057
1058	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1059		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1060			return(ENOBUFS);
1061	}
1062
1063	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1064	    sc->bge_cdata.bge_rx_jumbo_ring_map,
1065	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1066
1067	sc->bge_jumbo = i - 1;
1068
1069	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1070	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1071	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1072
1073	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1074
1075	return(0);
1076}
1077
1078static void
1079bge_free_rx_ring_jumbo(sc)
1080	struct bge_softc *sc;
1081{
1082	int i;
1083
1084	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1085		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1086			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1087			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1088			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1089			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1090		}
1091		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1092		    sizeof(struct bge_rx_bd));
1093	}
1094
1095	return;
1096}
1097
1098static void
1099bge_free_tx_ring(sc)
1100	struct bge_softc *sc;
1101{
1102	int i;
1103
1104	if (sc->bge_ldata.bge_tx_ring == NULL)
1105		return;
1106
1107	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1108		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1109			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1110			sc->bge_cdata.bge_tx_chain[i] = NULL;
1111			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1112			    sc->bge_cdata.bge_tx_dmamap[i]);
1113		}
1114		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1115		    sizeof(struct bge_tx_bd));
1116	}
1117
1118	return;
1119}
1120
1121static int
1122bge_init_tx_ring(sc)
1123	struct bge_softc *sc;
1124{
1125	sc->bge_txcnt = 0;
1126	sc->bge_tx_saved_considx = 0;
1127
1128	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1129	/* 5700 b2 errata */
1130	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1131		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1132
1133	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1134	/* 5700 b2 errata */
1135	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1136		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1137
1138	return(0);
1139}
1140
1141static void
1142bge_setmulti(sc)
1143	struct bge_softc *sc;
1144{
1145	struct ifnet *ifp;
1146	struct ifmultiaddr *ifma;
1147	u_int32_t hashes[4] = { 0, 0, 0, 0 };
1148	int h, i;
1149
1150	BGE_LOCK_ASSERT(sc);
1151
1152	ifp = &sc->arpcom.ac_if;
1153
1154	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1155		for (i = 0; i < 4; i++)
1156			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1157		return;
1158	}
1159
1160	/* First, zot all the existing filters. */
1161	for (i = 0; i < 4; i++)
1162		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1163
1164	/* Now program new ones. */
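	/*
	 * Each address hashes to 7 bits of its little-endian CRC32:
	 * the top two bits select one of the four BGE_MAR words and
	 * the low five bits select a bit within it. For example,
	 * h = 0x4A selects word (0x4A & 0x60) >> 5 = 2 and bit
	 * (0x4A & 0x1F) = 10, i.e. hashes[2] |= 1 << 10.
	 */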
1165	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1166		if (ifma->ifma_addr->sa_family != AF_LINK)
1167			continue;
1168		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1169		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1170		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1171	}
1172
1173	for (i = 0; i < 4; i++)
1174		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1175
1176	return;
1177}
1178
1179/*
1180 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1181 * self-test results.
1182 */
1183static int
1184bge_chipinit(sc)
1185	struct bge_softc *sc;
1186{
1187	int			i;
1188	u_int32_t		dma_rw_ctl;
1189
1190	/* Set endianness before we access any non-PCI registers. */
1191#if BYTE_ORDER == BIG_ENDIAN
1192	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1193	    BGE_BIGENDIAN_INIT, 4);
1194#else
1195	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1196	    BGE_LITTLEENDIAN_INIT, 4);
1197#endif
1198
1199	/*
1200	 * Check the 'ROM failed' bit on the RX CPU to see if
1201	 * self-tests passed.
1202	 */
1203	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1204		printf("bge%d: RX CPU self-diagnostics failed!\n",
1205		    sc->bge_unit);
1206		return(ENODEV);
1207	}
1208
1209	/* Clear the MAC control register */
1210	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1211
1212	/*
1213	 * Clear the MAC statistics block in the NIC's
1214	 * internal memory.
1215	 */
1216	for (i = BGE_STATS_BLOCK;
1217	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1218		BGE_MEMWIN_WRITE(sc, i, 0);
1219
1220	for (i = BGE_STATUS_BLOCK;
1221	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1222		BGE_MEMWIN_WRITE(sc, i, 0);
1223
1224	/* Set up the PCI DMA control register. */
1225	if (sc->bge_pcie) {
1226		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1227		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1228		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1229	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1230	    BGE_PCISTATE_PCI_BUSMODE) {
1231		/* Conventional PCI bus */
1232		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1233		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1234		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1235		    (0x0F);
1236	} else {
1237		/* PCI-X bus */
1238		/*
1239		 * The 5704 uses a different encoding of read/write
1240		 * watermarks.
1241		 */
1242		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1243			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1244			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1245			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1246		else
1247			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1248			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1249			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1250			    (0x0F);
1251
1252		/*
1253		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1254		 * for hardware bugs.
1255		 */
1256		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1257		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1258			u_int32_t tmp;
1259
1260			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1261			if (tmp == 0x6 || tmp == 0x7)
1262				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1263		}
1264	}
1265
1266	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1267	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1268	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1269	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1270		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1271	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1272
1273	/*
1274	 * Set up general mode register.
1275	 */
1276	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1277	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1278	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1279	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1280
1281	/*
1282	 * Disable memory write invalidate.  Apparently it is not supported
1283	 * properly by these devices.
1284	 */
1285	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1286
1287#ifdef __brokenalpha__
1288	/*
1289	 * Must ensure that we do not cross an 8K (byte) boundary
1290	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1291	 * restriction on some ALPHA platforms with early revision
1292	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1293	 */
1294	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1295	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1296#endif
1297
1298	/* Set the timer prescaler (always 66MHz) */
1299	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1300
1301	return(0);
1302}
1303
1304static int
1305bge_blockinit(sc)
1306	struct bge_softc *sc;
1307{
1308	struct bge_rcb *rcb;
1309	volatile struct bge_rcb *vrcb;
1310	int i;
1311
1312	/*
1313	 * Initialize the memory window pointer register so that
1314	 * we can access the first 32K of internal NIC RAM. This will
1315	 * allow us to set up the TX send ring RCBs and the RX return
1316	 * ring RCBs, plus other things which live in NIC memory.
1317	 */
1318	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1319
1320	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1321
1322	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1323	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1324		/* Configure mbuf memory pool */
1325		if (sc->bge_extram) {
1326			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1327			    BGE_EXT_SSRAM);
1328			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1329				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1330			else
1331				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1332		} else {
1333			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1334			    BGE_BUFFPOOL_1);
1335			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1336				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1337			else
1338				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1339		}
1340
1341		/* Configure DMA resource pool */
1342		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1343		    BGE_DMA_DESCRIPTORS);
1344		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1345	}
1346
1347	/* Configure mbuf pool watermarks */
1348	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1349	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1350		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1351		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1352	} else {
1353		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1354		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1355	}
1356	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1357
1358	/* Configure DMA resource watermarks */
1359	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1360	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1361
1362	/* Enable buffer manager */
1363	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1364	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1365		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1366		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1367
1368		/* Poll for buffer manager start indication */
1369		for (i = 0; i < BGE_TIMEOUT; i++) {
1370			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1371				break;
1372			DELAY(10);
1373		}
1374
1375		if (i == BGE_TIMEOUT) {
1376			printf("bge%d: buffer manager failed to start\n",
1377			    sc->bge_unit);
1378			return(ENXIO);
1379		}
1380	}
1381
1382	/* Enable flow-through queues */
1383	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1384	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1385
1386	/* Wait until queue initialization is complete */
1387	for (i = 0; i < BGE_TIMEOUT; i++) {
1388		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1389			break;
1390		DELAY(10);
1391	}
1392
1393	if (i == BGE_TIMEOUT) {
1394		printf("bge%d: flow-through queue init failed\n",
1395		    sc->bge_unit);
1396		return(ENXIO);
1397	}
1398
1399	/* Initialize the standard RX ring control block */
1400	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1401	rcb->bge_hostaddr.bge_addr_lo =
1402	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1403	rcb->bge_hostaddr.bge_addr_hi =
1404	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1405	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1406	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1407	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1408	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1409		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1410	else
1411		rcb->bge_maxlen_flags =
1412		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1413	if (sc->bge_extram)
1414		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1415	else
1416		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1417	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1418	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1419
1420	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1421	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1422
1423	/*
1424	 * Initialize the jumbo RX ring control block
1425	 * We set the 'ring disabled' bit in the flags
1426	 * field until we're actually ready to start
1427	 * using this ring (i.e. once we set the MTU
1428	 * high enough to require it).
1429	 */
1430	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1431	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1432		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1433
1434		rcb->bge_hostaddr.bge_addr_lo =
1435		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1436		rcb->bge_hostaddr.bge_addr_hi =
1437		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1438		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1439		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1440		    BUS_DMASYNC_PREREAD);
1441		rcb->bge_maxlen_flags =
1442		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1443		    BGE_RCB_FLAG_RING_DISABLED);
1444		if (sc->bge_extram)
1445			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1446		else
1447			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1448		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1449		    rcb->bge_hostaddr.bge_addr_hi);
1450		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1451		    rcb->bge_hostaddr.bge_addr_lo);
1452
1453		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1454		    rcb->bge_maxlen_flags);
1455		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1456
1457		/* Set up dummy disabled mini ring RCB */
1458		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1459		rcb->bge_maxlen_flags =
1460		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1461		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1462		    rcb->bge_maxlen_flags);
1463	}
1464
1465	/*
1466	 * Set the BD ring replenish thresholds. The recommended
1467	 * values are 1/8th the number of descriptors allocated to
1468	 * each ring.
1469	 */
1470	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1471	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1472
1473	/*
1474	 * Disable all unused send rings by setting the 'ring disabled'
1475	 * bit in the flags field of all the TX send ring control blocks.
1476	 * These are located in NIC memory.
1477	 */
1478	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1479	    BGE_SEND_RING_RCB);
1480	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1481		vrcb->bge_maxlen_flags =
1482		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1483		vrcb->bge_nicaddr = 0;
1484		vrcb++;
1485	}
1486
1487	/* Configure TX RCB 0 (we use only the first ring) */
1488	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1489	    BGE_SEND_RING_RCB);
1490	vrcb->bge_hostaddr.bge_addr_lo =
1491	    htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1492	vrcb->bge_hostaddr.bge_addr_hi =
1493	    htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1494	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1495	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1496	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1497		vrcb->bge_maxlen_flags =
1498		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1499
1500	/* Disable all unused RX return rings */
1501	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1502	    BGE_RX_RETURN_RING_RCB);
1503	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1504		vrcb->bge_hostaddr.bge_addr_hi = 0;
1505		vrcb->bge_hostaddr.bge_addr_lo = 0;
1506		vrcb->bge_maxlen_flags =
1507		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1508		    BGE_RCB_FLAG_RING_DISABLED);
1509		vrcb->bge_nicaddr = 0;
1510		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1511		    (i * (sizeof(u_int64_t))), 0);
1512		vrcb++;
1513	}
1514
1515	/* Initialize RX ring indexes */
1516	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1517	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1518	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1519
1520	/*
1521	 * Set up RX return ring 0
1522	 * Note that the NIC address for RX return rings is 0x00000000.
1523	 * The return rings live entirely within the host, so the
1524	 * nicaddr field in the RCB isn't used.
1525	 */
1526	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1527	    BGE_RX_RETURN_RING_RCB);
1528	vrcb->bge_hostaddr.bge_addr_lo =
1529	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1530	vrcb->bge_hostaddr.bge_addr_hi =
1531	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1532	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1533	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1534	vrcb->bge_nicaddr = 0x00000000;
1535	vrcb->bge_maxlen_flags =
1536	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1537
1538	/* Set random backoff seed for TX */
1539	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1540	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1541	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1542	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1543	    BGE_TX_BACKOFF_SEED_MASK);
1544
1545	/* Set inter-packet gap */
1546	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
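	/*
	 * (0x2620 presumably packs a slot time of 0x20 with IPG 6 and
	 * IPG CRS 2, Broadcom's recommended defaults; the register's
	 * exact field layout is an assumption here.)
	 */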
1547
1548	/*
1549	 * Specify which ring to use for packets that don't match
1550	 * any RX rules.
1551	 */
1552	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1553
1554	/*
1555	 * Configure number of RX lists. One interrupt distribution
1556	 * list, sixteen active lists, one bad frames class.
1557	 */
1558	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1559
1560	/* Initialize RX list placement stats mask. */
1561	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1562	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1563
1564	/* Disable host coalescing until we get it set up */
1565	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1566
1567	/* Poll to make sure it's shut down. */
1568	for (i = 0; i < BGE_TIMEOUT; i++) {
1569		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1570			break;
1571		DELAY(10);
1572	}
1573
1574	if (i == BGE_TIMEOUT) {
1575		printf("bge%d: host coalescing engine failed to idle\n",
1576		    sc->bge_unit);
1577		return(ENXIO);
1578	}
1579
1580	/* Set up host coalescing defaults */
1581	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1582	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1583	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1584	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1585	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1586	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1587		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1588		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1589	}
1590	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1591	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1592
1593	/* Set up address of statistics block */
1594	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1595	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1596		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1597		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1598		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1599		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1600		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1601		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1602		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1603	}
1604
1605	/* Set up address of status block */
1606	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1607	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1608	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1609	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1610	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1611	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1612	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1613	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1614
1615	/* Turn on host coalescing state machine */
1616	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1617
1618	/* Turn on RX BD completion state machine and enable attentions */
1619	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1620	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1621
1622	/* Turn on RX list placement state machine */
1623	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1624
1625	/* Turn on RX list selector state machine. */
1626	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1627	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1628		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1629
1630	/* Turn on DMA, clear stats */
1631	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1632	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1633	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1634	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1635	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1636
1637	/* Set misc. local control, enable interrupts on attentions */
1638	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1639
1640#ifdef notdef
1641	/* Assert GPIO pins for PHY reset */
1642	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1643	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1644	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1645	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1646#endif
1647
1648	/* Turn on DMA completion state machine */
1649	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1650	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1651		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1652
1653	/* Turn on write DMA state machine */
1654	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1655	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1656
1657	/* Turn on read DMA state machine */
1658	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1659	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1660
1661	/* Turn on RX data completion state machine */
1662	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1663
1664	/* Turn on RX BD initiator state machine */
1665	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1666
1667	/* Turn on RX data and RX BD initiator state machine */
1668	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1669
1670	/* Turn on Mbuf cluster free state machine */
1671	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1672	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1673		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1674
1675	/* Turn on send BD completion state machine */
1676	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1677
1678	/* Turn on send data completion state machine */
1679	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1680
1681	/* Turn on send data initiator state machine */
1682	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1683
1684	/* Turn on send BD initiator state machine */
1685	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1686
1687	/* Turn on send BD selector state machine */
1688	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1689
1690	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1691	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1692	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1693
1694	/* ack/clear link change events */
1695	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1696	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1697	    BGE_MACSTAT_LINK_CHANGED);
1698	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1699
1700	/* Enable PHY auto polling (for MII/GMII only) */
1701	if (sc->bge_tbi) {
1702		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1703	} else {
1704		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1705		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1706			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1707			    BGE_EVTENB_MI_INTERRUPT);
1708	}
1709
1710	/* Enable link state change attentions. */
1711	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1712
1713	return(0);
1714}
1715
1716/*
1717 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1718 * against our list and return its name if we find a match. Note
1719 * that since the Broadcom controller contains VPD support, we
1720 * can get the device name string from the controller itself instead
1721 * of the compiled-in string. This is a little slow, but it guarantees
1722 * we'll always announce the right product name.
1723 */
1724static int
1725bge_probe(dev)
1726	device_t dev;
1727{
1728	struct bge_type *t;
1729	struct bge_softc *sc;
1730	char *descbuf;
1731
1732	t = bge_devs;
1733
1734	sc = device_get_softc(dev);
1735	bzero(sc, sizeof(struct bge_softc));
1736	sc->bge_unit = device_get_unit(dev);
1737	sc->bge_dev = dev;
1738
1739	while (t->bge_name != NULL) {
1740		if ((pci_get_vendor(dev) == t->bge_vid) &&
1741		    (pci_get_device(dev) == t->bge_did)) {
1742#ifdef notdef
1743			bge_vpd_read(sc);
1744			device_set_desc(dev, sc->bge_vpd_prodname);
1745#endif
1746			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1747			if (descbuf == NULL)
1748				return(ENOMEM);
1749			snprintf(descbuf, BGE_DEVDESC_MAX,
1750			    "%s, ASIC rev. %#04x", t->bge_name,
1751			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1752			device_set_desc_copy(dev, descbuf);
1753			if (pci_get_subvendor(dev) == DELL_VENDORID)
1754				sc->bge_no_3_led = 1;
1755			free(descbuf, M_TEMP);
1756			return(0);
1757		}
1758		t++;
1759	}
1760
1761	return(ENXIO);
1762}
1763
1764static void
1765bge_dma_free(sc)
1766	struct bge_softc *sc;
1767{
1768	int i;
1769
1770
1771	/* Destroy DMA maps for RX buffers */
1772
1773	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1774		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1775			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1776			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1777	}
1778
1779	/* Destroy DMA maps for jumbo RX buffers */
1780
1781	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1782		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1783			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1784			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1785	}
1786
1787	/* Destroy DMA maps for TX buffers */
1788
1789	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1790		if (sc->bge_cdata.bge_tx_dmamap[i])
1791			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1792			    sc->bge_cdata.bge_tx_dmamap[i]);
1793	}
1794
1795	if (sc->bge_cdata.bge_mtag)
1796		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1797
1798
1799	/* Destroy standard RX ring */
1800
1801	if (sc->bge_ldata.bge_rx_std_ring)
1802		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1803		    sc->bge_ldata.bge_rx_std_ring,
1804		    sc->bge_cdata.bge_rx_std_ring_map);
1805
1806	if (sc->bge_cdata.bge_rx_std_ring_map) {
1807		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1808		    sc->bge_cdata.bge_rx_std_ring_map);
1809		bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1810		    sc->bge_cdata.bge_rx_std_ring_map);
1811	}
1812
1813	if (sc->bge_cdata.bge_rx_std_ring_tag)
1814		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1815
1816	/* Destroy jumbo RX ring */
1817
	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1829
1830	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1831		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1832
1833	/* Destroy RX return ring */
1834
	if (sc->bge_cdata.bge_rx_return_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);
1846
1847	if (sc->bge_cdata.bge_rx_return_ring_tag)
1848		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1849
1850	/* Destroy TX ring */
1851
	if (sc->bge_cdata.bge_tx_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);
1863
1864	if (sc->bge_cdata.bge_tx_ring_tag)
1865		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1866
1867	/* Destroy status block */
1868
	if (sc->bge_cdata.bge_status_map)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);
1880
1881	if (sc->bge_cdata.bge_status_tag)
1882		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1883
1884	/* Destroy statistics block */
1885
	if (sc->bge_cdata.bge_stats_map)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);
1897
1898	if (sc->bge_cdata.bge_stats_tag)
1899		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1900
1901	/* Destroy the parent tag */
1902
1903	if (sc->bge_cdata.bge_parent_tag)
1904		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1905
1906	return;
1907}
1908
1909static int
1910bge_dma_alloc(dev)
1911	device_t dev;
1912{
1913	struct bge_softc *sc;
1914	int nseg, i, error;
1915	struct bge_dmamap_arg ctx;
1916
1917	sc = device_get_softc(dev);
1918
1919	/*
1920	 * Allocate the parent bus DMA tag appropriate for PCI.
1921	 */
1922#define BGE_NSEG_NEW 32
1923	error = bus_dma_tag_create(NULL,	/* parent */
1924			PAGE_SIZE, 0,		/* alignment, boundary */
1925			BUS_SPACE_MAXADDR,	/* lowaddr */
1926			BUS_SPACE_MAXADDR_32BIT,/* highaddr */
1927			NULL, NULL,		/* filter, filterarg */
1928			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1929			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1930			BUS_DMA_ALLOCNOW,	/* flags */
1931			NULL, NULL,		/* lockfunc, lockarg */
1932			&sc->bge_cdata.bge_parent_tag);

	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}
1933
1934	/*
1935	 * Create tag for RX mbufs.
1936	 */
1937	nseg = 32;
1938	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
1939	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1940	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
1941	    &sc->bge_cdata.bge_mtag);
1942
1943	if (error) {
1944		device_printf(dev, "could not allocate dma tag\n");
1945		return (ENOMEM);
1946	}
1947
1948	/* Create DMA maps for RX buffers */
1949
1950	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1951		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1952			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1953		if (error) {
1954			device_printf(dev, "can't create DMA map for RX\n");
1955			return(ENOMEM);
1956		}
1957	}
1958
1959	/* Create DMA maps for TX buffers */
1960
1961	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1962		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1963			    &sc->bge_cdata.bge_tx_dmamap[i]);
1964		if (error) {
1965			device_printf(dev, "can't create DMA map for TX\n");
1966			return(ENOMEM);
1967		}
1968	}
1969
1970	/* Create tag for standard RX ring */
1971
1972	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1973	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1974	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1975	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1976
1977	if (error) {
1978		device_printf(dev, "could not allocate dma tag\n");
1979		return (ENOMEM);
1980	}
1981
1982	/* Allocate DMA'able memory for standard RX ring */
1983
1984	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1985	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1986	    &sc->bge_cdata.bge_rx_std_ring_map);
1987	if (error)
1988		return (ENOMEM);
1989
1990	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1991
1992	/* Load the address of the standard RX ring */
1993
1994	ctx.bge_maxsegs = 1;
1995	ctx.sc = sc;
1996
1997	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1998	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1999	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2000
2001	if (error)
2002		return (ENOMEM);
2003
2004	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
2005
2006	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2007	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2008
2009		/*
2010		 * Create tag for jumbo mbufs.
2011		 * This is really a bit of a kludge. We allocate a special
2012		 * jumbo buffer pool which (thanks to the way our DMA
2013		 * memory allocation works) will consist of contiguous
2014		 * pages. This means that even though a jumbo buffer might
2015		 * be larger than a page size, we don't really need to
2016		 * map it into more than one DMA segment. However, the
2017		 * default mbuf tag will result in multi-segment mappings,
2018		 * so we have to create a special jumbo mbuf tag that
2019		 * lets us get away with mapping the jumbo buffers as
2020		 * a single segment. I think eventually the driver should
2021		 * be changed so that it uses ordinary mbufs and cluster
2022		 * buffers, i.e. jumbo frames can span multiple DMA
2023		 * descriptors. But that's a project for another day.
2024		 */
2025
2026		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2027		    ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2028		    NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
2029		    &sc->bge_cdata.bge_mtag_jumbo);
2030
2031		if (error) {
2032			device_printf(dev, "could not allocate dma tag\n");
2033			return (ENOMEM);
2034		}
2035
2036		/* Create tag for jumbo RX ring */
2037
2038		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2039		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2040		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2041		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2042
2043		if (error) {
2044			device_printf(dev, "could not allocate dma tag\n");
2045			return (ENOMEM);
2046		}
2047
2048		/* Allocate DMA'able memory for jumbo RX ring */
2049
2050		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2051		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
2052		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
2053		if (error)
2054			return (ENOMEM);
2055
2056		bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
2057		    BGE_JUMBO_RX_RING_SZ);
2058
2059		/* Load the address of the jumbo RX ring */
2060
2061		ctx.bge_maxsegs = 1;
2062		ctx.sc = sc;
2063
2064		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2065		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2066		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2067		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2068
2069		if (error)
2070			return (ENOMEM);
2071
2072		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2073
2074		/* Create DMA maps for jumbo RX buffers */
2075
2076		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2077			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2078				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2079			if (error) {
2080				device_printf(dev,
2081				    "can't create DMA map for jumbo RX\n");
2082				return(ENOMEM);
2083			}
2084		}
2085
2086	}
2087
2088	/* Create tag for RX return ring */
2089
2090	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2091	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2092	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2093	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2094
2095	if (error) {
2096		device_printf(dev, "could not allocate dma tag\n");
2097		return (ENOMEM);
2098	}
2099
2100	/* Allocate DMA'able memory for RX return ring */
2101
2102	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2103	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2104	    &sc->bge_cdata.bge_rx_return_ring_map);
2105	if (error)
2106		return (ENOMEM);
2107
2108	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2109	    BGE_RX_RTN_RING_SZ(sc));
2110
2111	/* Load the address of the RX return ring */
2112
2113	ctx.bge_maxsegs = 1;
2114	ctx.sc = sc;
2115
2116	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2117	    sc->bge_cdata.bge_rx_return_ring_map,
2118	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2119	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2120
2121	if (error)
2122		return (ENOMEM);
2123
2124	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2125
2126	/* Create tag for TX ring */
2127
2128	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2129	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2130	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2131	    &sc->bge_cdata.bge_tx_ring_tag);
2132
2133	if (error) {
2134		device_printf(dev, "could not allocate dma tag\n");
2135		return (ENOMEM);
2136	}
2137
2138	/* Allocate DMA'able memory for TX ring */
2139
2140	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2141	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2142	    &sc->bge_cdata.bge_tx_ring_map);
2143	if (error)
2144		return (ENOMEM);
2145
2146	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2147
2148	/* Load the address of the TX ring */
2149
2150	ctx.bge_maxsegs = 1;
2151	ctx.sc = sc;
2152
2153	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2154	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2155	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2156
2157	if (error)
2158		return (ENOMEM);
2159
2160	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2161
2162	/* Create tag for status block */
2163
2164	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2165	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2166	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2167	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2168
2169	if (error) {
2170		device_printf(dev, "could not allocate dma tag\n");
2171		return (ENOMEM);
2172	}
2173
2174	/* Allocate DMA'able memory for status block */
2175
2176	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2177	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2178	    &sc->bge_cdata.bge_status_map);
2179	if (error)
2180		return (ENOMEM);
2181
2182	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2183
2184	/* Load the address of the status block */
2185
2186	ctx.sc = sc;
2187	ctx.bge_maxsegs = 1;
2188
2189	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2190	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2191	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2192
2193	if (error)
2194		return (ENOMEM);
2195
2196	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2197
2198	/* Create tag for statistics block */
2199
2200	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2201	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2202	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2203	    &sc->bge_cdata.bge_stats_tag);
2204
2205	if (error) {
2206		device_printf(dev, "could not allocate dma tag\n");
2207		return (ENOMEM);
2208	}
2209
2210	/* Allocate DMA'able memory for statistics block */
2211
2212	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2213	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2214	    &sc->bge_cdata.bge_stats_map);
2215	if (error)
2216		return (ENOMEM);
2217
2218	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2219
2220	/* Load the address of the statistics block */
2221
2222	ctx.sc = sc;
2223	ctx.bge_maxsegs = 1;
2224
2225	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2226	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2227	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2228
2229	if (error)
2230		return (ENOMEM);
2231
2232	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2233
2234	return(0);
2235}
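
/*
 * For reference, a minimal sketch of the kind of callback handed to
 * the bus_dmamap_load() calls above (the driver's actual
 * bge_dma_map_addr() lives earlier in this file).  All it needs to do
 * is record the bus address of a single-segment mapping in the
 * caller's bge_dmamap_arg:
 *
 *	static void
 *	example_dma_map_addr(void *arg, bus_dma_segment_t *segs,
 *	    int nseg, int error)
 *	{
 *		struct bge_dmamap_arg *ctx = arg;
 *
 *		if (error)
 *			return;
 *		if (nseg > ctx->bge_maxsegs) {
 *			ctx->bge_maxsegs = 0;
 *			return;
 *		}
 *		ctx->bge_busaddr = segs[0].ds_addr;
 *	}
 *
 * The bge_maxsegs/bge_busaddr members are the ones consulted by
 * bge_dma_alloc() above; example_dma_map_addr is a made-up name.
 */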
2236
2237static int
2238bge_attach(dev)
2239	device_t dev;
2240{
2241	struct ifnet *ifp;
2242	struct bge_softc *sc;
2243	u_int32_t hwcfg = 0;
2244	u_int32_t mac_addr = 0;
2245	int unit, error = 0, rid;
2246
2247	sc = device_get_softc(dev);
2248	unit = device_get_unit(dev);
2249	sc->bge_dev = dev;
2250	sc->bge_unit = unit;
2251
2252	/*
2253	 * Map control/status registers.
2254	 */
2255	pci_enable_busmaster(dev);
2256
2257	rid = BGE_PCI_BAR0;
2258	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2259	    RF_ACTIVE|PCI_RF_DENSE);
2260
2261	if (sc->bge_res == NULL) {
2262		printf("bge%d: couldn't map memory\n", unit);
2263		error = ENXIO;
2264		goto fail;
2265	}
2266
2267	sc->bge_btag = rman_get_bustag(sc->bge_res);
2268	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2269	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2270
2271	/* Allocate interrupt */
2272	rid = 0;
2273
2274	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2275	    RF_SHAREABLE | RF_ACTIVE);
2276
2277	if (sc->bge_irq == NULL) {
2278		printf("bge%d: couldn't map interrupt\n", unit);
2279		error = ENXIO;
2280		goto fail;
2281	}
2282
2283	sc->bge_unit = unit;
2284
2285	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2286
2287	/* Save ASIC rev. */
2288
2289	sc->bge_chipid =
2290	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2291	    BGE_PCIMISCCTL_ASICREV;
2292	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2293	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2294
2295	/*
2296	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2297	 * PCI-Express?
2298	 */
2299	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2300		u_int32_t v;
2301
2302		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2303		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2304			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2305			if ((v & 0xff) == BGE_PCIE_CAPID)
2306				sc->bge_pcie = 1;
2307		}
2308	}
2309
2310	/* Try to reset the chip. */
2311	bge_reset(sc);
2312
2313	if (bge_chipinit(sc)) {
2314		printf("bge%d: chip initialization failed\n", sc->bge_unit);
2315		bge_release_resources(sc);
2316		error = ENXIO;
2317		goto fail;
2318	}
2319
2320	/*
2321	 * Get the station address from NIC memory or, failing that, the EEPROM.
2322	 */
2323	mac_addr = bge_readmem_ind(sc, 0x0c14);
2324	if ((mac_addr >> 16) == 0x484b) {
2325		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2326		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2327		mac_addr = bge_readmem_ind(sc, 0x0c18);
2328		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2329		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2330		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2331		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2332	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2333	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2334		printf("bge%d: failed to read station address\n", unit);
2335		bge_release_resources(sc);
2336		error = ENXIO;
2337		goto fail;
2338	}
2339
2340	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2341	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2342	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2343		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2344	else
2345		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2346
2347	if (bge_dma_alloc(dev)) {
2348		printf("bge%d: failed to allocate DMA resources\n",
2349		    sc->bge_unit);
2350		bge_release_resources(sc);
2351		error = ENXIO;
2352		goto fail;
2353	}
2354
2355	/*
2356	 * Try to allocate memory for jumbo buffers.
2357	 * Neither the 5705 nor the 5750 appears to support jumbo frames.
2358	 */
2359	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2360	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2361		if (bge_alloc_jumbo_mem(sc)) {
2362			printf("bge%d: jumbo buffer allocation "
2363			    "failed\n", sc->bge_unit);
2364			bge_release_resources(sc);
2365			error = ENXIO;
2366			goto fail;
2367		}
2368	}
2369
2370	/* Set default tuneable values. */
2371	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2372	sc->bge_rx_coal_ticks = 150;
2373	sc->bge_tx_coal_ticks = 150;
2374	sc->bge_rx_max_coal_bds = 64;
2375	sc->bge_tx_max_coal_bds = 128;
2376
2377	/* Set up ifnet structure */
2378	ifp = &sc->arpcom.ac_if;
2379	ifp->if_softc = sc;
2380	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2381	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2382	ifp->if_ioctl = bge_ioctl;
2383	ifp->if_start = bge_start;
2384	ifp->if_watchdog = bge_watchdog;
2385	ifp->if_init = bge_init;
2386	ifp->if_mtu = ETHERMTU;
2387	ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
2388	ifp->if_hwassist = BGE_CSUM_FEATURES;
2389	/* NB: the code for RX csum offload is disabled for now */
2390	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2391	    IFCAP_VLAN_MTU;
2392	ifp->if_capenable = ifp->if_capabilities;
2393
2394	/*
2395	 * Figure out what sort of media we have by checking the
2396	 * hardware config word in the first 32k of NIC internal memory,
2397	 * or fall back to examining the EEPROM if necessary.
2398	 * Note: on some BCM5700 cards, this value appears to be unset.
2399	 * If that's the case, we have to rely on identifying the NIC
2400	 * by its PCI subsystem ID, as we do below for the SysKonnect
2401	 * SK-9D41.
2402	 */
2403	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2404		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2405	else {
2406		bge_read_eeprom(sc, (caddr_t)&hwcfg,
2407				BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2408		hwcfg = ntohl(hwcfg);
2409	}
2410
2411	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2412		sc->bge_tbi = 1;
2413
2414	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2415	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2416		sc->bge_tbi = 1;
2417
2418	if (sc->bge_tbi) {
2419		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2420		    bge_ifmedia_upd, bge_ifmedia_sts);
2421		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2422		ifmedia_add(&sc->bge_ifmedia,
2423		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2424		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2425		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2426		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2427	} else {
2428		/*
2429		 * Do transceiver setup.
2430		 */
2431		if (mii_phy_probe(dev, &sc->bge_miibus,
2432		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2433			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2434			bge_release_resources(sc);
			/* Jumbo memory is only allocated on chips that support it. */
			if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
			    sc->bge_asicrev != BGE_ASICREV_BCM5750)
				bge_free_jumbo_mem(sc);
2436			error = ENXIO;
2437			goto fail;
2438		}
2439	}
2440
2441	/*
2442	 * When using the BCM5701 in PCI-X mode, data corruption has
2443	 * been observed in the first few bytes of some received packets.
2444	 * Aligning the packet buffer in memory eliminates the corruption.
2445	 * Unfortunately, this misaligns the packet payloads.  On platforms
2446	 * which do not support unaligned accesses, we will realign the
2447	 * payloads by copying the received packets.
2448	 */
2449	switch (sc->bge_chipid) {
2450	case BGE_CHIPID_BCM5701_A0:
2451	case BGE_CHIPID_BCM5701_B0:
2452	case BGE_CHIPID_BCM5701_B2:
2453	case BGE_CHIPID_BCM5701_B5:
2454		/* If in PCI-X mode, work around the alignment bug. */
2455		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2456		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2457		    BGE_PCISTATE_PCI_BUSSPEED)
2458			sc->bge_rx_alignment_bug = 1;
2459		break;
2460	}
2461
2462	/*
2463	 * Call MI attach routine.
2464	 */
2465	ether_ifattach(ifp, sc->arpcom.ac_enaddr);
2466	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2467
2468	/*
2469	 * Hookup IRQ last.
2470	 */
2471	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2472	   bge_intr, sc, &sc->bge_intrhand);
2473
2474	if (error) {
2475		bge_release_resources(sc);
2476		printf("bge%d: couldn't set up irq\n", unit);
2477	}
2478
2479fail:
2480	return(error);
2481}
2482
2483static int
2484bge_detach(dev)
2485	device_t dev;
2486{
2487	struct bge_softc *sc;
2488	struct ifnet *ifp;
2489
2490	sc = device_get_softc(dev);
2491	ifp = &sc->arpcom.ac_if;
2492
2493	BGE_LOCK(sc);
2494	bge_stop(sc);
2495	bge_reset(sc);
2496	BGE_UNLOCK(sc);
2497
2498	ether_ifdetach(ifp);
2499
2500	if (sc->bge_tbi) {
2501		ifmedia_removeall(&sc->bge_ifmedia);
2502	} else {
2503		bus_generic_detach(dev);
2504		device_delete_child(dev, sc->bge_miibus);
2505	}
2506
2507	bge_release_resources(sc);
2508	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2509	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2510		bge_free_jumbo_mem(sc);
2511
2512	return(0);
2513}
2514
2515static void
2516bge_release_resources(sc)
2517	struct bge_softc *sc;
2518{
2519	device_t dev;
2520
2521	dev = sc->bge_dev;
2522
2523	if (sc->bge_vpd_prodname != NULL)
2524		free(sc->bge_vpd_prodname, M_DEVBUF);
2525
2526	if (sc->bge_vpd_readonly != NULL)
2527		free(sc->bge_vpd_readonly, M_DEVBUF);
2528
2529	if (sc->bge_intrhand != NULL)
2530		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2531
2532	if (sc->bge_irq != NULL)
2533		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2534
2535	if (sc->bge_res != NULL)
2536		bus_release_resource(dev, SYS_RES_MEMORY,
2537		    BGE_PCI_BAR0, sc->bge_res);
2538
2539	bge_dma_free(sc);
2540
2541	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2542		BGE_LOCK_DESTROY(sc);
2543
2544	return;
2545}
2546
2547static void
2548bge_reset(sc)
2549	struct bge_softc *sc;
2550{
2551	device_t dev;
2552	u_int32_t cachesize, command, pcistate, reset;
2553	int i, val = 0;
2554
2555	dev = sc->bge_dev;
2556
2557	/* Save some important PCI state. */
2558	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2559	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2560	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2561
2562	pci_write_config(dev, BGE_PCI_MISC_CTL,
2563	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2564	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2565
2566	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
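	/*
	 * The (65<<1) factor above appears to be the chip's 32-bit
	 * timer prescaler for a 66MHz core clock (0x41 << 1); the same
	 * value, without the reset bit, is written back after the
	 * reset completes below.
	 */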
2567
2568	/* XXX: Broadcom Linux driver. */
2569	if (sc->bge_pcie) {
2570		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2571			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2572		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2573			/* Prevent PCIE link training during global reset */
2574			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2575			reset |= (1<<29);
2576		}
2577	}
2578
2579	/* Issue global reset */
2580	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2581
2582	DELAY(1000);
2583
2584	/* XXX: Broadcom Linux driver. */
2585	if (sc->bge_pcie) {
2586		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2587			uint32_t v;
2588
2589			DELAY(500000); /* wait for link training to complete */
2590			v = pci_read_config(dev, 0xc4, 4);
2591			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2592		}
2593		/* Set PCIE max payload size and clear error status. */
2594		pci_write_config(dev, 0xd8, 0xf5000, 4);
2595	}
2596
2597	/* Reset some of the PCI state that got zapped by reset */
2598	pci_write_config(dev, BGE_PCI_MISC_CTL,
2599	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2600	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2601	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2602	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2603	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2604
2605	/* Enable memory arbiter. */
2606	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2607	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2608		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2609
2610	/*
2611	 * Prevent PXE restart: write a magic number to the
2612	 * general communications memory at 0xB50.
2613	 */
2614	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2615	/*
2616	 * Poll the value location we just wrote until
2617	 * we see the 1's complement of the magic number.
2618	 * This indicates that the firmware initialization
2619	 * is complete.
2620	 */
2621	for (i = 0; i < BGE_TIMEOUT; i++) {
2622		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2623		if (val == ~BGE_MAGIC_NUMBER)
2624			break;
2625		DELAY(10);
2626	}
2627
2628	if (i == BGE_TIMEOUT) {
2629		printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2630		return;
2631	}
2632
2633	/*
2634	 * XXX Wait for the value of the PCISTATE register to
2635	 * return to its original pre-reset state. This is a
2636	 * fairly good indicator of reset completion. If we don't
2637	 * wait for the reset to fully complete, trying to read
2638	 * from the device's non-PCI registers may yield garbage
2639	 * results.
2640	 */
2641	for (i = 0; i < BGE_TIMEOUT; i++) {
2642		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2643			break;
2644		DELAY(10);
2645	}
2646
2647	/* Fix up byte swapping */
2648	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2649	    BGE_MODECTL_BYTESWAP_DATA);
2650
2651	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2652
2653	/*
2654	 * The 5704 in TBI mode apparently needs some special
2655	 * adjustment to ensure the SERDES drive level is set
2656	 * to 1.2V.
2657	 */
2658	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2659		uint32_t serdescfg;
2660		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2661		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2662		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2663	}
2664
2665	/* XXX: Broadcom Linux driver. */
2666	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2667		uint32_t v;
2668
2669		v = CSR_READ_4(sc, 0x7c00);
2670		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2671	}
2672	DELAY(10000);
2673
2674	return;
2675}
2676
2677/*
2678 * Frame reception handling. This is called if there's a frame
2679 * on the receive return list.
2680 *
2681 * Note: we have to be able to handle two possibilities here:
2682 * 1) the frame is from the jumbo recieve ring
2683 * 1) the frame is from the jumbo receive ring
2684 */
2685
2686static void
2687bge_rxeof(sc)
2688	struct bge_softc *sc;
2689{
2690	struct ifnet *ifp;
2691	int stdcnt = 0, jumbocnt = 0;
2692
2693	BGE_LOCK_ASSERT(sc);
2694
2695	ifp = &sc->arpcom.ac_if;
2696
2697	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2698	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2699	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2700	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2701	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2702	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2703		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2704		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2705		    BUS_DMASYNC_POSTREAD);
2706	}
2707
2708	while(sc->bge_rx_saved_considx !=
2709	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2710		struct bge_rx_bd	*cur_rx;
2711		u_int32_t		rxidx;
2712		struct ether_header	*eh;
2713		struct mbuf		*m = NULL;
2714		u_int16_t		vlan_tag = 0;
2715		int			have_tag = 0;
2716
2717		cur_rx =
2718		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2719
2720		rxidx = cur_rx->bge_idx;
2721		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
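		/*
		 * BGE_INC() is assumed to advance a ring index with
		 * wraparound, roughly (x) = ((x) + 1) % (y), so the
		 * consumer index chases the producer index around the
		 * return ring without any explicit bounds check.
		 */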
2722
2723		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2724			have_tag = 1;
2725			vlan_tag = cur_rx->bge_vlan_tag;
2726		}
2727
2728		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2729			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2730			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2731			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2732			    BUS_DMASYNC_POSTREAD);
2733			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2734			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2735			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2736			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2737			jumbocnt++;
2738			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2739				ifp->if_ierrors++;
2740				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2741				continue;
2742			}
2743			if (bge_newbuf_jumbo(sc,
2744			    sc->bge_jumbo, NULL) == ENOBUFS) {
2745				ifp->if_ierrors++;
2746				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2747				continue;
2748			}
2749		} else {
2750			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2751			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2752			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2753			    BUS_DMASYNC_POSTREAD);
2754			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2755			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2756			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2757			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2758			stdcnt++;
2759			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2760				ifp->if_ierrors++;
2761				bge_newbuf_std(sc, sc->bge_std, m);
2762				continue;
2763			}
2764			if (bge_newbuf_std(sc, sc->bge_std,
2765			    NULL) == ENOBUFS) {
2766				ifp->if_ierrors++;
2767				bge_newbuf_std(sc, sc->bge_std, m);
2768				continue;
2769			}
2770		}
2771
2772		ifp->if_ipackets++;
2773#ifndef __i386__
2774		/*
2775		 * The i386 allows unaligned accesses, but for other
2776		 * platforms we must make sure the payload is aligned.
2777		 */
2778		if (sc->bge_rx_alignment_bug) {
2779			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2780			    cur_rx->bge_len);
2781			m->m_data += ETHER_ALIGN;
2782		}
2783#endif
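		/*
		 * (When the realignment copy above is performed, the
		 * two-byte shift keeps the IP header, which follows the
		 * 14-byte Ethernet header, on a 32-bit boundary, since
		 * ETHER_ALIGN is 2.)
		 */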
2784		eh = mtod(m, struct ether_header *);
2785		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2786		m->m_pkthdr.rcvif = ifp;
2787
2788#if 0 /* currently broken for some packets, possibly related to TCP options */
2789		if (ifp->if_capenable & IFCAP_RXCSUM) {
2790			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2791			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2792				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2793			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2794				m->m_pkthdr.csum_data =
2795				    cur_rx->bge_tcp_udp_csum;
2796				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2797			}
2798		}
2799#endif
2800
2801		/*
2802		 * If we received a packet with a vlan tag,
2803		 * attach that information to the packet.
2804		 */
2805		if (have_tag)
2806			VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2807
2808		BGE_UNLOCK(sc);
2809		(*ifp->if_input)(ifp, m);
2810		BGE_LOCK(sc);
2811	}
2812
2813	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2814	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2815	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2816	    sc->bge_cdata.bge_rx_std_ring_map,
2817	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2818	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2819	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2820		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2821		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2822		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2823	}
2824
2825	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2826	if (stdcnt)
2827		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2828	if (jumbocnt)
2829		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2830
2831	return;
2832}
2833
2834static void
2835bge_txeof(sc)
2836	struct bge_softc *sc;
2837{
2838	struct bge_tx_bd *cur_tx = NULL;
2839	struct ifnet *ifp;
2840
2841	BGE_LOCK_ASSERT(sc);
2842
2843	ifp = &sc->arpcom.ac_if;
2844
2845	/*
2846	 * Go through our tx ring and free mbufs for those
2847	 * frames that have been sent.
2848	 */
2849	while (sc->bge_tx_saved_considx !=
2850	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2851		u_int32_t		idx = 0;
2852
2853		idx = sc->bge_tx_saved_considx;
2854		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2855		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2856			ifp->if_opackets++;
2857		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2858			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2859			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2860			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2861			    sc->bge_cdata.bge_tx_dmamap[idx]);
2862		}
2863		sc->bge_txcnt--;
2864		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2865		ifp->if_timer = 0;
2866	}
2867
2868	if (cur_tx != NULL)
2869		ifp->if_flags &= ~IFF_OACTIVE;
2870
2871	return;
2872}
2873
2874static void
2875bge_intr(xsc)
2876	void *xsc;
2877{
2878	struct bge_softc *sc;
2879	struct ifnet *ifp;
2880	u_int32_t statusword;
2881	u_int32_t status, mimode;
2882
2883	sc = xsc;
2884	ifp = &sc->arpcom.ac_if;
2885
2886	BGE_LOCK(sc);
2887
2888	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2889	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2890
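	/*
	 * Read the status word and clear it in a single atomic step so
	 * that an update the NIC DMAs in immediately afterwards cannot
	 * be lost.
	 */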
2891	statusword =
2892	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2893
2894#ifdef notdef
2895	/* Avoid this for now -- checking this register is expensive. */
2896	/* Make sure this is really our interrupt. */
2897	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2898		return;
2899#endif
2900	/* Ack interrupt and stop others from occurring. */
2901	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2902
2903	/*
2904	 * Process link state changes.
2905	 * Grrr. The link status word in the status block does
2906	 * not work correctly on the BCM5700 rev AX and BX chips,
2907	 * according to all available information. Hence, we have
2908	 * to enable MII interrupts in order to properly obtain
2909	 * async link changes. Unfortunately, this also means that
2910	 * we have to read the MAC status register to detect link
2911	 * changes, thereby adding an additional register access to
2912	 * the interrupt handler.
2913	 */
2914
2915	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2916
2917		status = CSR_READ_4(sc, BGE_MAC_STS);
2918		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2919			sc->bge_link = 0;
2920			callout_stop(&sc->bge_stat_ch);
2921			bge_tick_locked(sc);
2922			/* Clear the interrupt */
2923			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2924			    BGE_EVTENB_MI_INTERRUPT);
2925			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2926			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2927			    BRGPHY_INTRS);
2928		}
2929	} else {
2930		if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2931			/*
2932			 * Sometimes PCS encoding errors are detected in
2933			 * TBI mode (on fiber NICs), and for some reason
2934			 * the chip will signal them as link changes.
2935			 * If we get a link change event, but the 'PCS
2936			 * encoding error' bit in the MAC status register
2937			 * is set, don't bother doing a link check.
2938			 * This avoids spurious "gigabit link up" messages
2939			 * that sometimes appear on fiber NICs during
2940			 * periods of heavy traffic. (There should be no
2941			 * effect on copper NICs.)
2942			 *
2943			 * If we do have a copper NIC (bge_tbi == 0) then
2944			 * check that the AUTOPOLL bit is set before
2945			 * processing the event as a real link change.
2946			 * Turning AUTOPOLL on and off in the MII read/write
2947			 * functions will often trigger a link status
2948			 * interrupt for no reason.
2949			 */
2950			status = CSR_READ_4(sc, BGE_MAC_STS);
2951			mimode = CSR_READ_4(sc, BGE_MI_MODE);
2952			if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2953			    BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
2954			    (mimode & BGE_MIMODE_AUTOPOLL))) {
2955				sc->bge_link = 0;
2956				callout_stop(&sc->bge_stat_ch);
2957				bge_tick_locked(sc);
2958			}
2959			/* Clear the interrupt */
2960			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2961			    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2962			    BGE_MACSTAT_LINK_CHANGED);
2963
2964			/* Force flush the status block cached by PCI bridge */
2965			CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2966		}
2967	}
2968
2969	if (ifp->if_flags & IFF_RUNNING) {
2970		/* Check RX return ring producer/consumer */
2971		bge_rxeof(sc);
2972
2973		/* Check TX ring producer/consumer */
2974		bge_txeof(sc);
2975	}
2976
2977	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2978	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2979
2980	bge_handle_events(sc);
2981
2982	/* Re-enable interrupts. */
2983	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2984
2985	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2986		bge_start_locked(ifp);
2987
2988	BGE_UNLOCK(sc);
2989
2990	return;
2991}
2992
2993static void
2994bge_tick_locked(sc)
2995	struct bge_softc *sc;
2996{
2997	struct mii_data *mii = NULL;
2998	struct ifmedia *ifm = NULL;
2999	struct ifnet *ifp;
3000
3001	ifp = &sc->arpcom.ac_if;
3002
3003	BGE_LOCK_ASSERT(sc);
3004
3005	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3006	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
3007		bge_stats_update_regs(sc);
3008	else
3009		bge_stats_update(sc);
3010	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3011	if (sc->bge_link)
3012		return;
3013
3014	if (sc->bge_tbi) {
3015		ifm = &sc->bge_ifmedia;
3016		if (CSR_READ_4(sc, BGE_MAC_STS) &
3017		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3018			sc->bge_link++;
3019			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3020				BGE_CLRBIT(sc, BGE_MAC_MODE,
3021				    BGE_MACMODE_TBI_SEND_CFGS);
3022			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3023			printf("bge%d: gigabit link up\n", sc->bge_unit);
3024			if (ifp->if_snd.ifq_head != NULL)
3025				bge_start_locked(ifp);
3026		}
3027		return;
3028	}
3029
3030	mii = device_get_softc(sc->bge_miibus);
3031	mii_tick(mii);
3032
3033	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3034	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3035		sc->bge_link++;
3036		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
3037		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
3038			printf("bge%d: gigabit link up\n",
3039			   sc->bge_unit);
3040		if (ifp->if_snd.ifq_head != NULL)
3041			bge_start_locked(ifp);
3042	}
3043
3044	return;
3045}
3046
3047static void
3048bge_tick(xsc)
3049	void *xsc;
3050{
3051	struct bge_softc *sc;
3052
3053	sc = xsc;
3054
3055	BGE_LOCK(sc);
3056	bge_tick_locked(sc);
3057	BGE_UNLOCK(sc);
3058}
3059
3060static void
3061bge_stats_update_regs(sc)
3062	struct bge_softc *sc;
3063{
3064	struct ifnet *ifp;
3065	struct bge_mac_stats_regs stats;
3066	u_int32_t *s;
3067	int i;
3068
3069	ifp = &sc->arpcom.ac_if;
3070
3071	s = (u_int32_t *)&stats;
3072	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3073		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
3074		s++;
3075	}
3076
3077	ifp->if_collisions +=
3078	   (stats.dot3StatsSingleCollisionFrames +
3079	   stats.dot3StatsMultipleCollisionFrames +
3080	   stats.dot3StatsExcessiveCollisions +
3081	   stats.dot3StatsLateCollisions) -
3082	   ifp->if_collisions;
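
	/*
	 * The "+= total - if_collisions" idiom above (also used in
	 * bge_stats_update() below) is just a roundabout assignment of
	 * the chip's cumulative collision count, since the hardware
	 * counters are running totals rather than deltas.
	 */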
3083
3084	return;
3085}
3086
3087static void
3088bge_stats_update(sc)
3089	struct bge_softc *sc;
3090{
3091	struct ifnet *ifp;
3092	struct bge_stats *stats;
3093
3094	ifp = &sc->arpcom.ac_if;
3095
3096	stats = (struct bge_stats *)(sc->bge_vhandle +
3097	    BGE_MEMWIN_START + BGE_STATS_BLOCK);
3098
3099	ifp->if_collisions +=
3100	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
3101	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
3102	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
3103	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
3104	   ifp->if_collisions;
3105
3106#ifdef notdef
3107	ifp->if_collisions +=
3108	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3109	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3110	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3111	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3112	   ifp->if_collisions;
3113#endif
3114
3115	return;
3116}
3117
3118/*
3119 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3120 * pointers to descriptors.
3121 */
3122static int
3123bge_encap(sc, m_head, txidx)
3124	struct bge_softc *sc;
3125	struct mbuf *m_head;
3126	u_int32_t *txidx;
3127{
3128	struct bge_tx_bd	*f = NULL;
3129	u_int16_t		csum_flags = 0;
3130	struct m_tag		*mtag;
3131	struct bge_dmamap_arg	ctx;
3132	bus_dmamap_t		map;
3133	int			error;
3134
3136	if (m_head->m_pkthdr.csum_flags) {
3137		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3138			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3139		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3140			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3141		if (m_head->m_flags & M_LASTFRAG)
3142			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3143		else if (m_head->m_flags & M_FRAG)
3144			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3145	}
3146
3147	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
3148
3149	ctx.sc = sc;
3150	ctx.bge_idx = *txidx;
3151	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3152	ctx.bge_flags = csum_flags;
3153	/*
3154	 * Sanity check: avoid coming within 16 descriptors
3155	 * of the end of the ring.
3156	 */
3157	ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3158
3159	map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3160	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3161	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3162
3163	if (error || ctx.bge_maxsegs == 0 /*||
3164	    ctx.bge_idx == sc->bge_tx_saved_considx*/)
3165		return (ENOBUFS);
3166
3167	/*
3168	 * Ensure that the map for this transmission
3169	 * is placed at the array index of the last descriptor
3170	 * in this chain.
3171	 */
3172	sc->bge_cdata.bge_tx_dmamap[*txidx] =
3173	    sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3174	sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3175	sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3176	sc->bge_txcnt += ctx.bge_maxsegs;
3177	f = &sc->bge_ldata.bge_tx_ring[*txidx];
3178	if (mtag != NULL) {
3179		f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3180		f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3181	} else {
3182		f->bge_vlan_tag = 0;
3183	}
3184
3185	BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3186	*txidx = ctx.bge_idx;
3187
3188	return(0);
3189}
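
/*
 * A rough sketch of what the bge_dma_map_tx_desc() callback used
 * above is expected to do (the actual routine lives earlier in this
 * file): walk the segment list, fill in one bge_tx_bd per segment
 * starting at ctx->bge_idx, tag each descriptor with ctx->bge_flags,
 * mark the final one with BGE_TXBDFLAG_END, record the number of
 * descriptors consumed in ctx->bge_maxsegs (zero if the ring would
 * overflow), and leave the index of that final descriptor in
 * ctx->bge_idx for the map/mbuf bookkeeping done by bge_encap().
 */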
3190
3191/*
3192 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3193 * to the mbuf data regions directly in the transmit descriptors.
3194 */
3195static void
3196bge_start_locked(ifp)
3197	struct ifnet *ifp;
3198{
3199	struct bge_softc *sc;
3200	struct mbuf *m_head = NULL;
3201	u_int32_t prodidx = 0;
3202
3203	sc = ifp->if_softc;
3204
3205	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3206		return;
3207
3208	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3209
3210	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3211		IF_DEQUEUE(&ifp->if_snd, m_head);
3212		if (m_head == NULL)
3213			break;
3214
3215		/*
3216		 * XXX
3217		 * The code inside the if() block is never reached since we
3218		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3219		 * requests to checksum TCP/UDP in a fragmented packet.
3220		 *
3221		 * XXX
3222		 * safety overkill.  If this is a fragmented packet chain
3223		 * with delayed TCP/UDP checksums, then only encapsulate
3224		 * it if we have enough descriptors to handle the entire
3225		 * chain at once.
3226		 * (paranoia -- may not actually be needed)
3227		 */
3228		if (m_head->m_flags & M_FIRSTFRAG &&
3229		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3230			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3231			    m_head->m_pkthdr.csum_data + 16) {
3232				IF_PREPEND(&ifp->if_snd, m_head);
3233				ifp->if_flags |= IFF_OACTIVE;
3234				break;
3235			}
3236		}
3237
3238		/*
3239		 * Pack the data into the transmit ring. If we
3240		 * don't have room, set the OACTIVE flag and wait
3241		 * for the NIC to drain the ring.
3242		 */
3243		if (bge_encap(sc, m_head, &prodidx)) {
3244			IF_PREPEND(&ifp->if_snd, m_head);
3245			ifp->if_flags |= IFF_OACTIVE;
3246			break;
3247		}
3248
3249		/*
3250		 * If there's a BPF listener, bounce a copy of this frame
3251		 * to him.
3252		 */
3253		BPF_MTAP(ifp, m_head);
3254	}
3255
3256	/* Transmit */
3257	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3258	/* 5700 b2 errata */
3259	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3260		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3261
3262	/*
3263	 * Set a timeout in case the chip goes out to lunch.
3264	 */
3265	ifp->if_timer = 5;
3266
3267	return;
3268}
3269
3270/*
3271 * Locking wrapper for the transmit routine above: grab the driver
3272 * lock, hand the queue to bge_start_locked() and drop the lock again.
3273 */
3274static void
3275bge_start(ifp)
3276	struct ifnet *ifp;
3277{
3278	struct bge_softc *sc;
3279
3280	sc = ifp->if_softc;
3281	BGE_LOCK(sc);
3282	bge_start_locked(ifp);
3283	BGE_UNLOCK(sc);
3284}
3285
3286static void
3287bge_init_locked(sc)
3288	struct bge_softc *sc;
3289{
3290	struct ifnet *ifp;
3291	u_int16_t *m;
3292
3293	BGE_LOCK_ASSERT(sc);
3294
3295	ifp = &sc->arpcom.ac_if;
3296
3297	if (ifp->if_flags & IFF_RUNNING)
3298		return;
3299
3300	/* Cancel pending I/O and flush buffers. */
3301	bge_stop(sc);
3302	bge_reset(sc);
3303	bge_chipinit(sc);
3304
3305	/*
3306	 * Init the various state machines, ring
3307	 * control blocks and firmware.
3308	 */
3309	if (bge_blockinit(sc)) {
3310		printf("bge%d: initialization failure\n", sc->bge_unit);
3311		return;
3312	}
3313
3314	ifp = &sc->arpcom.ac_if;
3315
3316	/* Specify MTU. */
3317	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3318	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3319
3320	/* Load our MAC address. */
3321	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3322	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3323	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3324
3325	/* Enable or disable promiscuous mode as needed. */
3326	if (ifp->if_flags & IFF_PROMISC) {
3327		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3328	} else {
3329		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3330	}
3331
3332	/* Program multicast filter. */
3333	bge_setmulti(sc);
3334
3335	/* Init RX ring. */
3336	bge_init_rx_ring_std(sc);
3337
3338	/*
3339	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3340	 * memory to ensure that the chip has in fact read the first
3341	 * entry of the ring.
3342	 */
3343	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3344		u_int32_t		v, i;
3345		for (i = 0; i < 10; i++) {
3346			DELAY(20);
3347			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3348			if (v == (MCLBYTES - ETHER_ALIGN))
3349				break;
3350		}
3351		if (i == 10)
3352			printf("bge%d: 5705 A0 chip failed to load RX ring\n",
3353			    sc->bge_unit);
3354	}
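
	/*
	 * (The readback above apparently fetches the length field of
	 * the first buffer descriptor, 8 bytes into the ring, which
	 * bge_newbuf_std() sets to MCLBYTES - ETHER_ALIGN; seeing that
	 * value confirms the chip has really fetched the descriptor.)
	 */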
3355
3356	/* Init jumbo RX ring. */
3357	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3358		bge_init_rx_ring_jumbo(sc);
3359
3360	/* Init our RX return ring index */
3361	sc->bge_rx_saved_considx = 0;
3362
3363	/* Init TX ring. */
3364	bge_init_tx_ring(sc);
3365
3366	/* Turn on transmitter */
3367	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3368
3369	/* Turn on receiver */
3370	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3371
3372	/* Tell firmware we're alive. */
3373	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3374
3375	/* Enable host interrupts. */
3376	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3377	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3378	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3379
3380	bge_ifmedia_upd(ifp);
3381
3382	ifp->if_flags |= IFF_RUNNING;
3383	ifp->if_flags &= ~IFF_OACTIVE;
3384
3385	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3386
3387	return;
3388}
3389
3390static void
3391bge_init(xsc)
3392	void *xsc;
3393{
3394	struct bge_softc *sc = xsc;
3395
3396	BGE_LOCK(sc);
3397	bge_init_locked(sc);
3398	BGE_UNLOCK(sc);
3399
3400	return;
3401}
3402
3403/*
3404 * Set media options.
3405 */
3406static int
3407bge_ifmedia_upd(ifp)
3408	struct ifnet *ifp;
3409{
3410	struct bge_softc *sc;
3411	struct mii_data *mii;
3412	struct ifmedia *ifm;
3413
3414	sc = ifp->if_softc;
3415	ifm = &sc->bge_ifmedia;
3416
3417	/* If this is a 1000baseX NIC, enable the TBI port. */
3418	if (sc->bge_tbi) {
3419		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3420			return(EINVAL);
3421		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3422		case IFM_AUTO:
3423			/*
3424			 * The BCM5704 ASIC appears to have a special
3425			 * mechanism for programming the autoneg
3426			 * advertisement registers in TBI mode.
3427			 */
3428			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3429				uint32_t sgdig;
3430				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3431				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3432				sgdig |= BGE_SGDIGCFG_AUTO|
3433				    BGE_SGDIGCFG_PAUSE_CAP|
3434				    BGE_SGDIGCFG_ASYM_PAUSE;
3435				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3436				    sgdig|BGE_SGDIGCFG_SEND);
3437				DELAY(5);
3438				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3439			}
3440			break;
3441		case IFM_1000_SX:
3442			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3443				BGE_CLRBIT(sc, BGE_MAC_MODE,
3444				    BGE_MACMODE_HALF_DUPLEX);
3445			} else {
3446				BGE_SETBIT(sc, BGE_MAC_MODE,
3447				    BGE_MACMODE_HALF_DUPLEX);
3448			}
3449			break;
3450		default:
3451			return(EINVAL);
3452		}
3453		return(0);
3454	}
3455
3456	mii = device_get_softc(sc->bge_miibus);
3457	sc->bge_link = 0;
3458	if (mii->mii_instance) {
3459		struct mii_softc *miisc;
3460		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3461		    miisc = LIST_NEXT(miisc, mii_list))
3462			mii_phy_reset(miisc);
3463	}
3464	mii_mediachg(mii);
3465
3466	return(0);
3467}
3468
3469/*
3470 * Report current media status.
3471 */
3472static void
3473bge_ifmedia_sts(ifp, ifmr)
3474	struct ifnet *ifp;
3475	struct ifmediareq *ifmr;
3476{
3477	struct bge_softc *sc;
3478	struct mii_data *mii;
3479
3480	sc = ifp->if_softc;
3481
3482	if (sc->bge_tbi) {
3483		ifmr->ifm_status = IFM_AVALID;
3484		ifmr->ifm_active = IFM_ETHER;
3485		if (CSR_READ_4(sc, BGE_MAC_STS) &
3486		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3487			ifmr->ifm_status |= IFM_ACTIVE;
3488		ifmr->ifm_active |= IFM_1000_SX;
3489		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3490			ifmr->ifm_active |= IFM_HDX;
3491		else
3492			ifmr->ifm_active |= IFM_FDX;
3493		return;
3494	}
3495
3496	mii = device_get_softc(sc->bge_miibus);
3497	mii_pollstat(mii);
3498	ifmr->ifm_active = mii->mii_media_active;
3499	ifmr->ifm_status = mii->mii_media_status;
3500
3501	return;
3502}
3503
3504static int
3505bge_ioctl(ifp, command, data)
3506	struct ifnet *ifp;
3507	u_long command;
3508	caddr_t data;
3509{
3510	struct bge_softc *sc = ifp->if_softc;
3511	struct ifreq *ifr = (struct ifreq *) data;
3512	int mask, error = 0;
3513	struct mii_data *mii;
3514
3515	switch(command) {
3516	case SIOCSIFMTU:
3517		/* Disallow jumbo frames on 5705. */
3518		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3519		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3520		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3521			error = EINVAL;
3522		else {
3523			ifp->if_mtu = ifr->ifr_mtu;
3524			ifp->if_flags &= ~IFF_RUNNING;
3525			bge_init(sc);
3526		}
3527		break;
3528	case SIOCSIFFLAGS:
3529		BGE_LOCK(sc);
3530		if (ifp->if_flags & IFF_UP) {
3531			/*
3532			 * If only the state of the PROMISC flag changed,
3533			 * then just use the 'set promisc mode' command
3534			 * instead of reinitializing the entire NIC. Doing
3535			 * a full re-init means reloading the firmware and
3536			 * waiting for it to start up, which may take a
3537			 * second or two.
3538			 */
3539			if (ifp->if_flags & IFF_RUNNING &&
3540			    ifp->if_flags & IFF_PROMISC &&
3541			    !(sc->bge_if_flags & IFF_PROMISC)) {
3542				BGE_SETBIT(sc, BGE_RX_MODE,
3543				    BGE_RXMODE_RX_PROMISC);
3544			} else if (ifp->if_flags & IFF_RUNNING &&
3545			    !(ifp->if_flags & IFF_PROMISC) &&
3546			    sc->bge_if_flags & IFF_PROMISC) {
3547				BGE_CLRBIT(sc, BGE_RX_MODE,
3548				    BGE_RXMODE_RX_PROMISC);
3549			} else
3550				bge_init_locked(sc);
3551		} else {
3552			if (ifp->if_flags & IFF_RUNNING) {
3553				bge_stop(sc);
3554			}
3555		}
3556		sc->bge_if_flags = ifp->if_flags;
3557		BGE_UNLOCK(sc);
3558		error = 0;
3559		break;
3560	case SIOCADDMULTI:
3561	case SIOCDELMULTI:
3562		if (ifp->if_flags & IFF_RUNNING) {
3563			BGE_LOCK(sc);
3564			bge_setmulti(sc);
3565			BGE_UNLOCK(sc);
3566			error = 0;
3567		}
3568		break;
3569	case SIOCSIFMEDIA:
3570	case SIOCGIFMEDIA:
3571		if (sc->bge_tbi) {
3572			error = ifmedia_ioctl(ifp, ifr,
3573			    &sc->bge_ifmedia, command);
3574		} else {
3575			mii = device_get_softc(sc->bge_miibus);
3576			error = ifmedia_ioctl(ifp, ifr,
3577			    &mii->mii_media, command);
3578		}
3579		break;
3580	case SIOCSIFCAP:
3581		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3582		/* NB: the code for RX csum offload is disabled for now */
3583		if (mask & IFCAP_TXCSUM) {
3584			ifp->if_capenable ^= IFCAP_TXCSUM;
3585			if (IFCAP_TXCSUM & ifp->if_capenable)
3586				ifp->if_hwassist = BGE_CSUM_FEATURES;
3587			else
3588				ifp->if_hwassist = 0;
3589		}
3590		error = 0;
3591		break;
3592	default:
3593		error = ether_ioctl(ifp, command, data);
3594		break;
3595	}
3596
3597	return(error);
3598}
3599
3600static void
3601bge_watchdog(ifp)
3602	struct ifnet *ifp;
3603{
3604	struct bge_softc *sc;
3605
3606	sc = ifp->if_softc;
3607
3608	printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3609
3610	ifp->if_flags &= ~IFF_RUNNING;
3611	bge_init(sc);
3612
3613	ifp->if_oerrors++;
3614
3615	return;
3616}
3617
3618/*
3619 * Stop the adapter and free any mbufs allocated to the
3620 * RX and TX lists.
3621 */
3622static void
3623bge_stop(sc)
3624	struct bge_softc *sc;
3625{
3626	struct ifnet *ifp;
3627	struct ifmedia_entry *ifm;
3628	struct mii_data *mii = NULL;
3629	int mtmp, itmp;
3630
3631	BGE_LOCK_ASSERT(sc);
3632
3633	ifp = &sc->arpcom.ac_if;
3634
3635	if (!sc->bge_tbi)
3636		mii = device_get_softc(sc->bge_miibus);
3637
3638	callout_stop(&sc->bge_stat_ch);
3639
3640	/*
3641	 * Disable all of the receiver blocks
3642	 */
3643	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3644	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3645	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3646	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3647	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3648		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3649	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3650	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3651	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3652
3653	/*
3654	 * Disable all of the transmit blocks
3655	 */
3656	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3657	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3658	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3659	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3660	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3661	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3662	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3663		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3664	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3665
3666	/*
3667	 * Shut down all of the memory managers and related
3668	 * state machines.
3669	 */
3670	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3671	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3672	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3673	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3674		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3675	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3676	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3677	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3678	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3679		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3680		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3681	}
3682
3683	/* Disable host interrupts. */
3684	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3685	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3686
3687	/*
3688	 * Tell firmware we're shutting down.
3689	 */
3690	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3691
3692	/* Free the RX lists. */
3693	bge_free_rx_ring_std(sc);
3694
3695	/* Free jumbo RX list. */
3696	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3697	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3698		bge_free_rx_ring_jumbo(sc);
3699
3700	/* Free TX buffers. */
3701	bge_free_tx_ring(sc);
3702
3703	/*
3704	 * Isolate/power down the PHY, but leave the media selection
3705	 * unchanged so that things will be put back to normal when
3706	 * we bring the interface back up.
3707	 */
3708	if (!sc->bge_tbi) {
3709		itmp = ifp->if_flags;
3710		ifp->if_flags |= IFF_UP;
3711		ifm = mii->mii_media.ifm_cur;
3712		mtmp = ifm->ifm_media;
3713		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3714		mii_mediachg(mii);
3715		ifm->ifm_media = mtmp;
3716		ifp->if_flags = itmp;
3717	}
3718
3719	sc->bge_link = 0;
3720
3721	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3722
3723	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3724
3725	return;
3726}
3727
3728/*
3729 * Stop all chip I/O so that the kernel's probe routines don't
3730 * get confused by errant DMAs when rebooting.
3731 */
3732static void
3733bge_shutdown(dev)
3734	device_t dev;
3735{
3736	struct bge_softc *sc;
3737
3738	sc = device_get_softc(dev);
3739
3740	BGE_LOCK(sc);
3741	bge_stop(sc);
3742	bge_reset(sc);
3743	BGE_UNLOCK(sc);
3744
3745	return;
3746}
3747