if_bge.c (135359) → if_bge.c (135772)
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 135359 2004-09-17 04:58:17Z wpaul $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 135772 2004-09-24 22:24:33Z ps $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
 62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#include <sys/param.h>
70#include <sys/endian.h>
71#include <sys/systm.h>
72#include <sys/sockio.h>
73#include <sys/mbuf.h>
74#include <sys/malloc.h>
75#include <sys/kernel.h>
76#include <sys/module.h>
77#include <sys/socket.h>
78#include <sys/queue.h>
79
80#include <net/if.h>
81#include <net/if_arp.h>
82#include <net/ethernet.h>
83#include <net/if_dl.h>
84#include <net/if_media.h>
85
86#include <net/bpf.h>
87
88#include <net/if_types.h>
89#include <net/if_vlan_var.h>
90
91#include <netinet/in_systm.h>
92#include <netinet/in.h>
93#include <netinet/ip.h>
94
95#include <machine/clock.h> /* for DELAY */
96#include <machine/bus_memio.h>
97#include <machine/bus.h>
98#include <machine/resource.h>
99#include <sys/bus.h>
100#include <sys/rman.h>
101
102#include <dev/mii/mii.h>
103#include <dev/mii/miivar.h>
104#include "miidevs.h"
105#include <dev/mii/brgphyreg.h>
106
107#include <dev/pci/pcireg.h>
108#include <dev/pci/pcivar.h>
109
110#include <dev/bge/if_bgereg.h>
111
112#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
113
114MODULE_DEPEND(bge, pci, 1, 1, 1);
115MODULE_DEPEND(bge, ether, 1, 1, 1);
116MODULE_DEPEND(bge, miibus, 1, 1, 1);
117
118/* "controller miibus0" required. See GENERIC if you get errors here. */
119#include "miibus_if.h"
120
121/*
122 * Various supported device vendors/types and their names. Note: the
123 * spec seems to indicate that the hardware still has Alteon's vendor
 124 * ID burned into it, though it will always be overridden by the vendor
125 * ID in the EEPROM. Just to be safe, we cover all possibilities.
126 */
127#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
128
129static struct bge_type bge_devs[] = {
130 { ALT_VENDORID, ALT_DEVICEID_BCM5700,
131 "Broadcom BCM5700 Gigabit Ethernet" },
132 { ALT_VENDORID, ALT_DEVICEID_BCM5701,
133 "Broadcom BCM5701 Gigabit Ethernet" },
134 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
135 "Broadcom BCM5700 Gigabit Ethernet" },
136 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
137 "Broadcom BCM5701 Gigabit Ethernet" },
138 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
139 "Broadcom BCM5702 Gigabit Ethernet" },
140 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
141 "Broadcom BCM5702X Gigabit Ethernet" },
142 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
143 "Broadcom BCM5703 Gigabit Ethernet" },
144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
145 "Broadcom BCM5703X Gigabit Ethernet" },
146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
147 "Broadcom BCM5704C Dual Gigabit Ethernet" },
148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
149 "Broadcom BCM5704S Dual Gigabit Ethernet" },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
151 "Broadcom BCM5705 Gigabit Ethernet" },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
153 "Broadcom BCM5705K Gigabit Ethernet" },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
155 "Broadcom BCM5705M Gigabit Ethernet" },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
157 "Broadcom BCM5705M Gigabit Ethernet" },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
159 "Broadcom BCM5750 Gigabit Ethernet" },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
161 "Broadcom BCM5750M Gigabit Ethernet" },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
163 "Broadcom BCM5751 Gigabit Ethernet" },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
159 "Broadcom BCM5782 Gigabit Ethernet" },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
161 "Broadcom BCM5788 Gigabit Ethernet" },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
163 "Broadcom BCM5901 Fast Ethernet" },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
165 "Broadcom BCM5901A2 Fast Ethernet" },
166 { SK_VENDORID, SK_DEVICEID_ALTIMA,
167 "SysKonnect Gigabit Ethernet" },
168 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
169 "Altima AC1000 Gigabit Ethernet" },
170 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
171 "Altima AC1002 Gigabit Ethernet" },
172 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
173 "Altima AC9100 Gigabit Ethernet" },
174 { 0, 0, NULL }
175};
176
177static int bge_probe (device_t);
178static int bge_attach (device_t);
179static int bge_detach (device_t);
180static void bge_release_resources
181 (struct bge_softc *);
182static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
183static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
184 bus_size_t, int);
185static int bge_dma_alloc (device_t);
186static void bge_dma_free (struct bge_softc *);
187
188static void bge_txeof (struct bge_softc *);
189static void bge_rxeof (struct bge_softc *);
190
191static void bge_tick_locked (struct bge_softc *);
192static void bge_tick (void *);
193static void bge_stats_update (struct bge_softc *);
194static void bge_stats_update_regs
195 (struct bge_softc *);
196static int bge_encap (struct bge_softc *, struct mbuf *,
197 u_int32_t *);
198
199static void bge_intr (void *);
200static void bge_start_locked (struct ifnet *);
201static void bge_start (struct ifnet *);
202static int bge_ioctl (struct ifnet *, u_long, caddr_t);
203static void bge_init_locked (struct bge_softc *);
204static void bge_init (void *);
205static void bge_stop (struct bge_softc *);
206static void bge_watchdog (struct ifnet *);
207static void bge_shutdown (device_t);
208static int bge_ifmedia_upd (struct ifnet *);
209static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
210
211static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *);
212static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int);
213
214static void bge_setmulti (struct bge_softc *);
215
216static void bge_handle_events (struct bge_softc *);
217static int bge_alloc_jumbo_mem (struct bge_softc *);
218static void bge_free_jumbo_mem (struct bge_softc *);
219static void *bge_jalloc (struct bge_softc *);
220static void bge_jfree (void *, void *);
221static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *);
222static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *);
223static int bge_init_rx_ring_std (struct bge_softc *);
224static void bge_free_rx_ring_std (struct bge_softc *);
225static int bge_init_rx_ring_jumbo (struct bge_softc *);
226static void bge_free_rx_ring_jumbo (struct bge_softc *);
227static void bge_free_tx_ring (struct bge_softc *);
228static int bge_init_tx_ring (struct bge_softc *);
229
230static int bge_chipinit (struct bge_softc *);
231static int bge_blockinit (struct bge_softc *);
232
233#ifdef notdef
234static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
235static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int);
236static void bge_vpd_read (struct bge_softc *);
237#endif
238
239static u_int32_t bge_readmem_ind
240 (struct bge_softc *, int);
241static void bge_writemem_ind (struct bge_softc *, int, int);
242#ifdef notdef
243static u_int32_t bge_readreg_ind
244 (struct bge_softc *, int);
245#endif
246static void bge_writereg_ind (struct bge_softc *, int, int);
247
248static int bge_miibus_readreg (device_t, int, int);
249static int bge_miibus_writereg (device_t, int, int, int);
250static void bge_miibus_statchg (device_t);
251
252static void bge_reset (struct bge_softc *);
253
254static device_method_t bge_methods[] = {
255 /* Device interface */
256 DEVMETHOD(device_probe, bge_probe),
257 DEVMETHOD(device_attach, bge_attach),
258 DEVMETHOD(device_detach, bge_detach),
259 DEVMETHOD(device_shutdown, bge_shutdown),
260
261 /* bus interface */
262 DEVMETHOD(bus_print_child, bus_generic_print_child),
263 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
264
265 /* MII interface */
266 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
267 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
268 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
269
270 { 0, 0 }
271};
272
273static driver_t bge_driver = {
274 "bge",
275 bge_methods,
276 sizeof(struct bge_softc)
277};
278
279static devclass_t bge_devclass;
280
281DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
282DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
283
284static u_int32_t
285bge_readmem_ind(sc, off)
286 struct bge_softc *sc;
287 int off;
288{
289 device_t dev;
290
291 dev = sc->bge_dev;
292
293 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
294 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
295}
296
297static void
298bge_writemem_ind(sc, off, val)
299 struct bge_softc *sc;
300 int off, val;
301{
302 device_t dev;
303
304 dev = sc->bge_dev;
305
306 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
307 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
308
309 return;
310}
311
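/*
 * Illustrative sketch, not part of either revision: bge_readmem_ind()
 * and bge_writemem_ind() above implement the PCI memory-window
 * indirection.  A hypothetical caller that wanted to inspect one word
 * of NIC-internal memory -- say the first word of the status block --
 * could do so like this:
 */
#ifdef notdef
static u_int32_t
bge_peek_status_word(struct bge_softc *sc)
{
	/* Aim the memory window at the status block and read one word. */
	return (bge_readmem_ind(sc, BGE_STATUS_BLOCK));
}
#endif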
312#ifdef notdef
313static u_int32_t
314bge_readreg_ind(sc, off)
315 struct bge_softc *sc;
316 int off;
317{
318 device_t dev;
319
320 dev = sc->bge_dev;
321
322 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
323 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
324}
325#endif
326
327static void
328bge_writereg_ind(sc, off, val)
329 struct bge_softc *sc;
330 int off, val;
331{
332 device_t dev;
333
334 dev = sc->bge_dev;
335
336 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
337 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
338
339 return;
340}
341
342/*
343 * Map a single buffer address.
344 */
345
346static void
347bge_dma_map_addr(arg, segs, nseg, error)
348 void *arg;
349 bus_dma_segment_t *segs;
350 int nseg;
351 int error;
352{
353 struct bge_dmamap_arg *ctx;
354
355 if (error)
356 return;
357
358 ctx = arg;
359
360 if (nseg > ctx->bge_maxsegs) {
361 ctx->bge_maxsegs = 0;
362 return;
363 }
364
365 ctx->bge_busaddr = segs->ds_addr;
366
367 return;
368}
369
370/*
 371 * Map an mbuf chain into a TX ring.
372 */
373
374static void
375bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
376 void *arg;
377 bus_dma_segment_t *segs;
378 int nseg;
379 bus_size_t mapsize;
380 int error;
381{
382 struct bge_dmamap_arg *ctx;
383 struct bge_tx_bd *d = NULL;
384 int i = 0, idx;
385
386 if (error)
387 return;
388
389 ctx = arg;
390
 391	/* Signal error to caller if there are too many segments */
392 if (nseg > ctx->bge_maxsegs) {
393 ctx->bge_maxsegs = 0;
394 return;
395 }
396
397 idx = ctx->bge_idx;
398 while(1) {
399 d = &ctx->bge_ring[idx];
400 d->bge_addr.bge_addr_lo =
401 htole32(BGE_ADDR_LO(segs[i].ds_addr));
402 d->bge_addr.bge_addr_hi =
403 htole32(BGE_ADDR_HI(segs[i].ds_addr));
404 d->bge_len = htole16(segs[i].ds_len);
405 d->bge_flags = htole16(ctx->bge_flags);
406 i++;
407 if (i == nseg)
408 break;
409 BGE_INC(idx, BGE_TX_RING_CNT);
410 }
411
412 d->bge_flags |= htole16(BGE_TXBDFLAG_END);
413 ctx->bge_maxsegs = nseg;
414 ctx->bge_idx = idx;
415
416 return;
417}
418
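/*
 * Sketch of how the callback above is typically driven.  The actual
 * call site, bge_encap(), lies outside this hunk, so the local names
 * here ("map", "m_head", "csum_flags", "idx") are assumptions rather
 * than quoted code; only the bge_dmamap_arg fields and the
 * bus_dmamap_load_mbuf() interface are taken from the source.
 */
#ifdef notdef
	struct bge_dmamap_arg ctx;
	int error;

	ctx.sc = sc;
	ctx.bge_idx = idx;			/* first free TX slot */
	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
	ctx.bge_flags = csum_flags;		/* per-packet BD flags */
	/* Have the callback refuse mappings that would overfill the ring. */
	ctx.bge_maxsegs = BGE_TX_RING_CNT - sc->bge_txcnt;
	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
#endif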
419
420#ifdef notdef
421static u_int8_t
422bge_vpd_readbyte(sc, addr)
423 struct bge_softc *sc;
424 int addr;
425{
426 int i;
427 device_t dev;
428 u_int32_t val;
429
430 dev = sc->bge_dev;
431 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
432 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
433 DELAY(10);
434 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
435 break;
436 }
437
 438	if (i == BGE_TIMEOUT * 10) {
439 printf("bge%d: VPD read timed out\n", sc->bge_unit);
440 return(0);
441 }
442
443 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
444
445 return((val >> ((addr % 4) * 8)) & 0xFF);
446}
447
448static void
449bge_vpd_read_res(sc, res, addr)
450 struct bge_softc *sc;
451 struct vpd_res *res;
452 int addr;
453{
454 int i;
455 u_int8_t *ptr;
456
457 ptr = (u_int8_t *)res;
458 for (i = 0; i < sizeof(struct vpd_res); i++)
459 ptr[i] = bge_vpd_readbyte(sc, i + addr);
460
461 return;
462}
463
464static void
465bge_vpd_read(sc)
466 struct bge_softc *sc;
467{
468 int pos = 0, i;
469 struct vpd_res res;
470
471 if (sc->bge_vpd_prodname != NULL)
472 free(sc->bge_vpd_prodname, M_DEVBUF);
473 if (sc->bge_vpd_readonly != NULL)
474 free(sc->bge_vpd_readonly, M_DEVBUF);
475 sc->bge_vpd_prodname = NULL;
476 sc->bge_vpd_readonly = NULL;
477
478 bge_vpd_read_res(sc, &res, pos);
479
480 if (res.vr_id != VPD_RES_ID) {
481 printf("bge%d: bad VPD resource id: expected %x got %x\n",
482 sc->bge_unit, VPD_RES_ID, res.vr_id);
483 return;
484 }
485
486 pos += sizeof(res);
487 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
488 for (i = 0; i < res.vr_len; i++)
489 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
490 sc->bge_vpd_prodname[i] = '\0';
491 pos += i;
492
493 bge_vpd_read_res(sc, &res, pos);
494
495 if (res.vr_id != VPD_RES_READ) {
496 printf("bge%d: bad VPD resource id: expected %x got %x\n",
497 sc->bge_unit, VPD_RES_READ, res.vr_id);
498 return;
499 }
500
501 pos += sizeof(res);
502 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
503 for (i = 0; i < res.vr_len + 1; i++)
504 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
505
506 return;
507}
508#endif
509
510/*
511 * Read a byte of data stored in the EEPROM at address 'addr.' The
512 * BCM570x supports both the traditional bitbang interface and an
513 * auto access interface for reading the EEPROM. We use the auto
514 * access method.
515 */
516static u_int8_t
517bge_eeprom_getbyte(sc, addr, dest)
518 struct bge_softc *sc;
519 int addr;
520 u_int8_t *dest;
521{
522 int i;
523 u_int32_t byte = 0;
524
525 /*
526 * Enable use of auto EEPROM access so we can avoid
527 * having to use the bitbang method.
528 */
529 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
530
531 /* Reset the EEPROM, load the clock period. */
532 CSR_WRITE_4(sc, BGE_EE_ADDR,
533 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
534 DELAY(20);
535
536 /* Issue the read EEPROM command. */
537 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
538
539 /* Wait for completion */
540 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
541 DELAY(10);
542 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
543 break;
544 }
545
 546	if (i == BGE_TIMEOUT * 10) {
547 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
548 return(0);
549 }
550
551 /* Get result. */
552 byte = CSR_READ_4(sc, BGE_EE_DATA);
553
554 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
555
556 return(0);
557}
558
559/*
560 * Read a sequence of bytes from the EEPROM.
561 */
562static int
563bge_read_eeprom(sc, dest, off, cnt)
564 struct bge_softc *sc;
565 caddr_t dest;
566 int off;
567 int cnt;
568{
569 int err = 0, i;
570 u_int8_t byte = 0;
571
572 for (i = 0; i < cnt; i++) {
573 err = bge_eeprom_getbyte(sc, off + i, &byte);
574 if (err)
575 break;
576 *(dest + i) = byte;
577 }
578
579 return(err ? 1 : 0);
580}
581
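/*
 * A minimal usage sketch (assuming the BGE_EE_MAC_OFFSET layout from
 * if_bgereg.h): fetching the factory station address out of the
 * EEPROM during attach would look roughly like this:
 */
#ifdef notdef
	u_char eaddr[ETHER_ADDR_LEN];

	if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
		printf("bge%d: failed to read station address\n",
		    sc->bge_unit);
#endif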
582static int
583bge_miibus_readreg(dev, phy, reg)
584 device_t dev;
585 int phy, reg;
586{
587 struct bge_softc *sc;
588 u_int32_t val, autopoll;
589 int i;
590
591 sc = device_get_softc(dev);
592
593 /*
594 * Broadcom's own driver always assumes the internal
595 * PHY is at GMII address 1. On some chips, the PHY responds
596 * to accesses at all addresses, which could cause us to
 597	 * bogusly attach the PHY 32 times at probe time. Always
 598	 * restricting the lookup to address 1 is simpler than
 599	 * trying to figure out which chip revisions should be
600 * special-cased.
601 */
602 if (phy != 1)
603 return(0);
604
605 /* Reading with autopolling on may trigger PCI errors */
606 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
607 if (autopoll & BGE_MIMODE_AUTOPOLL) {
608 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
609 DELAY(40);
610 }
611
612 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
613 BGE_MIPHY(phy)|BGE_MIREG(reg));
614
615 for (i = 0; i < BGE_TIMEOUT; i++) {
616 val = CSR_READ_4(sc, BGE_MI_COMM);
617 if (!(val & BGE_MICOMM_BUSY))
618 break;
619 }
620
621 if (i == BGE_TIMEOUT) {
622 printf("bge%d: PHY read timed out\n", sc->bge_unit);
623 val = 0;
624 goto done;
625 }
626
627 val = CSR_READ_4(sc, BGE_MI_COMM);
628
629done:
630 if (autopoll & BGE_MIMODE_AUTOPOLL) {
631 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
632 DELAY(40);
633 }
634
635 if (val & BGE_MICOMM_READFAIL)
636 return(0);
637
638 return(val & 0xFFFF);
639}
640
641static int
642bge_miibus_writereg(dev, phy, reg, val)
643 device_t dev;
644 int phy, reg, val;
645{
646 struct bge_softc *sc;
647 u_int32_t autopoll;
648 int i;
649
650 sc = device_get_softc(dev);
651
 652	/* Writing with autopolling on may trigger PCI errors */
653 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
654 if (autopoll & BGE_MIMODE_AUTOPOLL) {
655 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
656 DELAY(40);
657 }
658
659 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
660 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
661
662 for (i = 0; i < BGE_TIMEOUT; i++) {
663 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
664 break;
665 }
666
667 if (autopoll & BGE_MIMODE_AUTOPOLL) {
668 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
669 DELAY(40);
670 }
671
672 if (i == BGE_TIMEOUT) {
 673		printf("bge%d: PHY write timed out\n", sc->bge_unit);
674 return(0);
675 }
676
677 return(0);
678}
679
680static void
681bge_miibus_statchg(dev)
682 device_t dev;
683{
684 struct bge_softc *sc;
685 struct mii_data *mii;
686
687 sc = device_get_softc(dev);
688 mii = device_get_softc(sc->bge_miibus);
689
690 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
691 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
692 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
693 } else {
694 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
695 }
696
697 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
698 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
699 } else {
700 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
701 }
702
703 return;
704}
705
706/*
707 * Handle events that have triggered interrupts.
708 */
709static void
710bge_handle_events(sc)
711 struct bge_softc *sc;
712{
713
714 return;
715}
716
717/*
718 * Memory management for jumbo frames.
719 */
720
721static int
722bge_alloc_jumbo_mem(sc)
723 struct bge_softc *sc;
724{
725 caddr_t ptr;
726 register int i, error;
727 struct bge_jpool_entry *entry;
728
729 /* Create tag for jumbo buffer block */
730
731 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
732 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
733 NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
734 &sc->bge_cdata.bge_jumbo_tag);
735
736 if (error) {
737 printf("bge%d: could not allocate jumbo dma tag\n",
738 sc->bge_unit);
739 return (ENOMEM);
740 }
741
742 /* Allocate DMA'able memory for jumbo buffer block */
743
744 error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
745 (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
746 &sc->bge_cdata.bge_jumbo_map);
747
748 if (error)
749 return (ENOMEM);
750
751 SLIST_INIT(&sc->bge_jfree_listhead);
752 SLIST_INIT(&sc->bge_jinuse_listhead);
753
754 /*
755 * Now divide it up into 9K pieces and save the addresses
756 * in an array.
757 */
758 ptr = sc->bge_ldata.bge_jumbo_buf;
759 for (i = 0; i < BGE_JSLOTS; i++) {
760 sc->bge_cdata.bge_jslots[i] = ptr;
761 ptr += BGE_JLEN;
762 entry = malloc(sizeof(struct bge_jpool_entry),
763 M_DEVBUF, M_NOWAIT);
764 if (entry == NULL) {
765 bge_free_jumbo_mem(sc);
766 sc->bge_ldata.bge_jumbo_buf = NULL;
767 printf("bge%d: no memory for jumbo "
768 "buffer queue!\n", sc->bge_unit);
769 return(ENOBUFS);
770 }
771 entry->slot = i;
772 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
773 entry, jpool_entries);
774 }
775
776 return(0);
777}
778
779static void
780bge_free_jumbo_mem(sc)
781 struct bge_softc *sc;
782{
783 int i;
784 struct bge_jpool_entry *entry;
785
786 for (i = 0; i < BGE_JSLOTS; i++) {
787 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
788 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
789 free(entry, M_DEVBUF);
790 }
791
792 /* Destroy jumbo buffer block */
793
 794	if (sc->bge_ldata.bge_jumbo_buf)
795 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
796 sc->bge_ldata.bge_jumbo_buf,
797 sc->bge_cdata.bge_jumbo_map);
798
 799	if (sc->bge_cdata.bge_jumbo_map)
800 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
801 sc->bge_cdata.bge_jumbo_map);
802
803 if (sc->bge_cdata.bge_jumbo_tag)
804 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
805
806 return;
807}
808
809/*
810 * Allocate a jumbo buffer.
811 */
812static void *
813bge_jalloc(sc)
814 struct bge_softc *sc;
815{
816 struct bge_jpool_entry *entry;
817
818 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
819
820 if (entry == NULL) {
821 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
822 return(NULL);
823 }
824
825 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
826 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
827 return(sc->bge_cdata.bge_jslots[entry->slot]);
828}
829
830/*
831 * Release a jumbo buffer.
832 */
833static void
834bge_jfree(buf, args)
835 void *buf;
836 void *args;
837{
838 struct bge_jpool_entry *entry;
839 struct bge_softc *sc;
840 int i;
841
842 /* Extract the softc struct pointer. */
843 sc = (struct bge_softc *)args;
844
845 if (sc == NULL)
846 panic("bge_jfree: can't find softc pointer!");
847
848 /* calculate the slot this buffer belongs to */
849
850 i = ((vm_offset_t)buf
851 - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
852
853 if ((i < 0) || (i >= BGE_JSLOTS))
854 panic("bge_jfree: asked to free buffer that we don't manage!");
855
856 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
857 if (entry == NULL)
858 panic("bge_jfree: buffer not in use!");
859 entry->slot = i;
860 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
861 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
862
863 return;
864}
865
866
867/*
 868 * Initialize a standard receive ring descriptor.
869 */
870static int
871bge_newbuf_std(sc, i, m)
872 struct bge_softc *sc;
873 int i;
874 struct mbuf *m;
875{
876 struct mbuf *m_new = NULL;
877 struct bge_rx_bd *r;
878 struct bge_dmamap_arg ctx;
879 int error;
880
881 if (m == NULL) {
882 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
883 if (m_new == NULL) {
884 return(ENOBUFS);
885 }
886
887 MCLGET(m_new, M_DONTWAIT);
888 if (!(m_new->m_flags & M_EXT)) {
889 m_freem(m_new);
890 return(ENOBUFS);
891 }
892 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
893 } else {
894 m_new = m;
895 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
896 m_new->m_data = m_new->m_ext.ext_buf;
897 }
898
899 if (!sc->bge_rx_alignment_bug)
900 m_adj(m_new, ETHER_ALIGN);
901 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
902 r = &sc->bge_ldata.bge_rx_std_ring[i];
903 ctx.bge_maxsegs = 1;
904 ctx.sc = sc;
905 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
906 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
907 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
908 if (error || ctx.bge_maxsegs == 0) {
909 if (m == NULL)
910 m_freem(m_new);
911 return(ENOMEM);
912 }
913 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
914 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
915 r->bge_flags = htole16(BGE_RXBDFLAG_END);
916 r->bge_len = htole16(m_new->m_len);
917 r->bge_idx = htole16(i);
918
919 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
920 sc->bge_cdata.bge_rx_std_dmamap[i],
921 BUS_DMASYNC_PREREAD);
922
923 return(0);
924}
925
926/*
927 * Initialize a jumbo receive ring descriptor. This allocates
928 * a jumbo buffer from the pool managed internally by the driver.
929 */
930static int
931bge_newbuf_jumbo(sc, i, m)
932 struct bge_softc *sc;
933 int i;
934 struct mbuf *m;
935{
936 struct mbuf *m_new = NULL;
937 struct bge_rx_bd *r;
938 struct bge_dmamap_arg ctx;
939 int error;
940
941 if (m == NULL) {
942 caddr_t *buf = NULL;
943
944 /* Allocate the mbuf. */
945 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
946 if (m_new == NULL) {
947 return(ENOBUFS);
948 }
949
950 /* Allocate the jumbo buffer */
951 buf = bge_jalloc(sc);
952 if (buf == NULL) {
953 m_freem(m_new);
954 printf("bge%d: jumbo allocation failed "
955 "-- packet dropped!\n", sc->bge_unit);
956 return(ENOBUFS);
957 }
958
959 /* Attach the buffer to the mbuf. */
960 m_new->m_data = (void *) buf;
961 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
962 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
963 (struct bge_softc *)sc, 0, EXT_NET_DRV);
964 } else {
965 m_new = m;
966 m_new->m_data = m_new->m_ext.ext_buf;
967 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
968 }
969
970 if (!sc->bge_rx_alignment_bug)
971 m_adj(m_new, ETHER_ALIGN);
972 /* Set up the descriptor. */
973 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
974 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
975 ctx.bge_maxsegs = 1;
976 ctx.sc = sc;
977 error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
978 sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
979 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
980 if (error || ctx.bge_maxsegs == 0) {
981 if (m == NULL)
982 m_freem(m_new);
983 return(ENOMEM);
984 }
985 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
986 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
987 r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
988 r->bge_len = htole16(m_new->m_len);
989 r->bge_idx = htole16(i);
990
 991	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
992 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
993 BUS_DMASYNC_PREREAD);
994
995 return(0);
996}
997
998/*
999 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 1000 * that's 1MB of memory, which is a lot. For now, we fill only the first
1001 * 256 ring entries and hope that our CPU is fast enough to keep up with
1002 * the NIC.
1003 */
1004static int
1005bge_init_rx_ring_std(sc)
1006 struct bge_softc *sc;
1007{
1008 int i;
1009
1010 for (i = 0; i < BGE_SSLOTS; i++) {
1011 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1012 return(ENOBUFS);
1013 };
1014
1015 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1016 sc->bge_cdata.bge_rx_std_ring_map,
1017 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1018
1019 sc->bge_std = i - 1;
1020 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1021
1022 return(0);
1023}
1024
1025static void
1026bge_free_rx_ring_std(sc)
1027 struct bge_softc *sc;
1028{
1029 int i;
1030
1031 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1032 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1033 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1034 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1035 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1036 sc->bge_cdata.bge_rx_std_dmamap[i]);
1037 }
1038 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1039 sizeof(struct bge_rx_bd));
1040 }
1041
1042 return;
1043}
1044
1045static int
1046bge_init_rx_ring_jumbo(sc)
1047 struct bge_softc *sc;
1048{
1049 int i;
1050 struct bge_rcb *rcb;
1051
1052 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1053 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1054 return(ENOBUFS);
1055 };
1056
1057 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1058 sc->bge_cdata.bge_rx_jumbo_ring_map,
1059 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1060
1061 sc->bge_jumbo = i - 1;
1062
1063 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1064 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1065 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1066
1067 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1068
1069 return(0);
1070}
1071
1072static void
1073bge_free_rx_ring_jumbo(sc)
1074 struct bge_softc *sc;
1075{
1076 int i;
1077
1078 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1079 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1080 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1081 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1082 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1083 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1084 }
1085 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1086 sizeof(struct bge_rx_bd));
1087 }
1088
1089 return;
1090}
1091
1092static void
1093bge_free_tx_ring(sc)
1094 struct bge_softc *sc;
1095{
1096 int i;
1097
1098 if (sc->bge_ldata.bge_tx_ring == NULL)
1099 return;
1100
1101 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1102 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1103 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1104 sc->bge_cdata.bge_tx_chain[i] = NULL;
1105 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1106 sc->bge_cdata.bge_tx_dmamap[i]);
1107 }
1108 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1109 sizeof(struct bge_tx_bd));
1110 }
1111
1112 return;
1113}
1114
1115static int
1116bge_init_tx_ring(sc)
1117 struct bge_softc *sc;
1118{
1119 sc->bge_txcnt = 0;
1120 sc->bge_tx_saved_considx = 0;
1121
1122 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1123 /* 5700 b2 errata */
1124 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1125 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1126
1127 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1128 /* 5700 b2 errata */
1129 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1130 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1131
1132 return(0);
1133}
1134
1135static void
1136bge_setmulti(sc)
1137 struct bge_softc *sc;
1138{
1139 struct ifnet *ifp;
1140 struct ifmultiaddr *ifma;
1141 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1142 int h, i;
1143
1144 BGE_LOCK_ASSERT(sc);
1145
1146 ifp = &sc->arpcom.ac_if;
1147
1148 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1149 for (i = 0; i < 4; i++)
1150 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1151 return;
1152 }
1153
1154 /* First, zot all the existing filters. */
1155 for (i = 0; i < 4; i++)
1156 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1157
1158 /* Now program new ones. */
1159 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1160 if (ifma->ifma_addr->sa_family != AF_LINK)
1161 continue;
1162 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1163 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1164 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1165 }
1166
1167 for (i = 0; i < 4; i++)
1168 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1169
1170 return;
1171}
1172
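/*
 * Open-coded form of the two-line hash computation above, for clarity
 * (illustrative only): h is a 7-bit value, so bits 6:5 select one of
 * the four 32-bit BGE_MAR registers and bits 4:0 select the bit
 * within that register.
 */
#ifdef notdef
	int word = (h >> 5) & 3;	/* same as (h & 0x60) >> 5 */
	int bit  = h & 0x1F;

	hashes[word] |= 1 << bit;
#endif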
1173/*
1174 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1175 * self-test results.
1176 */
1177static int
1178bge_chipinit(sc)
1179 struct bge_softc *sc;
1180{
1181 int i;
1182 u_int32_t dma_rw_ctl;
1183
1184 /* Set endianness before we access any non-PCI registers. */
1185#if BYTE_ORDER == BIG_ENDIAN
1186 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1187 BGE_BIGENDIAN_INIT, 4);
1188#else
1189 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1190 BGE_LITTLEENDIAN_INIT, 4);
1191#endif
1192
1193 /*
1194 * Check the 'ROM failed' bit on the RX CPU to see if
1195 * self-tests passed.
1196 */
1197 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1198 printf("bge%d: RX CPU self-diagnostics failed!\n",
1199 sc->bge_unit);
1200 return(ENODEV);
1201 }
1202
1203 /* Clear the MAC control register */
1204 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1205
1206 /*
1207 * Clear the MAC statistics block in the NIC's
1208 * internal memory.
1209 */
1210 for (i = BGE_STATS_BLOCK;
1211 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1212 BGE_MEMWIN_WRITE(sc, i, 0);
1213
1214 for (i = BGE_STATUS_BLOCK;
1215 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1216 BGE_MEMWIN_WRITE(sc, i, 0);
1217
1218 /* Set up the PCI DMA control register. */
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
165 "Broadcom BCM5782 Gigabit Ethernet" },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
167 "Broadcom BCM5788 Gigabit Ethernet" },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
169 "Broadcom BCM5901 Fast Ethernet" },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
171 "Broadcom BCM5901A2 Fast Ethernet" },
172 { SK_VENDORID, SK_DEVICEID_ALTIMA,
173 "SysKonnect Gigabit Ethernet" },
174 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
175 "Altima AC1000 Gigabit Ethernet" },
176 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
177 "Altima AC1002 Gigabit Ethernet" },
178 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
179 "Altima AC9100 Gigabit Ethernet" },
180 { 0, 0, NULL }
181};
182
183static int bge_probe (device_t);
184static int bge_attach (device_t);
185static int bge_detach (device_t);
186static void bge_release_resources
187 (struct bge_softc *);
188static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
189static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
190 bus_size_t, int);
191static int bge_dma_alloc (device_t);
192static void bge_dma_free (struct bge_softc *);
193
194static void bge_txeof (struct bge_softc *);
195static void bge_rxeof (struct bge_softc *);
196
197static void bge_tick_locked (struct bge_softc *);
198static void bge_tick (void *);
199static void bge_stats_update (struct bge_softc *);
200static void bge_stats_update_regs
201 (struct bge_softc *);
202static int bge_encap (struct bge_softc *, struct mbuf *,
203 u_int32_t *);
204
205static void bge_intr (void *);
206static void bge_start_locked (struct ifnet *);
207static void bge_start (struct ifnet *);
208static int bge_ioctl (struct ifnet *, u_long, caddr_t);
209static void bge_init_locked (struct bge_softc *);
210static void bge_init (void *);
211static void bge_stop (struct bge_softc *);
212static void bge_watchdog (struct ifnet *);
213static void bge_shutdown (device_t);
214static int bge_ifmedia_upd (struct ifnet *);
215static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
216
217static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *);
218static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int);
219
220static void bge_setmulti (struct bge_softc *);
221
222static void bge_handle_events (struct bge_softc *);
223static int bge_alloc_jumbo_mem (struct bge_softc *);
224static void bge_free_jumbo_mem (struct bge_softc *);
225static void *bge_jalloc (struct bge_softc *);
226static void bge_jfree (void *, void *);
227static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *);
228static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *);
229static int bge_init_rx_ring_std (struct bge_softc *);
230static void bge_free_rx_ring_std (struct bge_softc *);
231static int bge_init_rx_ring_jumbo (struct bge_softc *);
232static void bge_free_rx_ring_jumbo (struct bge_softc *);
233static void bge_free_tx_ring (struct bge_softc *);
234static int bge_init_tx_ring (struct bge_softc *);
235
236static int bge_chipinit (struct bge_softc *);
237static int bge_blockinit (struct bge_softc *);
238
239#ifdef notdef
240static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
241static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int);
242static void bge_vpd_read (struct bge_softc *);
243#endif
244
245static u_int32_t bge_readmem_ind
246 (struct bge_softc *, int);
247static void bge_writemem_ind (struct bge_softc *, int, int);
248#ifdef notdef
249static u_int32_t bge_readreg_ind
250 (struct bge_softc *, int);
251#endif
252static void bge_writereg_ind (struct bge_softc *, int, int);
253
254static int bge_miibus_readreg (device_t, int, int);
255static int bge_miibus_writereg (device_t, int, int, int);
256static void bge_miibus_statchg (device_t);
257
258static void bge_reset (struct bge_softc *);
259
260static device_method_t bge_methods[] = {
261 /* Device interface */
262 DEVMETHOD(device_probe, bge_probe),
263 DEVMETHOD(device_attach, bge_attach),
264 DEVMETHOD(device_detach, bge_detach),
265 DEVMETHOD(device_shutdown, bge_shutdown),
266
267 /* bus interface */
268 DEVMETHOD(bus_print_child, bus_generic_print_child),
269 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
270
271 /* MII interface */
272 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
273 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
274 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
275
276 { 0, 0 }
277};
278
279static driver_t bge_driver = {
280 "bge",
281 bge_methods,
282 sizeof(struct bge_softc)
283};
284
285static devclass_t bge_devclass;
286
287DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
288DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
289
290static u_int32_t
291bge_readmem_ind(sc, off)
292 struct bge_softc *sc;
293 int off;
294{
295 device_t dev;
296
297 dev = sc->bge_dev;
298
299 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
300 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
301}
302
303static void
304bge_writemem_ind(sc, off, val)
305 struct bge_softc *sc;
306 int off, val;
307{
308 device_t dev;
309
310 dev = sc->bge_dev;
311
312 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
313 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
314
315 return;
316}
317
318#ifdef notdef
319static u_int32_t
320bge_readreg_ind(sc, off)
321 struct bge_softc *sc;
322 int off;
323{
324 device_t dev;
325
326 dev = sc->bge_dev;
327
328 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
329 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
330}
331#endif
332
333static void
334bge_writereg_ind(sc, off, val)
335 struct bge_softc *sc;
336 int off, val;
337{
338 device_t dev;
339
340 dev = sc->bge_dev;
341
342 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
343 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
344
345 return;
346}
347
348/*
349 * Map a single buffer address.
350 */
351
352static void
353bge_dma_map_addr(arg, segs, nseg, error)
354 void *arg;
355 bus_dma_segment_t *segs;
356 int nseg;
357 int error;
358{
359 struct bge_dmamap_arg *ctx;
360
361 if (error)
362 return;
363
364 ctx = arg;
365
366 if (nseg > ctx->bge_maxsegs) {
367 ctx->bge_maxsegs = 0;
368 return;
369 }
370
371 ctx->bge_busaddr = segs->ds_addr;
372
373 return;
374}
375
376/*
377 * Map an mbuf chain into an TX ring.
378 */
379
380static void
381bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
382 void *arg;
383 bus_dma_segment_t *segs;
384 int nseg;
385 bus_size_t mapsize;
386 int error;
387{
388 struct bge_dmamap_arg *ctx;
389 struct bge_tx_bd *d = NULL;
390 int i = 0, idx;
391
392 if (error)
393 return;
394
395 ctx = arg;
396
397 /* Signal error to caller if there's too many segments */
398 if (nseg > ctx->bge_maxsegs) {
399 ctx->bge_maxsegs = 0;
400 return;
401 }
402
403 idx = ctx->bge_idx;
404 while(1) {
405 d = &ctx->bge_ring[idx];
406 d->bge_addr.bge_addr_lo =
407 htole32(BGE_ADDR_LO(segs[i].ds_addr));
408 d->bge_addr.bge_addr_hi =
409 htole32(BGE_ADDR_HI(segs[i].ds_addr));
410 d->bge_len = htole16(segs[i].ds_len);
411 d->bge_flags = htole16(ctx->bge_flags);
412 i++;
413 if (i == nseg)
414 break;
415 BGE_INC(idx, BGE_TX_RING_CNT);
416 }
417
418 d->bge_flags |= htole16(BGE_TXBDFLAG_END);
419 ctx->bge_maxsegs = nseg;
420 ctx->bge_idx = idx;
421
422 return;
423}
424
425
426#ifdef notdef
427static u_int8_t
428bge_vpd_readbyte(sc, addr)
429 struct bge_softc *sc;
430 int addr;
431{
432 int i;
433 device_t dev;
434 u_int32_t val;
435
436 dev = sc->bge_dev;
437 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
438 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
439 DELAY(10);
440 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
441 break;
442 }
443
444 if (i == BGE_TIMEOUT) {
445 printf("bge%d: VPD read timed out\n", sc->bge_unit);
446 return(0);
447 }
448
449 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
450
451 return((val >> ((addr % 4) * 8)) & 0xFF);
452}
453
454static void
455bge_vpd_read_res(sc, res, addr)
456 struct bge_softc *sc;
457 struct vpd_res *res;
458 int addr;
459{
460 int i;
461 u_int8_t *ptr;
462
463 ptr = (u_int8_t *)res;
464 for (i = 0; i < sizeof(struct vpd_res); i++)
465 ptr[i] = bge_vpd_readbyte(sc, i + addr);
466
467 return;
468}
469
470static void
471bge_vpd_read(sc)
472 struct bge_softc *sc;
473{
474 int pos = 0, i;
475 struct vpd_res res;
476
477 if (sc->bge_vpd_prodname != NULL)
478 free(sc->bge_vpd_prodname, M_DEVBUF);
479 if (sc->bge_vpd_readonly != NULL)
480 free(sc->bge_vpd_readonly, M_DEVBUF);
481 sc->bge_vpd_prodname = NULL;
482 sc->bge_vpd_readonly = NULL;
483
484 bge_vpd_read_res(sc, &res, pos);
485
486 if (res.vr_id != VPD_RES_ID) {
487 printf("bge%d: bad VPD resource id: expected %x got %x\n",
488 sc->bge_unit, VPD_RES_ID, res.vr_id);
489 return;
490 }
491
492 pos += sizeof(res);
493 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
494 for (i = 0; i < res.vr_len; i++)
495 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
496 sc->bge_vpd_prodname[i] = '\0';
497 pos += i;
498
499 bge_vpd_read_res(sc, &res, pos);
500
501 if (res.vr_id != VPD_RES_READ) {
502 printf("bge%d: bad VPD resource id: expected %x got %x\n",
503 sc->bge_unit, VPD_RES_READ, res.vr_id);
504 return;
505 }
506
507 pos += sizeof(res);
508 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
509 for (i = 0; i < res.vr_len + 1; i++)
510 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
511
512 return;
513}
514#endif
515
516/*
517 * Read a byte of data stored in the EEPROM at address 'addr.' The
518 * BCM570x supports both the traditional bitbang interface and an
519 * auto access interface for reading the EEPROM. We use the auto
520 * access method.
521 */
522static u_int8_t
523bge_eeprom_getbyte(sc, addr, dest)
524 struct bge_softc *sc;
525 int addr;
526 u_int8_t *dest;
527{
528 int i;
529 u_int32_t byte = 0;
530
531 /*
532 * Enable use of auto EEPROM access so we can avoid
533 * having to use the bitbang method.
534 */
535 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
536
537 /* Reset the EEPROM, load the clock period. */
538 CSR_WRITE_4(sc, BGE_EE_ADDR,
539 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
540 DELAY(20);
541
542 /* Issue the read EEPROM command. */
543 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
544
545 /* Wait for completion */
546 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
547 DELAY(10);
548 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
549 break;
550 }
551
552 if (i == BGE_TIMEOUT) {
553 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
554 return(0);
555 }
556
557 /* Get result. */
558 byte = CSR_READ_4(sc, BGE_EE_DATA);
559
560 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
561
562 return(0);
563}
564
565/*
566 * Read a sequence of bytes from the EEPROM.
567 */
568static int
569bge_read_eeprom(sc, dest, off, cnt)
570 struct bge_softc *sc;
571 caddr_t dest;
572 int off;
573 int cnt;
574{
575 int err = 0, i;
576 u_int8_t byte = 0;
577
578 for (i = 0; i < cnt; i++) {
579 err = bge_eeprom_getbyte(sc, off + i, &byte);
580 if (err)
581 break;
582 *(dest + i) = byte;
583 }
584
585 return(err ? 1 : 0);
586}
587
588static int
589bge_miibus_readreg(dev, phy, reg)
590 device_t dev;
591 int phy, reg;
592{
593 struct bge_softc *sc;
594 u_int32_t val, autopoll;
595 int i;
596
597 sc = device_get_softc(dev);
598
599 /*
600 * Broadcom's own driver always assumes the internal
601 * PHY is at GMII address 1. On some chips, the PHY responds
602 * to accesses at all addresses, which could cause us to
603 * bogusly attach the PHY 32 times at probe type. Always
604 * restricting the lookup to address 1 is simpler than
605 * trying to figure out which chips revisions should be
606 * special-cased.
607 */
608 if (phy != 1)
609 return(0);
610
611 /* Reading with autopolling on may trigger PCI errors */
612 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
613 if (autopoll & BGE_MIMODE_AUTOPOLL) {
614 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
615 DELAY(40);
616 }
617
618 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
619 BGE_MIPHY(phy)|BGE_MIREG(reg));
620
621 for (i = 0; i < BGE_TIMEOUT; i++) {
622 val = CSR_READ_4(sc, BGE_MI_COMM);
623 if (!(val & BGE_MICOMM_BUSY))
624 break;
625 }
626
627 if (i == BGE_TIMEOUT) {
628 printf("bge%d: PHY read timed out\n", sc->bge_unit);
629 val = 0;
630 goto done;
631 }
632
633 val = CSR_READ_4(sc, BGE_MI_COMM);
634
635done:
636 if (autopoll & BGE_MIMODE_AUTOPOLL) {
637 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
638 DELAY(40);
639 }
640
641 if (val & BGE_MICOMM_READFAIL)
642 return(0);
643
644 return(val & 0xFFFF);
645}
646
647static int
648bge_miibus_writereg(dev, phy, reg, val)
649 device_t dev;
650 int phy, reg, val;
651{
652 struct bge_softc *sc;
653 u_int32_t autopoll;
654 int i;
655
656 sc = device_get_softc(dev);
657
658 /* Reading with autopolling on may trigger PCI errors */
659 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
660 if (autopoll & BGE_MIMODE_AUTOPOLL) {
661 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
662 DELAY(40);
663 }
664
665 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
666 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
667
668 for (i = 0; i < BGE_TIMEOUT; i++) {
669 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
670 break;
671 }
672
673 if (autopoll & BGE_MIMODE_AUTOPOLL) {
674 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
675 DELAY(40);
676 }
677
678 if (i == BGE_TIMEOUT) {
679 printf("bge%d: PHY read timed out\n", sc->bge_unit);
680 return(0);
681 }
682
683 return(0);
684}
685
686static void
687bge_miibus_statchg(dev)
688 device_t dev;
689{
690 struct bge_softc *sc;
691 struct mii_data *mii;
692
693 sc = device_get_softc(dev);
694 mii = device_get_softc(sc->bge_miibus);
695
696 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
697 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
698 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
699 } else {
700 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
701 }
702
703 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
704 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
705 } else {
706 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
707 }
708
709 return;
710}
711
712/*
713 * Handle events that have triggered interrupts.
714 */
715static void
716bge_handle_events(sc)
717 struct bge_softc *sc;
718{
719
720 return;
721}
722
723/*
724 * Memory management for jumbo frames.
725 */
726
727static int
728bge_alloc_jumbo_mem(sc)
729 struct bge_softc *sc;
730{
731 caddr_t ptr;
732 register int i, error;
733 struct bge_jpool_entry *entry;
734
735 /* Create tag for jumbo buffer block */
736
737 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
738 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
739 NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
740 &sc->bge_cdata.bge_jumbo_tag);
741
742 if (error) {
743 printf("bge%d: could not allocate jumbo dma tag\n",
744 sc->bge_unit);
745 return (ENOMEM);
746 }
747
748 /* Allocate DMA'able memory for jumbo buffer block */
749
750 error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
751 (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
752 &sc->bge_cdata.bge_jumbo_map);
753
754 if (error)
755 return (ENOMEM);
756
757 SLIST_INIT(&sc->bge_jfree_listhead);
758 SLIST_INIT(&sc->bge_jinuse_listhead);
759
760 /*
761 * Now divide it up into 9K pieces and save the addresses
762 * in an array.
763 */
764 ptr = sc->bge_ldata.bge_jumbo_buf;
765 for (i = 0; i < BGE_JSLOTS; i++) {
766 sc->bge_cdata.bge_jslots[i] = ptr;
767 ptr += BGE_JLEN;
768 entry = malloc(sizeof(struct bge_jpool_entry),
769 M_DEVBUF, M_NOWAIT);
770 if (entry == NULL) {
771 bge_free_jumbo_mem(sc);
772 sc->bge_ldata.bge_jumbo_buf = NULL;
773 printf("bge%d: no memory for jumbo "
774 "buffer queue!\n", sc->bge_unit);
775 return(ENOBUFS);
776 }
777 entry->slot = i;
778 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
779 entry, jpool_entries);
780 }
781
782 return(0);
783}
784
785static void
786bge_free_jumbo_mem(sc)
787 struct bge_softc *sc;
788{
789 int i;
790 struct bge_jpool_entry *entry;
791
792	for (i = 0; i < BGE_JSLOTS; i++) {
793		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
		/* The list may be short if the allocation loop failed midway. */
		if (entry == NULL)
			break;
794		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
795		free(entry, M_DEVBUF);
796	}
797
798 /* Destroy jumbo buffer block */
799
800	if (sc->bge_ldata.bge_jumbo_buf)
801		bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
802		    sc->bge_ldata.bge_jumbo_buf,
803		    sc->bge_cdata.bge_jumbo_map);
804
805	if (sc->bge_cdata.bge_jumbo_map)
806		bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
807		    sc->bge_cdata.bge_jumbo_map);
808
809 if (sc->bge_cdata.bge_jumbo_tag)
810 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
811
812 return;
813}
814
815/*
816 * Allocate a jumbo buffer.
817 */
818static void *
819bge_jalloc(sc)
820 struct bge_softc *sc;
821{
822 struct bge_jpool_entry *entry;
823
824 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
825
826 if (entry == NULL) {
827 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
828 return(NULL);
829 }
830
831 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
832 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
833 return(sc->bge_cdata.bge_jslots[entry->slot]);
834}
835
836/*
837 * Release a jumbo buffer.
838 */
839static void
840bge_jfree(buf, args)
841 void *buf;
842 void *args;
843{
844 struct bge_jpool_entry *entry;
845 struct bge_softc *sc;
846 int i;
847
848 /* Extract the softc struct pointer. */
849 sc = (struct bge_softc *)args;
850
851 if (sc == NULL)
852 panic("bge_jfree: can't find softc pointer!");
853
854 /* calculate the slot this buffer belongs to */
855
856 i = ((vm_offset_t)buf
857 - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
858
859 if ((i < 0) || (i >= BGE_JSLOTS))
860 panic("bge_jfree: asked to free buffer that we don't manage!");
861
862 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
863 if (entry == NULL)
864 panic("bge_jfree: buffer not in use!");
865 entry->slot = i;
866 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
867 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
868
869 return;
870}
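
/*
 * bge_jfree() is registered as the external-buffer free routine via
 * MEXTADD() in bge_newbuf_jumbo() below, so simply freeing the mbuf
 * returns its slot to the free list.  The slot index is recovered
 * from the buffer's offset within the jumbo block.
 */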
871
872
873/*
874 * Initialize a standard receive ring descriptor.
875 */
876static int
877bge_newbuf_std(sc, i, m)
878 struct bge_softc *sc;
879 int i;
880 struct mbuf *m;
881{
882 struct mbuf *m_new = NULL;
883 struct bge_rx_bd *r;
884 struct bge_dmamap_arg ctx;
885 int error;
886
887 if (m == NULL) {
888 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
889 if (m_new == NULL) {
890 return(ENOBUFS);
891 }
892
893 MCLGET(m_new, M_DONTWAIT);
894 if (!(m_new->m_flags & M_EXT)) {
895 m_freem(m_new);
896 return(ENOBUFS);
897 }
898 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
899 } else {
900 m_new = m;
901 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
902 m_new->m_data = m_new->m_ext.ext_buf;
903 }
904
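	/*
	 * Shift the payload by ETHER_ALIGN (2) bytes so the IP header
	 * that follows the 14-byte Ethernet header lands on a 32-bit
	 * boundary.  Chips flagged with bge_rx_alignment_bug apparently
	 * cannot DMA to such offsets, so the adjustment is skipped there.
	 */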
905 if (!sc->bge_rx_alignment_bug)
906 m_adj(m_new, ETHER_ALIGN);
907 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
908 r = &sc->bge_ldata.bge_rx_std_ring[i];
909 ctx.bge_maxsegs = 1;
910 ctx.sc = sc;
911 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
912 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
913 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
914 if (error || ctx.bge_maxsegs == 0) {
915 if (m == NULL)
916 m_freem(m_new);
917 return(ENOMEM);
918 }
919 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
920 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
921 r->bge_flags = htole16(BGE_RXBDFLAG_END);
922 r->bge_len = htole16(m_new->m_len);
923 r->bge_idx = htole16(i);
924
925 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
926 sc->bge_cdata.bge_rx_std_dmamap[i],
927 BUS_DMASYNC_PREREAD);
928
929 return(0);
930}
931
932/*
933 * Initialize a jumbo receive ring descriptor. This allocates
934 * a jumbo buffer from the pool managed internally by the driver.
935 */
936static int
937bge_newbuf_jumbo(sc, i, m)
938 struct bge_softc *sc;
939 int i;
940 struct mbuf *m;
941{
942 struct mbuf *m_new = NULL;
943 struct bge_rx_bd *r;
944 struct bge_dmamap_arg ctx;
945 int error;
946
947 if (m == NULL) {
948		caddr_t buf = NULL;
949
950 /* Allocate the mbuf. */
951 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
952 if (m_new == NULL) {
953 return(ENOBUFS);
954 }
955
956 /* Allocate the jumbo buffer */
957 buf = bge_jalloc(sc);
958 if (buf == NULL) {
959 m_freem(m_new);
960 printf("bge%d: jumbo allocation failed "
961 "-- packet dropped!\n", sc->bge_unit);
962 return(ENOBUFS);
963 }
964
965 /* Attach the buffer to the mbuf. */
966 m_new->m_data = (void *) buf;
967 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
968 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
969 (struct bge_softc *)sc, 0, EXT_NET_DRV);
970 } else {
971 m_new = m;
972 m_new->m_data = m_new->m_ext.ext_buf;
973 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
974 }
975
976 if (!sc->bge_rx_alignment_bug)
977 m_adj(m_new, ETHER_ALIGN);
978 /* Set up the descriptor. */
979 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
980 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
981 ctx.bge_maxsegs = 1;
982 ctx.sc = sc;
983 error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
984 sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
985 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
986 if (error || ctx.bge_maxsegs == 0) {
987 if (m == NULL)
988 m_freem(m_new);
989 return(ENOMEM);
990 }
991 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
992 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
993 r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
994 r->bge_len = htole16(m_new->m_len);
995 r->bge_idx = htole16(i);
996
997	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,	/* jumbo tag, matching the load above */
998 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
999 BUS_DMASYNC_PREREAD);
1000
1001 return(0);
1002}
1003
1004/*
1005 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1006 * that's 1MB of memory, which is a lot. For now, we fill only the first
1007 * 256 ring entries and hope that our CPU is fast enough to keep up with
1008 * the NIC.
1009 */
1010static int
1011bge_init_rx_ring_std(sc)
1012 struct bge_softc *sc;
1013{
1014 int i;
1015
1016 for (i = 0; i < BGE_SSLOTS; i++) {
1017 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1018 return(ENOBUFS);
1019	}
1020
1021 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1022 sc->bge_cdata.bge_rx_std_ring_map,
1023 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1024
1025 sc->bge_std = i - 1;
1026 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1027
1028 return(0);
1029}
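
/*
 * Note that sc->bge_std is left pointing at the last descriptor that
 * was initialized; writing that index to the standard-ring producer
 * mailbox is what tells the chip the new buffers are available.
 */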
1030
1031static void
1032bge_free_rx_ring_std(sc)
1033 struct bge_softc *sc;
1034{
1035 int i;
1036
1037 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1038 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1039 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1040 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1041 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1042 sc->bge_cdata.bge_rx_std_dmamap[i]);
1043 }
1044 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1045 sizeof(struct bge_rx_bd));
1046 }
1047
1048 return;
1049}
1050
1051static int
1052bge_init_rx_ring_jumbo(sc)
1053 struct bge_softc *sc;
1054{
1055 int i;
1056 struct bge_rcb *rcb;
1057
1058 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1059 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1060 return(ENOBUFS);
1061	}
1062
1063 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1064 sc->bge_cdata.bge_rx_jumbo_ring_map,
1065 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1066
1067 sc->bge_jumbo = i - 1;
1068
1069 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1070 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1071 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1072
1073 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1074
1075 return(0);
1076}
1077
1078static void
1079bge_free_rx_ring_jumbo(sc)
1080 struct bge_softc *sc;
1081{
1082 int i;
1083
1084 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1085 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1086 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1087 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1088 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1089 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1090 }
1091 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1092 sizeof(struct bge_rx_bd));
1093 }
1094
1095 return;
1096}
1097
1098static void
1099bge_free_tx_ring(sc)
1100 struct bge_softc *sc;
1101{
1102 int i;
1103
1104 if (sc->bge_ldata.bge_tx_ring == NULL)
1105 return;
1106
1107 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1108 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1109 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1110 sc->bge_cdata.bge_tx_chain[i] = NULL;
1111 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1112 sc->bge_cdata.bge_tx_dmamap[i]);
1113 }
1114 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1115 sizeof(struct bge_tx_bd));
1116 }
1117
1118 return;
1119}
1120
1121static int
1122bge_init_tx_ring(sc)
1123 struct bge_softc *sc;
1124{
1125 sc->bge_txcnt = 0;
1126 sc->bge_tx_saved_considx = 0;
1127
1128 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1129 /* 5700 b2 errata */
1130 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1131 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1132
1133 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1134 /* 5700 b2 errata */
1135 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1136 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1137
1138 return(0);
1139}
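
/*
 * The duplicated mailbox writes above are the workaround for the
 * 5700 B-step errata noted in the comments: those parts can reportedly
 * miss the first write to a mailbox register, so it is issued twice.
 */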
1140
1141static void
1142bge_setmulti(sc)
1143 struct bge_softc *sc;
1144{
1145 struct ifnet *ifp;
1146 struct ifmultiaddr *ifma;
1147 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1148 int h, i;
1149
1150 BGE_LOCK_ASSERT(sc);
1151
1152 ifp = &sc->arpcom.ac_if;
1153
1154 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1155 for (i = 0; i < 4; i++)
1156 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1157 return;
1158 }
1159
1160 /* First, zot all the existing filters. */
1161 for (i = 0; i < 4; i++)
1162 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1163
1164 /* Now program new ones. */
1165 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1166 if (ifma->ifma_addr->sa_family != AF_LINK)
1167 continue;
1168 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1169 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1170 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1171 }
1172
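	/*
	 * Worked example of the mapping above: the low 7 bits of the
	 * little-endian CRC32 select one of 128 filter bits.  For
	 * h = 0x4b, (h & 0x60) >> 5 = 2 picks hashes[2] (written to
	 * BGE_MAR0 + 8 below), and 1 << (h & 0x1f) sets bit 11 in it.
	 */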
1173 for (i = 0; i < 4; i++)
1174 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1175
1176 return;
1177}
1178
1179/*
1180 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1181 * self-test results.
1182 */
1183static int
1184bge_chipinit(sc)
1185 struct bge_softc *sc;
1186{
1187 int i;
1188 u_int32_t dma_rw_ctl;
1189
1190 /* Set endianness before we access any non-PCI registers. */
1191#if BYTE_ORDER == BIG_ENDIAN
1192 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1193 BGE_BIGENDIAN_INIT, 4);
1194#else
1195 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1196 BGE_LITTLEENDIAN_INIT, 4);
1197#endif
1198
1199 /*
1200 * Check the 'ROM failed' bit on the RX CPU to see if
1201 * self-tests passed.
1202 */
1203 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1204 printf("bge%d: RX CPU self-diagnostics failed!\n",
1205 sc->bge_unit);
1206 return(ENODEV);
1207 }
1208
1209 /* Clear the MAC control register */
1210 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1211
1212 /*
1213 * Clear the MAC statistics block in the NIC's
1214 * internal memory.
1215 */
1216 for (i = BGE_STATS_BLOCK;
1217 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1218 BGE_MEMWIN_WRITE(sc, i, 0);
1219
1220 for (i = BGE_STATUS_BLOCK;
1221 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1222 BGE_MEMWIN_WRITE(sc, i, 0);
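
	/*
	 * BGE_MEMWIN_WRITE reaches the chip's internal RAM through the
	 * PCI memory window, so the two loops above clear the on-chip
	 * statistics and status block areas one 32-bit word at a time.
	 */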
1223
1224 /* Set up the PCI DMA control register. */
1225 if (sc->bge_pcie) {
1226 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1227 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1228 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1229 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1230 BGE_PCISTATE_PCI_BUSMODE) {
1231 /* Conventional PCI bus */
1232 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1233 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1234 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1235 (0x0F);
1236 } else {
1237 /* PCI-X bus */
1238 /*
1239 * The 5704 uses a different encoding of read/write
1240 * watermarks.
1241 */
1242 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1243 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1244 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1245 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1246 else
1247 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1248 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1249 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1250 (0x0F);
1251
1252 /*
1253 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1254 * for hardware bugs.
1255 */
1256 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1257 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1258 u_int32_t tmp;
1259
1260 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1261 if (tmp == 0x6 || tmp == 0x7)
1262 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1263 }
1264 }
1265
1266 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1267 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1268 sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1269 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1270 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1271 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
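
	/*
	 * Summary of the selection above: the RD_WAT/WR_WAT fields set
	 * the DMA engine's read and write watermarks, and the right
	 * encoding depends on the bus type (PCI Express, conventional
	 * PCI, or PCI-X), with the 5704 using its own PCI-X watermark
	 * encoding.  The ONEDMA_AT_ONCE bit is the 5703/5704 hardware
	 * bug workaround, applied only for the clock modes (0x6, 0x7)
	 * that apparently need it.
	 */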
1272
1273 /*
1274 * Set up general mode register.
1275 */
1276 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1277 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1278 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1279 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1280
1281 /*
1282 * Disable memory write invalidate. Apparently it is not supported
1283 * properly by these devices.
1284 */
1285 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1286
1287#ifdef __brokenalpha__
1288 /*
1289 * Must ensure that we do not cross an 8K byte boundary
1290 * for DMA reads. Our highest limit is 1K bytes. This is a
1291 * restriction on some ALPHA platforms with early revision
1292 * 21174 PCI chipsets, such as the AlphaPC 164lx
1293 */
1294 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1295 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1296#endif
1297
1298	/* Set the timer prescaler (always 66MHz) */
1299 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1300
1301 return(0);
1302}
1303
1304static int
1305bge_blockinit(sc)
1306 struct bge_softc *sc;
1307{
1308 struct bge_rcb *rcb;
1309 volatile struct bge_rcb *vrcb;
1310 int i;
1311
1312 /*
1313 * Initialize the memory window pointer register so that
1314 * we can access the first 32K of internal NIC RAM. This will
1315 * allow us to set up the TX send ring RCBs and the RX return
1316 * ring RCBs, plus other things which live in NIC memory.
1317 */
1318 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1319
1320 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1321
1322	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1323 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1324 /* Configure mbuf memory pool */
1325 if (sc->bge_extram) {
1326 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1327 BGE_EXT_SSRAM);
1328 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1329 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1330 else
1331 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1332 } else {
1333 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1334 BGE_BUFFPOOL_1);
1335 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1336 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1337 else
1338 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1339 }
1340
1341 /* Configure DMA resource pool */
1342 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1343 BGE_DMA_DESCRIPTORS);
1344 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1345 }
1346
1347 /* Configure mbuf pool watermarks */
1348 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1349 sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1350 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1351 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1352 } else {
1353 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1354 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1355 }
1356 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1357
1358 /* Configure DMA resource watermarks */
1359 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1360 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1361
1362 /* Enable buffer manager */
1363	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1364 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1365 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1366 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1367
1368 /* Poll for buffer manager start indication */
1369 for (i = 0; i < BGE_TIMEOUT; i++) {
1370 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1371 break;
1372 DELAY(10);
1373 }
1374
1375 if (i == BGE_TIMEOUT) {
1376 printf("bge%d: buffer manager failed to start\n",
1377 sc->bge_unit);
1378 return(ENXIO);
1379 }
1380 }
1381
1382 /* Enable flow-through queues */
1383 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1384 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1385
1386 /* Wait until queue initialization is complete */
1387 for (i = 0; i < BGE_TIMEOUT; i++) {
1388 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1389 break;
1390 DELAY(10);
1391 }
1392
1393 if (i == BGE_TIMEOUT) {
1394 printf("bge%d: flow-through queue init failed\n",
1395 sc->bge_unit);
1396 return(ENXIO);
1397 }
1398
1399 /* Initialize the standard RX ring control block */
1400 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1401 rcb->bge_hostaddr.bge_addr_lo =
1402 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1403 rcb->bge_hostaddr.bge_addr_hi =
1404 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1405 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1406 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
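
	/*
	 * The RCB written below tells the chip where the host-side ring
	 * lives.  On the 5705/5750 the maxlen field appears to carry the
	 * ring size (512 entries, the only size those chips support),
	 * while older chips encode the maximum frame length instead.
	 */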
1407 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1408 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1409 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1410 else
1411 rcb->bge_maxlen_flags =
1412 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1413 if (sc->bge_extram)
1414 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1415 else
1416 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1417 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1418 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1419
1420 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1421 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1422
1423 /*
1424 * Initialize the jumbo RX ring control block
1425 * We set the 'ring disabled' bit in the flags
1426 * field until we're actually ready to start
1427 * using this ring (i.e. once we set the MTU
1428 * high enough to require it).
1429 */
1430	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1431 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1432 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1433
1434 rcb->bge_hostaddr.bge_addr_lo =
1435 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1436 rcb->bge_hostaddr.bge_addr_hi =
1437 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1438 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1439 sc->bge_cdata.bge_rx_jumbo_ring_map,
1440 BUS_DMASYNC_PREREAD);
1441 rcb->bge_maxlen_flags =
1442 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1443 BGE_RCB_FLAG_RING_DISABLED);
1444 if (sc->bge_extram)
1445 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1446 else
1447 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1448 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1449 rcb->bge_hostaddr.bge_addr_hi);
1450 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1451 rcb->bge_hostaddr.bge_addr_lo);
1452
1453 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1454 rcb->bge_maxlen_flags);
1455 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1456
1457 /* Set up dummy disabled mini ring RCB */
1458 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1459 rcb->bge_maxlen_flags =
1460 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1461 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1462 rcb->bge_maxlen_flags);
1463 }
1464
1465 /*
1466 * Set the BD ring replenish thresholds. The recommended
1467 * values are 1/8th the number of descriptors allocated to
1468 * each ring.
1469 */
1470 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1471 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1472
1473 /*
1474 * Disable all unused send rings by setting the 'ring disabled'
1475 * bit in the flags field of all the TX send ring control blocks.
1476 * These are located in NIC memory.
1477 */
1478 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1479 BGE_SEND_RING_RCB);
1480 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1481 vrcb->bge_maxlen_flags =
1482 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1483 vrcb->bge_nicaddr = 0;
1484 vrcb++;
1485 }
1486
1487 /* Configure TX RCB 0 (we use only the first ring) */
1488 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1489 BGE_SEND_RING_RCB);
1490 vrcb->bge_hostaddr.bge_addr_lo =
1491 htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1492 vrcb->bge_hostaddr.bge_addr_hi =
1493 htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1494 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1495	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1496 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1497 vrcb->bge_maxlen_flags =
1498 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1499
1500 /* Disable all unused RX return rings */
1501 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1502 BGE_RX_RETURN_RING_RCB);
1503 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1504 vrcb->bge_hostaddr.bge_addr_hi = 0;
1505 vrcb->bge_hostaddr.bge_addr_lo = 0;
1506 vrcb->bge_maxlen_flags =
1507 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1508 BGE_RCB_FLAG_RING_DISABLED);
1509 vrcb->bge_nicaddr = 0;
1510 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1511 (i * (sizeof(u_int64_t))), 0);
1512 vrcb++;
1513 }
1514
1515 /* Initialize RX ring indexes */
1516 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1517 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1518 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1519
1520 /*
1521 * Set up RX return ring 0
1522 * Note that the NIC address for RX return rings is 0x00000000.
1523 * The return rings live entirely within the host, so the
1524 * nicaddr field in the RCB isn't used.
1525 */
1526 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1527 BGE_RX_RETURN_RING_RCB);
1528 vrcb->bge_hostaddr.bge_addr_lo =
1529 BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1530 vrcb->bge_hostaddr.bge_addr_hi =
1531 BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1532 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1533 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1534 vrcb->bge_nicaddr = 0x00000000;
1535 vrcb->bge_maxlen_flags =
1536 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1537
1538 /* Set random backoff seed for TX */
1539 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1540 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1541 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1542 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1543 BGE_TX_BACKOFF_SEED_MASK);
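
	/*
	 * The backoff seed above is just the sum of the six MAC address
	 * bytes masked to the seed field, which gives each NIC a
	 * different starting point for its transmit backoff generator.
	 */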
1544
1545 /* Set inter-packet gap */
1546 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1547
1548 /*
1549 * Specify which ring to use for packets that don't match
1550 * any RX rules.
1551 */
1552 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1553
1554 /*
1555 * Configure number of RX lists. One interrupt distribution
1556 * list, sixteen active lists, one bad frames class.
1557 */
1558 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1559
1560	/* Initialize RX list placement stats mask. */
1561 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1562 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1563
1564 /* Disable host coalescing until we get it set up */
1565 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1566
1567 /* Poll to make sure it's shut down. */
1568 for (i = 0; i < BGE_TIMEOUT; i++) {
1569 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1570 break;
1571 DELAY(10);
1572 }
1573
1574 if (i == BGE_TIMEOUT) {
1575 printf("bge%d: host coalescing engine failed to idle\n",
1576 sc->bge_unit);
1577 return(ENXIO);
1578 }
1579
1580 /* Set up host coalescing defaults */
1581 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1582 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1583 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1584 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1585	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1586 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1587 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1588 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1589 }
1590 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1591 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1592
1593 /* Set up address of statistics block */
1594	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1595 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1596 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1597 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1598 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1599 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1600 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1601 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1602 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1603 }
1604
1605 /* Set up address of status block */
1606 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1607 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1608 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1609 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1610 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1611 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1612 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1613 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1614
1615 /* Turn on host coalescing state machine */
1616 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1617
1618 /* Turn on RX BD completion state machine and enable attentions */
1619 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1620 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1621
1622 /* Turn on RX list placement state machine */
1623 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1624
1625 /* Turn on RX list selector state machine. */
1626	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1627 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1628 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1629
1630 /* Turn on DMA, clear stats */
1631 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1632 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1633 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1634 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1635 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1636
1637 /* Set misc. local control, enable interrupts on attentions */
1638 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1639
1640#ifdef notdef
1641 /* Assert GPIO pins for PHY reset */
1642 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1643 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1644 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1645 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1646#endif
1647
1648 /* Turn on DMA completion state machine */
1649	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1650 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1651 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1652
1653 /* Turn on write DMA state machine */
1654 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1655 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1656
1657 /* Turn on read DMA state machine */
1658 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1659 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1660
1661 /* Turn on RX data completion state machine */
1662 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1663
1664 /* Turn on RX BD initiator state machine */
1665 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1666
1667 /* Turn on RX data and RX BD initiator state machine */
1668 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1669
1670 /* Turn on Mbuf cluster free state machine */
1671	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1672 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1651 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1652
1653 /* Turn on send BD completion state machine */
1654 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1655
1656 /* Turn on send data completion state machine */
1657 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1658
1659 /* Turn on send data initiator state machine */
1660 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1661
1662 /* Turn on send BD initiator state machine */
1663 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1664
1665 /* Turn on send BD selector state machine */
1666 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1667
1668 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1669 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1670 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1671
1672 /* ack/clear link change events */
1673 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1674 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1675 BGE_MACSTAT_LINK_CHANGED);
1676 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1677
1678 /* Enable PHY auto polling (for MII/GMII only) */
1679 if (sc->bge_tbi) {
1680 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1681 } else {
1682 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1683 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1684 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1685 BGE_EVTENB_MI_INTERRUPT);
1686 }
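
	/*
	 * In the non-TBI case above, BGE_MIMODE_AUTOPOLL turns on the
	 * MAC's own periodic PHY polling, and the 10<<16 value appears
	 * to program the polling-interval field of the MI mode register.
	 */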
1687
1688 /* Enable link state change attentions. */
1689 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1690
1691 return(0);
1692}
1693
1694/*
1695 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1696 * against our list and return the device name if we find a match.
1697 * The controller supports VPD, which would let us read the product
1698 * name string from the chip itself, but that code is currently
1699 * disabled ('notdef' below); instead we announce the compiled-in
1700 * name along with the ASIC revision.
1701 */
1702static int
1703bge_probe(dev)
1704 device_t dev;
1705{
1706 struct bge_type *t;
1707 struct bge_softc *sc;
1708 char *descbuf;
1709
1710 t = bge_devs;
1711
1712 sc = device_get_softc(dev);
1713 bzero(sc, sizeof(struct bge_softc));
1714 sc->bge_unit = device_get_unit(dev);
1715 sc->bge_dev = dev;
1716
1717 while(t->bge_name != NULL) {
1718 if ((pci_get_vendor(dev) == t->bge_vid) &&
1719 (pci_get_device(dev) == t->bge_did)) {
1720#ifdef notdef
1721 bge_vpd_read(sc);
1722 device_set_desc(dev, sc->bge_vpd_prodname);
1723#endif
1724 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1725 if (descbuf == NULL)
1726 return(ENOMEM);
1727 snprintf(descbuf, BGE_DEVDESC_MAX,
1728 "%s, ASIC rev. %#04x", t->bge_name,
1729 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1730 device_set_desc_copy(dev, descbuf);
1731 if (pci_get_subvendor(dev) == DELL_VENDORID)
1732 sc->bge_no_3_led = 1;
1733 free(descbuf, M_TEMP);
1734 return(0);
1735 }
1736 t++;
1737 }
1738
1739 return(ENXIO);
1740}
1741
1742static void
1743bge_dma_free(sc)
1744 struct bge_softc *sc;
1745{
1746 int i;
1747
1748
1749 /* Destroy DMA maps for RX buffers */
1750
1751 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1752 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1753 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1754 sc->bge_cdata.bge_rx_std_dmamap[i]);
1755 }
1756
1757 /* Destroy DMA maps for jumbo RX buffers */
1758
1759 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1760 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1761 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1762 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1763 }
1764
1765 /* Destroy DMA maps for TX buffers */
1766
1767 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1768 if (sc->bge_cdata.bge_tx_dmamap[i])
1769 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1770 sc->bge_cdata.bge_tx_dmamap[i]);
1771 }
1772
1773 if (sc->bge_cdata.bge_mtag)
1774 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1775
1776
1777 /* Destroy standard RX ring */
1778
1779 if (sc->bge_ldata.bge_rx_std_ring)
1780 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1781 sc->bge_ldata.bge_rx_std_ring,
1782 sc->bge_cdata.bge_rx_std_ring_map);
1783
1784 if (sc->bge_cdata.bge_rx_std_ring_map) {
1785 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1786 sc->bge_cdata.bge_rx_std_ring_map);
1787 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1788 sc->bge_cdata.bge_rx_std_ring_map);
1789 }
1790
1791 if (sc->bge_cdata.bge_rx_std_ring_tag)
1792 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1793
1794 /* Destroy jumbo RX ring */
1795
1796 if (sc->bge_ldata.bge_rx_jumbo_ring)
1797 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1798 sc->bge_ldata.bge_rx_jumbo_ring,
1799 sc->bge_cdata.bge_rx_jumbo_ring_map);
1800
1801 if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
1802 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1803 sc->bge_cdata.bge_rx_jumbo_ring_map);
1804 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1805 sc->bge_cdata.bge_rx_jumbo_ring_map);
1806 }
1807
1808 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1809 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1810
1811 /* Destroy RX return ring */
1812
1813 if (sc->bge_ldata.bge_rx_return_ring)
1814 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1815 sc->bge_ldata.bge_rx_return_ring,
1816 sc->bge_cdata.bge_rx_return_ring_map);
1817
1818 if (sc->bge_cdata.bge_rx_return_ring_map) {
1819 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1820 sc->bge_cdata.bge_rx_return_ring_map);
1821 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
1822 sc->bge_cdata.bge_rx_return_ring_map);
1823 }
1824
1825 if (sc->bge_cdata.bge_rx_return_ring_tag)
1826 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1827
1828 /* Destroy TX ring */
1829
1830 if (sc->bge_ldata.bge_tx_ring)
1831 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1832 sc->bge_ldata.bge_tx_ring,
1833 sc->bge_cdata.bge_tx_ring_map);
1834
1835 if (sc->bge_cdata.bge_tx_ring_map) {
1836 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1837 sc->bge_cdata.bge_tx_ring_map);
1838 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
1839 sc->bge_cdata.bge_tx_ring_map);
1840 }
1841
1842 if (sc->bge_cdata.bge_tx_ring_tag)
1843 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1844
1845 /* Destroy status block */
1846
1847 if (sc->bge_ldata.bge_status_block)
1848 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1849 sc->bge_ldata.bge_status_block,
1850 sc->bge_cdata.bge_status_map);
1851
1852 if (sc->bge_cdata.bge_status_map) {
1853 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1854 sc->bge_cdata.bge_status_map);
1855 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1856 sc->bge_cdata.bge_status_map);
1857 }
1858
1859 if (sc->bge_cdata.bge_status_tag)
1860 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1861
1862 /* Destroy statistics block */
1863
1864 if (sc->bge_ldata.bge_stats)
1865 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1866 sc->bge_ldata.bge_stats,
1867 sc->bge_cdata.bge_stats_map);
1868
1869 if (sc->bge_cdata.bge_stats_map) {
1870 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1871 sc->bge_cdata.bge_stats_map);
1872 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1873 sc->bge_cdata.bge_stats_map);
1874 }
1875
1876 if (sc->bge_cdata.bge_stats_tag)
1877 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1878
1879 /* Destroy the parent tag */
1880
1881 if (sc->bge_cdata.bge_parent_tag)
1882 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1883
1884 return;
1885}
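
/*
 * Note the teardown order used throughout bge_dma_free(): each ring's
 * memory is freed, its map unloaded and destroyed, then the per-ring
 * tag destroyed, and only at the very end the parent tag from which
 * all the others were derived.
 */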
1886
1887static int
1888bge_dma_alloc(dev)
1889 device_t dev;
1890{
1891 struct bge_softc *sc;
1892 int nseg, i, error;
1893 struct bge_dmamap_arg ctx;
1894
1895 sc = device_get_softc(dev);
1896
1897 /*
1898 * Allocate the parent bus DMA tag appropriate for PCI.
1899 */
1900#define BGE_NSEG_NEW 32
1901 error = bus_dma_tag_create(NULL, /* parent */
1902 PAGE_SIZE, 0, /* alignment, boundary */
1903 BUS_SPACE_MAXADDR, /* lowaddr */
1904 BUS_SPACE_MAXADDR_32BIT,/* highaddr */
1905 NULL, NULL, /* filter, filterarg */
1906 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1907 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1908 BUS_DMA_ALLOCNOW, /* flags */
1909 NULL, NULL, /* lockfunc, lockarg */
1910 &sc->bge_cdata.bge_parent_tag);
	/* Everything below derives from this tag, so fail early. */
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}
1911
1912 /*
1913 * Create tag for RX mbufs.
1914 */
1915 nseg = 32;
1916 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
1917 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1918 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
1919 &sc->bge_cdata.bge_mtag);
1920
1921 if (error) {
1922 device_printf(dev, "could not allocate dma tag\n");
1923 return (ENOMEM);
1924 }
1925
1926 /* Create DMA maps for RX buffers */
1927
1928 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1929 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1930 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1931 if (error) {
1932 device_printf(dev, "can't create DMA map for RX\n");
1933 return(ENOMEM);
1934 }
1935 }
1936
1937 /* Create DMA maps for TX buffers */
1938
1939 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1940 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1941 &sc->bge_cdata.bge_tx_dmamap[i]);
1942 if (error) {
1943			device_printf(dev, "can't create DMA map for TX\n");
1944 return(ENOMEM);
1945 }
1946 }
1947
1948 /* Create tag for standard RX ring */
1949
1950 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1951 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1952 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1953 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1954
1955 if (error) {
1956 device_printf(dev, "could not allocate dma tag\n");
1957 return (ENOMEM);
1958 }
1959
1960 /* Allocate DMA'able memory for standard RX ring */
1961
1962 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1963 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1964 &sc->bge_cdata.bge_rx_std_ring_map);
1965 if (error)
1966 return (ENOMEM);
1967
1968 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1969
1970 /* Load the address of the standard RX ring */
1971
1972 ctx.bge_maxsegs = 1;
1973 ctx.sc = sc;
1974
1975 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1976 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1977 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1978
1979 if (error)
1980 return (ENOMEM);
1981
1982 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
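
	/*
	 * As elsewhere in this file, bge_dma_map_addr() is the
	 * bus_dmamap_load() callback: it records the mapped bus address
	 * in ctx.bge_busaddr, and callers treat ctx.bge_maxsegs reading
	 * back as zero as a failed load.
	 */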
1983
1868
1869 if (sc->bge_ldata.bge_status_block)
1870 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1871 sc->bge_ldata.bge_status_block,
1872 sc->bge_cdata.bge_status_map);
1873
1874 if (sc->bge_cdata.bge_status_map) {
1875 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1876 sc->bge_cdata.bge_status_map);
1877 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1878 sc->bge_cdata.bge_status_map);
1879 }
1880
1881 if (sc->bge_cdata.bge_status_tag)
1882 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1883
1884 /* Destroy statistics block */
1885
1886 if (sc->bge_ldata.bge_stats)
1887 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1888 sc->bge_ldata.bge_stats,
1889 sc->bge_cdata.bge_stats_map);
1890
1891 if (sc->bge_cdata.bge_stats_map) {
1892 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1893 sc->bge_cdata.bge_stats_map);
1894 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1895 sc->bge_cdata.bge_stats_map);
1896 }
1897
1898 if (sc->bge_cdata.bge_stats_tag)
1899 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1900
1901 /* Destroy the parent tag */
1902
1903 if (sc->bge_cdata.bge_parent_tag)
1904 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1905
1906 return;
1907}
1908
1909static int
1910bge_dma_alloc(dev)
1911 device_t dev;
1912{
1913 struct bge_softc *sc;
1914 int nseg, i, error;
1915 struct bge_dmamap_arg ctx;
1916
1917 sc = device_get_softc(dev);
1918
1919 /*
1920 * Allocate the parent bus DMA tag appropriate for PCI.
1921 */
1922#define BGE_NSEG_NEW 32
1923 error = bus_dma_tag_create(NULL, /* parent */
1924 PAGE_SIZE, 0, /* alignment, boundary */
1925 BUS_SPACE_MAXADDR, /* lowaddr */
1926 BUS_SPACE_MAXADDR_32BIT,/* highaddr */
1927 NULL, NULL, /* filter, filterarg */
1928 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1929 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1930 BUS_DMA_ALLOCNOW, /* flags */
1931 NULL, NULL, /* lockfunc, lockarg */
1932 &sc->bge_cdata.bge_parent_tag);
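
	/* Bail out if the parent tag could not be created. */
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}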
1933
1934 /*
1935 * Create tag for RX mbufs.
1936 */
1937 nseg = 32;
1938 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
1939 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1940 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
1941 &sc->bge_cdata.bge_mtag);
1942
1943 if (error) {
1944 device_printf(dev, "could not allocate dma tag\n");
1945 return (ENOMEM);
1946 }
1947
1948 /* Create DMA maps for RX buffers */
1949
1950 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1951 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1952 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1953 if (error) {
1954 device_printf(dev, "can't create DMA map for RX\n");
1955 return(ENOMEM);
1956 }
1957 }
1958
1959 /* Create DMA maps for TX buffers */
1960
1961 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1962 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1963 &sc->bge_cdata.bge_tx_dmamap[i]);
1964 if (error) {
1965			device_printf(dev, "can't create DMA map for TX\n");
1966 return(ENOMEM);
1967 }
1968 }
1969
1970 /* Create tag for standard RX ring */
1971
1972 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1973 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1974 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1975 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1976
1977 if (error) {
1978 device_printf(dev, "could not allocate dma tag\n");
1979 return (ENOMEM);
1980 }
1981
1982 /* Allocate DMA'able memory for standard RX ring */
1983
1984 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1985 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1986 &sc->bge_cdata.bge_rx_std_ring_map);
1987 if (error)
1988 return (ENOMEM);
1989
1990 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1991
1992 /* Load the address of the standard RX ring */
1993
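	/*
	 * With BUS_DMA_NOWAIT the load completes (or fails) synchronously,
	 * so bge_dma_map_addr() has filled in ctx.bge_busaddr on return.
	 */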
1994 ctx.bge_maxsegs = 1;
1995 ctx.sc = sc;
1996
1997 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1998 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1999 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2000
2001 if (error)
2002 return (ENOMEM);
2003
2004 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
2005
1984 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2006	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2007 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2008
2009 /*
2010 * Create tag for jumbo mbufs.
2011 * This is really a bit of a kludge. We allocate a special
2012 * jumbo buffer pool which (thanks to the way our DMA
2013 * memory allocation works) will consist of contiguous
2014 * pages. This means that even though a jumbo buffer might
2015 * be larger than a page size, we don't really need to
2016 * map it into more than one DMA segment. However, the
2017 * default mbuf tag will result in multi-segment mappings,
2018 * so we have to create a special jumbo mbuf tag that
2019 * lets us get away with mapping the jumbo buffers as
2020 * a single segment. I think eventually the driver should
2021 * be changed so that it uses ordinary mbufs and cluster
2022 * buffers, i.e. jumbo frames can span multiple DMA
2023 * descriptors. But that's a project for another day.
2024 */
2025
2026 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2027 ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2028 NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
2029 &sc->bge_cdata.bge_mtag_jumbo);
2030
2031 if (error) {
2032 device_printf(dev, "could not allocate dma tag\n");
2033 return (ENOMEM);
2034 }
2035
2036 /* Create tag for jumbo RX ring */
2037
2038 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2039 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2040 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2041 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2042
2043 if (error) {
2044 device_printf(dev, "could not allocate dma tag\n");
2045 return (ENOMEM);
2046 }
2047
2048 /* Allocate DMA'able memory for jumbo RX ring */
2049
2050 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2051 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
2052 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2053 if (error)
2054 return (ENOMEM);
2055
2056 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
2057 BGE_JUMBO_RX_RING_SZ);
2058
2059 /* Load the address of the jumbo RX ring */
2060
2061 ctx.bge_maxsegs = 1;
2062 ctx.sc = sc;
2063
2064 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2065 sc->bge_cdata.bge_rx_jumbo_ring_map,
2066 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2067 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2068
2069 if (error)
2070 return (ENOMEM);
2071
2072 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2073
2074 /* Create DMA maps for jumbo RX buffers */
2075
2076 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2077 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2078 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2079 if (error) {
2080 device_printf(dev,
2081			    "can't create DMA map for jumbo RX\n");
2082 return(ENOMEM);
2083 }
2084 }
2085
2086 }
2087
2088 /* Create tag for RX return ring */
2089
2090 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2091 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2092 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2093 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2094
2095 if (error) {
2096 device_printf(dev, "could not allocate dma tag\n");
2097 return (ENOMEM);
2098 }
2099
2100 /* Allocate DMA'able memory for RX return ring */
2101
2102 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2103 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2104 &sc->bge_cdata.bge_rx_return_ring_map);
2105 if (error)
2106 return (ENOMEM);
2107
2108 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2109 BGE_RX_RTN_RING_SZ(sc));
2110
2111 /* Load the address of the RX return ring */
2112
2113 ctx.bge_maxsegs = 1;
2114 ctx.sc = sc;
2115
2116 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2117 sc->bge_cdata.bge_rx_return_ring_map,
2118 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2119 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2120
2121 if (error)
2122 return (ENOMEM);
2123
2124 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2125
2126 /* Create tag for TX ring */
2127
2128 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2129 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2130 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2131 &sc->bge_cdata.bge_tx_ring_tag);
2132
2133 if (error) {
2134 device_printf(dev, "could not allocate dma tag\n");
2135 return (ENOMEM);
2136 }
2137
2138 /* Allocate DMA'able memory for TX ring */
2139
2140 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2141 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2142 &sc->bge_cdata.bge_tx_ring_map);
2143 if (error)
2144 return (ENOMEM);
2145
2146 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2147
2148 /* Load the address of the TX ring */
2149
2150 ctx.bge_maxsegs = 1;
2151 ctx.sc = sc;
2152
2153 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2154 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2155 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2156
2157 if (error)
2158 return (ENOMEM);
2159
2160 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2161
2162 /* Create tag for status block */
2163
2164 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2165 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2166 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2167 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2168
2169 if (error) {
2170 device_printf(dev, "could not allocate dma tag\n");
2171 return (ENOMEM);
2172 }
2173
2174 /* Allocate DMA'able memory for status block */
2175
2176 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2177 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2178 &sc->bge_cdata.bge_status_map);
2179 if (error)
2180 return (ENOMEM);
2181
2182 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2183
2184 /* Load the address of the status block */
2185
2186 ctx.sc = sc;
2187 ctx.bge_maxsegs = 1;
2188
2189 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2190 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2191 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2192
2193 if (error)
2194 return (ENOMEM);
2195
2196 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2197
2198 /* Create tag for statistics block */
2199
2200 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2201 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2202 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2203 &sc->bge_cdata.bge_stats_tag);
2204
2205 if (error) {
2206 device_printf(dev, "could not allocate dma tag\n");
2207 return (ENOMEM);
2208 }
2209
2210 /* Allocate DMA'able memory for statistics block */
2211
2212 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2213 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2214 &sc->bge_cdata.bge_stats_map);
2215 if (error)
2216 return (ENOMEM);
2217
2218 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2219
2220	/* Load the address of the statistics block */
2221
2222 ctx.sc = sc;
2223 ctx.bge_maxsegs = 1;
2224
2225 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2226 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2227 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2228
2229 if (error)
2230 return (ENOMEM);
2231
2232 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2233
2234 return(0);
2235}
2236
2237static int
2238bge_attach(dev)
2239 device_t dev;
2240{
2241 struct ifnet *ifp;
2242 struct bge_softc *sc;
2243 u_int32_t hwcfg = 0;
2244 u_int32_t mac_addr = 0;
2245 int unit, error = 0, rid;
2246
2247 sc = device_get_softc(dev);
2248 unit = device_get_unit(dev);
2249 sc->bge_dev = dev;
2250 sc->bge_unit = unit;
2251
2252 /*
2253 * Map control/status registers.
2254 */
2255 pci_enable_busmaster(dev);
2256
2257 rid = BGE_PCI_BAR0;
2258 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2259 RF_ACTIVE|PCI_RF_DENSE);
2260
2261 if (sc->bge_res == NULL) {
2262		printf("bge%d: couldn't map memory\n", unit);
2263 error = ENXIO;
2264 goto fail;
2265 }
2266
2267 sc->bge_btag = rman_get_bustag(sc->bge_res);
2268 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2269 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2270
2271 /* Allocate interrupt */
2272 rid = 0;
2273
2274 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2275 RF_SHAREABLE | RF_ACTIVE);
2276
2277 if (sc->bge_irq == NULL) {
2278 printf("bge%d: couldn't map interrupt\n", unit);
2279 error = ENXIO;
2280 goto fail;
2281 }
2282
2283 sc->bge_unit = unit;
2284
2285 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2286
2287 /* Save ASIC rev. */
2288
2289 sc->bge_chipid =
2290 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2291 BGE_PCIMISCCTL_ASICREV;
2292 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2293 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2294
2295 /*
2296	 * XXX: Broadcom Linux driver. Not in specs or errata.
2297 * PCI-Express?
2298 */
2299 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2300 u_int32_t v;
2301
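		/*
		 * Walk one link of the PCI capability list: bits 15:8 of
		 * the MSI capability header hold the next-capability
		 * pointer, and a PCI Express capability ID there marks
		 * the device as PCIe.
		 */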
2302 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2303 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2304 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2305 if ((v & 0xff) == BGE_PCIE_CAPID)
2306 sc->bge_pcie = 1;
2307 }
2308 }
2309
2310 /* Try to reset the chip. */
2311 bge_reset(sc);
2312
2313 if (bge_chipinit(sc)) {
2314 printf("bge%d: chip initialization failed\n", sc->bge_unit);
2315 bge_release_resources(sc);
2316 error = ENXIO;
2317 goto fail;
2318 }
2319
2320 /*
2321 * Get station address from the EEPROM.
2322 */
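	/* 0x484b is ASCII "HK", apparently the bootcode's valid-address signature. */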
2323 mac_addr = bge_readmem_ind(sc, 0x0c14);
2324 if ((mac_addr >> 16) == 0x484b) {
2325 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2326 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2327 mac_addr = bge_readmem_ind(sc, 0x0c18);
2328 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2329 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2330 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2331 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2332 } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2333 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2334 printf("bge%d: failed to read station address\n", unit);
2335 bge_release_resources(sc);
2336 error = ENXIO;
2337 goto fail;
2338 }
2339
2340	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2303 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2341 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2342 sc->bge_asicrev == BGE_ASICREV_BCM5750)
2343 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2344 else
2345 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2346
2347 if (bge_dma_alloc(dev)) {
2348		printf("bge%d: failed to allocate DMA resources\n",
2349 sc->bge_unit);
2350 bge_release_resources(sc);
2351 error = ENXIO;
2352 goto fail;
2353 }
2354
2355 /*
2356 * Try to allocate memory for jumbo buffers.
2357 * The 5705 does not appear to support jumbo frames.
2358 */
2320 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2359	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2360 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2361 if (bge_alloc_jumbo_mem(sc)) {
2362 printf("bge%d: jumbo buffer allocation "
2363 "failed\n", sc->bge_unit);
2364 bge_release_resources(sc);
2365 error = ENXIO;
2366 goto fail;
2367 }
2368 }
2369
2370	/* Set default tunable values. */
2371 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2372 sc->bge_rx_coal_ticks = 150;
2373 sc->bge_tx_coal_ticks = 150;
2374 sc->bge_rx_max_coal_bds = 64;
2375 sc->bge_tx_max_coal_bds = 128;
2376
2377 /* Set up ifnet structure */
2378 ifp = &sc->arpcom.ac_if;
2379 ifp->if_softc = sc;
2380 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2381 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2382 ifp->if_ioctl = bge_ioctl;
2383 ifp->if_start = bge_start;
2384 ifp->if_watchdog = bge_watchdog;
2385 ifp->if_init = bge_init;
2386 ifp->if_mtu = ETHERMTU;
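	/* One ring slot is held back, presumably so a full ring is distinguishable from an empty one. */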
2387 ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
2388 ifp->if_hwassist = BGE_CSUM_FEATURES;
2389 /* NB: the code for RX csum offload is disabled for now */
2390 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2391 IFCAP_VLAN_MTU;
2392 ifp->if_capenable = ifp->if_capabilities;
2393
2394 /*
2395 * Figure out what sort of media we have by checking the
2396 * hardware config word in the first 32k of NIC internal memory,
2397 * or fall back to examining the EEPROM if necessary.
2398 * Note: on some BCM5700 cards, this value appears to be unset.
2399 * If that's the case, we have to rely on identifying the NIC
2400 * by its PCI subsystem ID, as we do below for the SysKonnect
2401 * SK-9D41.
2402 */
2403 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2404 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2405 else {
2406 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2407 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2408 hwcfg = ntohl(hwcfg);
2409 }
2410
2411 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2412 sc->bge_tbi = 1;
2413
2414 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2415 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2416 sc->bge_tbi = 1;
2417
2418 if (sc->bge_tbi) {
2419 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2420 bge_ifmedia_upd, bge_ifmedia_sts);
2421 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2422 ifmedia_add(&sc->bge_ifmedia,
2423 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2424 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2425 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2426 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2427 } else {
2428 /*
2429 * Do transceiver setup.
2430 */
2431 if (mii_phy_probe(dev, &sc->bge_miibus,
2432 bge_ifmedia_upd, bge_ifmedia_sts)) {
2433 printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2434 bge_release_resources(sc);
2435 bge_free_jumbo_mem(sc);
2436 error = ENXIO;
2437 goto fail;
2438 }
2439 }
2440
2441 /*
2442 * When using the BCM5701 in PCI-X mode, data corruption has
2443 * been observed in the first few bytes of some received packets.
2444 * Aligning the packet buffer in memory eliminates the corruption.
2445 * Unfortunately, this misaligns the packet payloads. On platforms
2446 * which do not support unaligned accesses, we will realign the
2447 * payloads by copying the received packets.
2448 */
2449 switch (sc->bge_chipid) {
2450 case BGE_CHIPID_BCM5701_A0:
2451 case BGE_CHIPID_BCM5701_B0:
2452 case BGE_CHIPID_BCM5701_B2:
2453 case BGE_CHIPID_BCM5701_B5:
2454 /* If in PCI-X mode, work around the alignment bug. */
2455 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2456 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2457 BGE_PCISTATE_PCI_BUSSPEED)
2458 sc->bge_rx_alignment_bug = 1;
2459 break;
2460 }
2461
2462 /*
2463 * Call MI attach routine.
2464 */
2465 ether_ifattach(ifp, sc->arpcom.ac_enaddr);
2466 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2467
2468 /*
2469 * Hookup IRQ last.
2470 */
2471 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2472 bge_intr, sc, &sc->bge_intrhand);
2473
2474 if (error) {
2475 bge_release_resources(sc);
2476 printf("bge%d: couldn't set up irq\n", unit);
2477 }
2478
2479fail:
2480 return(error);
2481}
2482
2483static int
2484bge_detach(dev)
2485 device_t dev;
2486{
2487 struct bge_softc *sc;
2488 struct ifnet *ifp;
2489
2490 sc = device_get_softc(dev);
2491 ifp = &sc->arpcom.ac_if;
2492
2493 BGE_LOCK(sc);
2494 bge_stop(sc);
2495 bge_reset(sc);
2496 BGE_UNLOCK(sc);
2497
2498 ether_ifdetach(ifp);
2499
2500 if (sc->bge_tbi) {
2501 ifmedia_removeall(&sc->bge_ifmedia);
2502 } else {
2503 bus_generic_detach(dev);
2504 device_delete_child(dev, sc->bge_miibus);
2505 }
2506
2507 bge_release_resources(sc);
2468 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2508	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2509 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2510 bge_free_jumbo_mem(sc);
2511
2512 return(0);
2513}
2514
2515static void
2516bge_release_resources(sc)
2517 struct bge_softc *sc;
2518{
2519 device_t dev;
2520
2521 dev = sc->bge_dev;
2522
2523 if (sc->bge_vpd_prodname != NULL)
2524 free(sc->bge_vpd_prodname, M_DEVBUF);
2525
2526 if (sc->bge_vpd_readonly != NULL)
2527 free(sc->bge_vpd_readonly, M_DEVBUF);
2528
2529 if (sc->bge_intrhand != NULL)
2530 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2531
2532 if (sc->bge_irq != NULL)
2533 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2534
2535 if (sc->bge_res != NULL)
2536 bus_release_resource(dev, SYS_RES_MEMORY,
2537 BGE_PCI_BAR0, sc->bge_res);
2538
2539 bge_dma_free(sc);
2540
2541 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2542 BGE_LOCK_DESTROY(sc);
2543
2544 return;
2545}
2546
2547static void
2548bge_reset(sc)
2549 struct bge_softc *sc;
2550{
2551 device_t dev;
2511 u_int32_t cachesize, command, pcistate;
2552 u_int32_t cachesize, command, pcistate, reset;
2553 int i, val = 0;
2554
2555 dev = sc->bge_dev;
2556
2557 /* Save some important PCI state. */
2558 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2559 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2560 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2561
2562 pci_write_config(dev, BGE_PCI_MISC_CTL,
2563 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2564 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2565
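	/* 65 << 1 loads the timer prescaler field (0x41, matching a 66 MHz core clock). */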
2566 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2567
2568 /* XXX: Broadcom Linux driver. */
2569 if (sc->bge_pcie) {
2570 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2571 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2572 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2573 /* Prevent PCIE link training during global reset */
2574 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2575 reset |= (1<<29);
2576 }
2577 }
2578
2579 /* Issue global reset */
2526 bge_writereg_ind(sc, BGE_MISC_CFG,
2527 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
2580 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2581
2582 DELAY(1000);
2583
2584 /* XXX: Broadcom Linux driver. */
2585 if (sc->bge_pcie) {
2586 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2587 uint32_t v;
2588
2589 DELAY(500000); /* wait for link training to complete */
2590 v = pci_read_config(dev, 0xc4, 4);
2591 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2592 }
2593 /* Set PCIE max payload size and clear error status. */
2594 pci_write_config(dev, 0xd8, 0xf5000, 4);
2595 }
2596
2597 /* Reset some of the PCI state that got zapped by reset */
2598 pci_write_config(dev, BGE_PCI_MISC_CTL,
2599 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2600 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2601 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2602 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2603 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2604
2605 /* Enable memory arbiter. */
2540 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2606	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2607 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2608 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2609
2610 /*
2611 * Prevent PXE restart: write a magic number to the
2612 * general communications memory at 0xB50.
2613 */
2614 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2615 /*
2616 * Poll the value location we just wrote until
2617 * we see the 1's complement of the magic number.
2618 * This indicates that the firmware initialization
2619 * is complete.
2620 */
2621 for (i = 0; i < BGE_TIMEOUT; i++) {
2622 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2623 if (val == ~BGE_MAGIC_NUMBER)
2624 break;
2625 DELAY(10);
2626 }
2627
2628 if (i == BGE_TIMEOUT) {
2629 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2630 return;
2631 }
2632
2633 /*
2634 * XXX Wait for the value of the PCISTATE register to
2635 * return to its original pre-reset state. This is a
2636 * fairly good indicator of reset completion. If we don't
2637 * wait for the reset to fully complete, trying to read
2638 * from the device's non-PCI registers may yield garbage
2639 * results.
2640 */
2641 for (i = 0; i < BGE_TIMEOUT; i++) {
2642 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2643 break;
2644 DELAY(10);
2645 }
2646
2647 /* Fix up byte swapping */
2648 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2649 BGE_MODECTL_BYTESWAP_DATA);
2650
2651 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2652
2653 /*
2654 * The 5704 in TBI mode apparently needs some special
2655	 * adjustment to ensure the SERDES drive level is set
2656 * to 1.2V.
2657 */
2658 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2659 uint32_t serdescfg;
2660 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2661 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2662 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2663 }
2664
2665 /* XXX: Broadcom Linux driver. */
2666 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2667 uint32_t v;
2668
2669 v = CSR_READ_4(sc, 0x7c00);
2670 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2671 }
2672 DELAY(10000);
2673
2674 return;
2675}
2676
2677/*
2678 * Frame reception handling. This is called if there's a frame
2679 * on the receive return list.
2680 *
2681 * Note: we have to be able to handle two possibilities here:
2682	 * 1) the frame is from the jumbo receive ring
2683 * 2) the frame is from the standard receive ring
2684 */
2685
2686static void
2687bge_rxeof(sc)
2688 struct bge_softc *sc;
2689{
2690 struct ifnet *ifp;
2691 int stdcnt = 0, jumbocnt = 0;
2692
2693 BGE_LOCK_ASSERT(sc);
2694
2695 ifp = &sc->arpcom.ac_if;
2696
2697 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2698 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2699 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2700 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2627 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2701	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2702 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2703 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2704 sc->bge_cdata.bge_rx_jumbo_ring_map,
2705 BUS_DMASYNC_POSTREAD);
2706 }
2707
2708 while(sc->bge_rx_saved_considx !=
2709 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2710 struct bge_rx_bd *cur_rx;
2711 u_int32_t rxidx;
2712 struct ether_header *eh;
2713 struct mbuf *m = NULL;
2714 u_int16_t vlan_tag = 0;
2715 int have_tag = 0;
2716
2717 cur_rx =
2718 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2719
2720 rxidx = cur_rx->bge_idx;
2721 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2722
2723 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2724 have_tag = 1;
2725 vlan_tag = cur_rx->bge_vlan_tag;
2726 }
2727
2728 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2729 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2730 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2731 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2732 BUS_DMASYNC_POSTREAD);
2733 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2734 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2735 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2736 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2737 jumbocnt++;
2738 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2739 ifp->if_ierrors++;
2740 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2741 continue;
2742 }
2743 if (bge_newbuf_jumbo(sc,
2744 sc->bge_jumbo, NULL) == ENOBUFS) {
2745 ifp->if_ierrors++;
2746 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2747 continue;
2748 }
2749 } else {
2750 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2751 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2752 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2753 BUS_DMASYNC_POSTREAD);
2754 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2755 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2756 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2757 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2758 stdcnt++;
2759 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2760 ifp->if_ierrors++;
2761 bge_newbuf_std(sc, sc->bge_std, m);
2762 continue;
2763 }
2764 if (bge_newbuf_std(sc, sc->bge_std,
2765 NULL) == ENOBUFS) {
2766 ifp->if_ierrors++;
2767 bge_newbuf_std(sc, sc->bge_std, m);
2768 continue;
2769 }
2770 }
2771
2772 ifp->if_ipackets++;
2773#ifndef __i386__
2774 /*
2775 * The i386 allows unaligned accesses, but for other
2776 * platforms we must make sure the payload is aligned.
2777 */
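		/* Shifting by ETHER_ALIGN (2 bytes) lands the IP header on a 32-bit boundary. */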
2778 if (sc->bge_rx_alignment_bug) {
2779 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2780 cur_rx->bge_len);
2781 m->m_data += ETHER_ALIGN;
2782 }
2783#endif
2784 eh = mtod(m, struct ether_header *);
2785 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2786 m->m_pkthdr.rcvif = ifp;
2787
2788#if 0 /* currently broken for some packets, possibly related to TCP options */
2789 if (ifp->if_capenable & IFCAP_RXCSUM) {
2790 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2791 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2792 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2793 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2794 m->m_pkthdr.csum_data =
2795 cur_rx->bge_tcp_udp_csum;
2796 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2797 }
2798 }
2799#endif
2800
2801 /*
2802 * If we received a packet with a vlan tag,
2803 * attach that information to the packet.
2804 */
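		/* The last argument is the failure action: if no tag can be attached, the mbuf is freed and we continue. */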
2805 if (have_tag)
2806 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2807
2808 BGE_UNLOCK(sc);
2809 (*ifp->if_input)(ifp, m);
2810 BGE_LOCK(sc);
2811 }
2812
2813 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2814 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2815 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2816 sc->bge_cdata.bge_rx_std_ring_map,
2817 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2743 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2818	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2819 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2820 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2821 sc->bge_cdata.bge_rx_jumbo_ring_map,
2822 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2823 }
2824
2825 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2826 if (stdcnt)
2827 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2828 if (jumbocnt)
2829 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2830
2831 return;
2832}
2833
2834static void
2835bge_txeof(sc)
2836 struct bge_softc *sc;
2837{
2838 struct bge_tx_bd *cur_tx = NULL;
2839 struct ifnet *ifp;
2840
2841 BGE_LOCK_ASSERT(sc);
2842
2843 ifp = &sc->arpcom.ac_if;
2844
2845 /*
2846 * Go through our tx ring and free mbufs for those
2847 * frames that have been sent.
2848 */
2849 while (sc->bge_tx_saved_considx !=
2850 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2851 u_int32_t idx = 0;
2852
2853 idx = sc->bge_tx_saved_considx;
2854 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
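		/*
		 * Only the last descriptor of a frame carries the END
		 * flag, so this counts completed packets rather than
		 * individual descriptors.
		 */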
2855 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2856 ifp->if_opackets++;
2857 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2858 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2859 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2860 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2861 sc->bge_cdata.bge_tx_dmamap[idx]);
2862 }
2863 sc->bge_txcnt--;
2864 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2865 ifp->if_timer = 0;
2866 }
2867
2868 if (cur_tx != NULL)
2869 ifp->if_flags &= ~IFF_OACTIVE;
2870
2871 return;
2872}
2873
2874static void
2875bge_intr(xsc)
2876 void *xsc;
2877{
2878 struct bge_softc *sc;
2879 struct ifnet *ifp;
2880 u_int32_t statusword;
2881 u_int32_t status, mimode;
2882
2883 sc = xsc;
2884 ifp = &sc->arpcom.ac_if;
2885
2886 BGE_LOCK(sc);
2887
2888 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2889 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2890
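	/*
	 * Read and clear the status word in a single atomic step so
	 * that a fresh status block update from the NIC is not lost.
	 */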
2891 statusword =
2892 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2893
2894#ifdef notdef
2895 /* Avoid this for now -- checking this register is expensive. */
2896 /* Make sure this is really our interrupt. */
2897 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2898 return;
2899#endif
2900 /* Ack interrupt and stop others from occurring. */
2901 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2902
2903 /*
2904 * Process link state changes.
2905 * Grrr. The link status word in the status block does
2906 * not work correctly on the BCM5700 rev AX and BX chips,
2907 * according to all available information. Hence, we have
2908 * to enable MII interrupts in order to properly obtain
2909 * async link changes. Unfortunately, this also means that
2910 * we have to read the MAC status register to detect link
2911 * changes, thereby adding an additional register access to
2912 * the interrupt handler.
2913 */
2914
2915 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2916
2917 status = CSR_READ_4(sc, BGE_MAC_STS);
2918 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2919 sc->bge_link = 0;
2920 callout_stop(&sc->bge_stat_ch);
2921 bge_tick_locked(sc);
2922 /* Clear the interrupt */
2923 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2924 BGE_EVTENB_MI_INTERRUPT);
2925 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2926 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2927 BRGPHY_INTRS);
2928 }
2929 } else {
2930 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2931 /*
2932 * Sometimes PCS encoding errors are detected in
2933 * TBI mode (on fiber NICs), and for some reason
2934 * the chip will signal them as link changes.
2935 * If we get a link change event, but the 'PCS
2936 * encoding error' bit in the MAC status register
2937 * is set, don't bother doing a link check.
2938 * This avoids spurious "gigabit link up" messages
2939 * that sometimes appear on fiber NICs during
2940 * periods of heavy traffic. (There should be no
2941 * effect on copper NICs.)
2942 *
2943 * If we do have a copper NIC (bge_tbi == 0) then
2944 * check that the AUTOPOLL bit is set before
2945 * processing the event as a real link change.
2946 * Turning AUTOPOLL on and off in the MII read/write
2947 * functions will often trigger a link status
2948 * interrupt for no reason.
2949 */
2950 status = CSR_READ_4(sc, BGE_MAC_STS);
2951 mimode = CSR_READ_4(sc, BGE_MI_MODE);
2952 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2953 BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
2954 (mimode & BGE_MIMODE_AUTOPOLL))) {
2955 sc->bge_link = 0;
2956 callout_stop(&sc->bge_stat_ch);
2957 bge_tick_locked(sc);
2958 }
2959 /* Clear the interrupt */
2960 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2961 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2962 BGE_MACSTAT_LINK_CHANGED);
2963
2964 /* Force flush the status block cached by PCI bridge */
2965 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2966 }
2967 }
2968
2969 if (ifp->if_flags & IFF_RUNNING) {
2970 /* Check RX return ring producer/consumer */
2971 bge_rxeof(sc);
2972
2973 /* Check TX ring producer/consumer */
2974 bge_txeof(sc);
2975 }
2976
2977 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2978 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2979
2980 bge_handle_events(sc);
2981
2982 /* Re-enable interrupts. */
2983 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2984
2985 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2986 bge_start_locked(ifp);
2987
2988 BGE_UNLOCK(sc);
2989
2990 return;
2991}
2992
2993static void
2994bge_tick_locked(sc)
2995 struct bge_softc *sc;
2996{
2997 struct mii_data *mii = NULL;
2998 struct ifmedia *ifm = NULL;
2999 struct ifnet *ifp;
3000
3001 ifp = &sc->arpcom.ac_if;
3002
3003 BGE_LOCK_ASSERT(sc);
3004
2929 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
3005 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3006 sc->bge_asicrev == BGE_ASICREV_BCM5750)
3007 bge_stats_update_regs(sc);
3008 else
3009 bge_stats_update(sc);
3010 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3011 if (sc->bge_link)
3012 return;
3013
3014 if (sc->bge_tbi) {
3015 ifm = &sc->bge_ifmedia;
3016 if (CSR_READ_4(sc, BGE_MAC_STS) &
3017 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3018 sc->bge_link++;
3019 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3020 BGE_CLRBIT(sc, BGE_MAC_MODE,
3021 BGE_MACMODE_TBI_SEND_CFGS);
3022 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3023 printf("bge%d: gigabit link up\n", sc->bge_unit);
3024 if (ifp->if_snd.ifq_head != NULL)
3025 bge_start_locked(ifp);
3026 }
3027 return;
3028 }
3029
3030 mii = device_get_softc(sc->bge_miibus);
3031 mii_tick(mii);
3032
3033 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3034 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3035 sc->bge_link++;
3036 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
3037 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
3038 printf("bge%d: gigabit link up\n",
3039 sc->bge_unit);
3040 if (ifp->if_snd.ifq_head != NULL)
3041 bge_start_locked(ifp);
3042 }
3043
3044 return;
3045}
3046
3047static void
3048bge_tick(xsc)
3049 void *xsc;
3050{
3051 struct bge_softc *sc;
3052
3053 sc = xsc;
3054
3055 BGE_LOCK(sc);
3056 bge_tick_locked(sc);
3057 BGE_UNLOCK(sc);
3058}
3059
3060static void
3061bge_stats_update_regs(sc)
3062 struct bge_softc *sc;
3063{
3064 struct ifnet *ifp;
3065 struct bge_mac_stats_regs stats;
3066 u_int32_t *s;
3067 int i;
3068
3069 ifp = &sc->arpcom.ac_if;
3070
3071 s = (u_int32_t *)&stats;
3072 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3073 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3074 s++;
3075 }
3076
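	/*
	 * The hardware counters are cumulative, so adding the total
	 * minus the current if_collisions value reduces to an absolute
	 * assignment of the chip's running total.
	 */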
3077 ifp->if_collisions +=
3078 (stats.dot3StatsSingleCollisionFrames +
3079 stats.dot3StatsMultipleCollisionFrames +
3080 stats.dot3StatsExcessiveCollisions +
3081 stats.dot3StatsLateCollisions) -
3082 ifp->if_collisions;
3083
3084 return;
3085}
3086
3087static void
3088bge_stats_update(sc)
3089 struct bge_softc *sc;
3090{
3091 struct ifnet *ifp;
3092 struct bge_stats *stats;
3093
3094 ifp = &sc->arpcom.ac_if;
3095
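	/*
	 * The statistics block is read through the NIC's memory
	 * window; only the low 32 bits of each 64-bit counter are
	 * used here.
	 */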
3096 stats = (struct bge_stats *)(sc->bge_vhandle +
3097 BGE_MEMWIN_START + BGE_STATS_BLOCK);
3098
3099 ifp->if_collisions +=
3100 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
3101 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
3102 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
3103 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
3104 ifp->if_collisions;
3105
3106#ifdef notdef
3107 ifp->if_collisions +=
3108 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3109 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3110 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3111 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3112 ifp->if_collisions;
3113#endif
3114
3115 return;
3116}
3117
3118/*
3119 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3120 * pointers to descriptors.
3121 */
3122static int
3123bge_encap(sc, m_head, txidx)
3124 struct bge_softc *sc;
3125 struct mbuf *m_head;
3126 u_int32_t *txidx;
3127{
3128 struct bge_tx_bd *f = NULL;
3129 u_int16_t csum_flags = 0;
3130 struct m_tag *mtag;
3131 struct bge_dmamap_arg ctx;
3132 bus_dmamap_t map;
3133 int error;
3134
3135
3136 if (m_head->m_pkthdr.csum_flags) {
3137 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3138 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3139 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3140 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3141 if (m_head->m_flags & M_LASTFRAG)
3142 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3143 else if (m_head->m_flags & M_FRAG)
3144 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3145 }
3146
3147 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
3148
3149 ctx.sc = sc;
3150 ctx.bge_idx = *txidx;
3151 ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3152 ctx.bge_flags = csum_flags;
3153 /*
3154 * Sanity check: avoid coming within 16 descriptors
3155 * of the end of the ring.
3156 */
3157 ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3158
3159 map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3160 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3161 m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3162
3163 if (error || ctx.bge_maxsegs == 0 /*||
3164 ctx.bge_idx == sc->bge_tx_saved_considx*/)
3165 return (ENOBUFS);
3166
3167 /*
3168 * Ensure that the map for this transmission
3169 * is placed at the array index of the last descriptor
3170 * in this chain.
3171 */
3172 sc->bge_cdata.bge_tx_dmamap[*txidx] =
3173 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3174 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3175 sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3176 sc->bge_txcnt += ctx.bge_maxsegs;
3177 f = &sc->bge_ldata.bge_tx_ring[*txidx];
3178 if (mtag != NULL) {
3179 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3180 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3181 } else {
3182 f->bge_vlan_tag = 0;
3183 }
3184
3185 BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3186 *txidx = ctx.bge_idx;
3187
3188 return(0);
3189}
3190
3191/*
3192 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3193 * to the mbuf data regions directly in the transmit descriptors.
3194 */
3195static void
3196bge_start_locked(ifp)
3197 struct ifnet *ifp;
3198{
3199 struct bge_softc *sc;
3200 struct mbuf *m_head = NULL;
3201 u_int32_t prodidx = 0;
3202
3203 sc = ifp->if_softc;
3204
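	/*
	 * With no link established yet, hold off on transmitting
	 * until a handful of packets have queued up.
	 */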
3205 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3206 return;
3207
3208 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3209
3210 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3211 IF_DEQUEUE(&ifp->if_snd, m_head);
3212 if (m_head == NULL)
3213 break;
3214
3215 /*
3216 * XXX
3217 * The code inside the if() block is never reached since we
3218 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3219 * requests to checksum TCP/UDP in a fragmented packet.
3220 *
3221 * XXX
3222 * safety overkill. If this is a fragmented packet chain
3223 * with delayed TCP/UDP checksums, then only encapsulate
3224 * it if we have enough descriptors to handle the entire
3225 * chain at once.
3226 * (paranoia -- may not actually be needed)
3227 */
3228 if (m_head->m_flags & M_FIRSTFRAG &&
3229 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3230 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3231 m_head->m_pkthdr.csum_data + 16) {
3232 IF_PREPEND(&ifp->if_snd, m_head);
3233 ifp->if_flags |= IFF_OACTIVE;
3234 break;
3235 }
3236 }
3237
3238 /*
3239 * Pack the data into the transmit ring. If we
3240 * don't have room, set the OACTIVE flag and wait
3241 * for the NIC to drain the ring.
3242 */
3243 if (bge_encap(sc, m_head, &prodidx)) {
3244 IF_PREPEND(&ifp->if_snd, m_head);
3245 ifp->if_flags |= IFF_OACTIVE;
3246 break;
3247 }
3248
3249 /*
3250 * If there's a BPF listener, bounce a copy of this frame
3251 * to him.
3252 */
3253 BPF_MTAP(ifp, m_head);
3254 }
3255
3256 /* Transmit */
3257 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3258 /* 5700 b2 errata */
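	/*
	 * On BCM5700 B-step silicon the first mailbox write can
	 * apparently be dropped, so the producer index is written a
	 * second time as a precaution.
	 */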
3259 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3260 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3261
3262 /*
3263 * Set a timeout in case the chip goes out to lunch.
3264 */
3265 ifp->if_timer = 5;
3266
3267 return;
3268}
3269
3270/*
3271 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3272 * to the mbuf data regions directly in the transmit descriptors.
3273 */
3274static void
3275bge_start(ifp)
3276 struct ifnet *ifp;
3277{
3278 struct bge_softc *sc;
3279
3280 sc = ifp->if_softc;
3281 BGE_LOCK(sc);
3282 bge_start_locked(ifp);
3283 BGE_UNLOCK(sc);
3284}
3285
3286static void
3287bge_init_locked(sc)
3288 struct bge_softc *sc;
3289{
3290 struct ifnet *ifp;
3291 u_int16_t *m;
3292
3293 BGE_LOCK_ASSERT(sc);
3294
3295 ifp = &sc->arpcom.ac_if;
3296
3297 if (ifp->if_flags & IFF_RUNNING)
3298 return;
3299
3300 /* Cancel pending I/O and flush buffers. */
3301 bge_stop(sc);
3302 bge_reset(sc);
3303 bge_chipinit(sc);
3304
3305 /*
3306 * Init the various state machines, ring
3307 * control blocks and firmware.
3308 */
3309 if (bge_blockinit(sc)) {
3310 printf("bge%d: initialization failure\n", sc->bge_unit);
3311 return;
3312 }
3313
3314 ifp = &sc->arpcom.ac_if;
3315
3316 /* Specify MTU: payload plus Ethernet header, CRC and VLAN tag. */
3317 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3318 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3319
3320 /* Load our MAC address. */
3321 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3322 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3323 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3324
3325 /* Enable or disable promiscuous mode as needed. */
3326 if (ifp->if_flags & IFF_PROMISC) {
3327 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3328 } else {
3329 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3330 }
3331
3332 /* Program multicast filter. */
3333 bge_setmulti(sc);
3334
3335 /* Init RX ring. */
3336 bge_init_rx_ring_std(sc);
3337
3338 /*
3339 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3340 * memory to ensure that the chip has in fact read the first
3341 * entry of the ring.
3342 */
3343 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3344 u_int32_t v, i;
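		/*
		 * Offset 8 skips the 64-bit buffer address and lands on
		 * the index/length word of the first descriptor; once the
		 * chip has fetched the entry it should read back as
		 * MCLBYTES - ETHER_ALIGN.
		 */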
3345 for (i = 0; i < 10; i++) {
3346 DELAY(20);
3347 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3348 if (v == (MCLBYTES - ETHER_ALIGN))
3349 break;
3350 }
3351 if (i == 10)
3352 printf ("bge%d: 5705 A0 chip failed to load RX ring\n",
3353 sc->bge_unit);
3354 }
3355
3356 /* Init jumbo RX ring. */
3357 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3358 bge_init_rx_ring_jumbo(sc);
3359
3360 /* Init our RX return ring index */
3361 sc->bge_rx_saved_considx = 0;
3362
3363 /* Init TX ring. */
3364 bge_init_tx_ring(sc);
3365
3366 /* Turn on transmitter */
3367 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3368
3369 /* Turn on receiver */
3370 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3371
3372 /* Tell firmware we're alive. */
3373 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3374
3375 /* Enable host interrupts. */
3376 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3377 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3378 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3379
3380 bge_ifmedia_upd(ifp);
3381
3382 ifp->if_flags |= IFF_RUNNING;
3383 ifp->if_flags &= ~IFF_OACTIVE;
3384
3385 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3386
3387 return;
3388}
3389
3390static void
3391bge_init(xsc)
3392 void *xsc;
3393{
3394 struct bge_softc *sc = xsc;
3395
3396 BGE_LOCK(sc);
3397 bge_init_locked(sc);
3398 BGE_UNLOCK(sc);
3399
3400 return;
3401}
3402
3403/*
3404 * Set media options.
3405 */
3406static int
3407bge_ifmedia_upd(ifp)
3408 struct ifnet *ifp;
3409{
3410 struct bge_softc *sc;
3411 struct mii_data *mii;
3412 struct ifmedia *ifm;
3413
3414 sc = ifp->if_softc;
3415 ifm = &sc->bge_ifmedia;
3416
3417 /* If this is a 1000baseX NIC, enable the TBI port. */
3418 if (sc->bge_tbi) {
3419 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3420 return(EINVAL);
3421 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3422 case IFM_AUTO:
3423 /*
3424 * The BCM5704 ASIC appears to have a special
3425 * mechanism for programming the autoneg
3426 * advertisement registers in TBI mode.
3427 */
3428 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3429 uint32_t sgdig;
3430 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3431 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3432 sgdig |= BGE_SGDIGCFG_AUTO|
3433 BGE_SGDIGCFG_PAUSE_CAP|
3434 BGE_SGDIGCFG_ASYM_PAUSE;
3435 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3436 sgdig|BGE_SGDIGCFG_SEND);
3437 DELAY(5);
3438 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3439 }
3440 break;
3441 case IFM_1000_SX:
3442 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3443 BGE_CLRBIT(sc, BGE_MAC_MODE,
3444 BGE_MACMODE_HALF_DUPLEX);
3445 } else {
3446 BGE_SETBIT(sc, BGE_MAC_MODE,
3447 BGE_MACMODE_HALF_DUPLEX);
3448 }
3449 break;
3450 default:
3451 return(EINVAL);
3452 }
3453 return(0);
3454 }
3455
3456 mii = device_get_softc(sc->bge_miibus);
3457 sc->bge_link = 0;
3458 if (mii->mii_instance) {
3459 struct mii_softc *miisc;
3460 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3461 miisc = LIST_NEXT(miisc, mii_list))
3462 mii_phy_reset(miisc);
3463 }
3464 mii_mediachg(mii);
3465
3466 return(0);
3467}
3468
3469/*
3470 * Report current media status.
3471 */
3472static void
3473bge_ifmedia_sts(ifp, ifmr)
3474 struct ifnet *ifp;
3475 struct ifmediareq *ifmr;
3476{
3477 struct bge_softc *sc;
3478 struct mii_data *mii;
3479
3480 sc = ifp->if_softc;
3481
3482 if (sc->bge_tbi) {
3483 ifmr->ifm_status = IFM_AVALID;
3484 ifmr->ifm_active = IFM_ETHER;
3485 if (CSR_READ_4(sc, BGE_MAC_STS) &
3486 BGE_MACSTAT_TBI_PCS_SYNCHED)
3487 ifmr->ifm_status |= IFM_ACTIVE;
3488 ifmr->ifm_active |= IFM_1000_SX;
3489 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3490 ifmr->ifm_active |= IFM_HDX;
3491 else
3492 ifmr->ifm_active |= IFM_FDX;
3493 return;
3494 }
3495
3496 mii = device_get_softc(sc->bge_miibus);
3497 mii_pollstat(mii);
3498 ifmr->ifm_active = mii->mii_media_active;
3499 ifmr->ifm_status = mii->mii_media_status;
3500
3501 return;
3502}
3503
3504static int
3505bge_ioctl(ifp, command, data)
3506 struct ifnet *ifp;
3507 u_long command;
3508 caddr_t data;
3509{
3510 struct bge_softc *sc = ifp->if_softc;
3511 struct ifreq *ifr = (struct ifreq *) data;
3512 int mask, error = 0;
3513 struct mii_data *mii;
3514
3515 switch(command) {
3516 case SIOCSIFMTU:
3517 /* Disallow jumbo frames on 5705 and 5750. */
3441 if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3518 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3519 sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3520 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3521 error = EINVAL;
3522 else {
3523 ifp->if_mtu = ifr->ifr_mtu;
3524 ifp->if_flags &= ~IFF_RUNNING;
3525 bge_init(sc);
3526 }
3527 break;
3528 case SIOCSIFFLAGS:
3529 BGE_LOCK(sc);
3530 if (ifp->if_flags & IFF_UP) {
3531 /*
3532 * If only the state of the PROMISC flag changed,
3533 * then just use the 'set promisc mode' command
3534 * instead of reinitializing the entire NIC. Doing
3535 * a full re-init means reloading the firmware and
3536 * waiting for it to start up, which may take a
3537 * second or two.
3538 */
3539 if (ifp->if_flags & IFF_RUNNING &&
3540 ifp->if_flags & IFF_PROMISC &&
3541 !(sc->bge_if_flags & IFF_PROMISC)) {
3542 BGE_SETBIT(sc, BGE_RX_MODE,
3543 BGE_RXMODE_RX_PROMISC);
3544 } else if (ifp->if_flags & IFF_RUNNING &&
3545 !(ifp->if_flags & IFF_PROMISC) &&
3546 sc->bge_if_flags & IFF_PROMISC) {
3547 BGE_CLRBIT(sc, BGE_RX_MODE,
3548 BGE_RXMODE_RX_PROMISC);
3549 } else
3550 bge_init_locked(sc);
3551 } else {
3552 if (ifp->if_flags & IFF_RUNNING) {
3553 bge_stop(sc);
3554 }
3555 }
3556 sc->bge_if_flags = ifp->if_flags;
3557 BGE_UNLOCK(sc);
3558 error = 0;
3559 break;
3560 case SIOCADDMULTI:
3561 case SIOCDELMULTI:
3562 if (ifp->if_flags & IFF_RUNNING) {
3563 BGE_LOCK(sc);
3564 bge_setmulti(sc);
3565 BGE_UNLOCK(sc);
3566 error = 0;
3567 }
3568 break;
3569 case SIOCSIFMEDIA:
3570 case SIOCGIFMEDIA:
3571 if (sc->bge_tbi) {
3572 error = ifmedia_ioctl(ifp, ifr,
3573 &sc->bge_ifmedia, command);
3574 } else {
3575 mii = device_get_softc(sc->bge_miibus);
3576 error = ifmedia_ioctl(ifp, ifr,
3577 &mii->mii_media, command);
3578 }
3579 break;
3580 case SIOCSIFCAP:
3581 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3582 /* NB: the code for RX csum offload is disabled for now */
3583 if (mask & IFCAP_TXCSUM) {
3584 ifp->if_capenable ^= IFCAP_TXCSUM;
3585 if (IFCAP_TXCSUM & ifp->if_capenable)
3586 ifp->if_hwassist = BGE_CSUM_FEATURES;
3587 else
3588 ifp->if_hwassist = 0;
3589 }
3590 error = 0;
3591 break;
3592 default:
3593 error = ether_ioctl(ifp, command, data);
3594 break;
3595 }
3596
3597 return(error);
3598}
3599
3600static void
3601bge_watchdog(ifp)
3602 struct ifnet *ifp;
3603{
3604 struct bge_softc *sc;
3605
3606 sc = ifp->if_softc;
3607
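	/*
	 * bge_start_locked() arms if_timer and bge_txeof() clears it
	 * as descriptors complete; reaching this point means the chip
	 * has stopped processing the TX ring, so reset and reinit.
	 */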
3608 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3609
3610 ifp->if_flags &= ~IFF_RUNNING;
3611 bge_init(sc);
3612
3613 ifp->if_oerrors++;
3614
3615 return;
3616}
3617
3618/*
3619 * Stop the adapter and free any mbufs allocated to the
3620 * RX and TX lists.
3621 */
3622static void
3623bge_stop(sc)
3624 struct bge_softc *sc;
3625{
3626 struct ifnet *ifp;
3627 struct ifmedia_entry *ifm;
3628 struct mii_data *mii = NULL;
3629 int mtmp, itmp;
3630
3631 BGE_LOCK_ASSERT(sc);
3632
3633 ifp = &sc->arpcom.ac_if;
3634
3635 if (!sc->bge_tbi)
3636 mii = device_get_softc(sc->bge_miibus);
3637
3638 callout_stop(&sc->bge_stat_ch);
3639
3640 /*
3641 * Disable all of the receiver blocks
3642 */
3643 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3644 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3645 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3568 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3646 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3647 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3648 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3649 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3650 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3651 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3652
3653 /*
3654 * Disable all of the transmit blocks
3655 */
3656 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3657 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3658 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3659 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3660 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3582 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3661 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3662 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3663 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3664 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3665
3666 /*
3667 * Shut down all of the memory managers and related
3668 * state machines.
3669 */
3670 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3671 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3592 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3672 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3673 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3674 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3675 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3676 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3596 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
3677 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3678 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3679 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3680 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3681 }
3682
3683 /* Disable host interrupts. */
3684 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3685 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3686
3687 /*
3688 * Tell firmware we're shutting down.
3689 */
3690 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3691
3692 /* Free the RX lists. */
3693 bge_free_rx_ring_std(sc);
3694
3695 /* Free jumbo RX list. */
3614 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3696 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3697 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3698 bge_free_rx_ring_jumbo(sc);
3699
3700 /* Free TX buffers. */
3701 bge_free_tx_ring(sc);
3702
3703 /*
3704 * Isolate/power down the PHY, but leave the media selection
3705 * unchanged so that things will be put back to normal when
3706 * we bring the interface back up.
3707 */
3708 if (!sc->bge_tbi) {
3709 itmp = ifp->if_flags;
3710 ifp->if_flags |= IFF_UP;
3711 ifm = mii->mii_media.ifm_cur;
3712 mtmp = ifm->ifm_media;
3713 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3714 mii_mediachg(mii);
3715 ifm->ifm_media = mtmp;
3716 ifp->if_flags = itmp;
3717 }
3718
3719 sc->bge_link = 0;
3720
3721 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3722
3723 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3724
3725 return;
3726}
3727
3728/*
3729 * Stop all chip I/O so that the kernel's probe routines don't
3730 * get confused by errant DMAs when rebooting.
3731 */
3732static void
3733bge_shutdown(dev)
3734 device_t dev;
3735{
3736 struct bge_softc *sc;
3737
3738 sc = device_get_softc(dev);
3739
3740 BGE_LOCK(sc);
3741 bge_stop(sc);
3742 bge_reset(sc);
3743 BGE_UNLOCK(sc);
3744
3745 return;
3746}