if_bge.c, revision 155170 -> 155180
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 155180 2006-02-01 14:26:35Z oleg $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
  42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
  53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
  62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82
83#include <net/if.h>
84#include <net/if_arp.h>
85#include <net/ethernet.h>
86#include <net/if_dl.h>
87#include <net/if_media.h>
88
89#include <net/bpf.h>
90
91#include <net/if_types.h>
92#include <net/if_vlan_var.h>
93
94#include <netinet/in_systm.h>
95#include <netinet/in.h>
96#include <netinet/ip.h>
97
98#include <machine/clock.h> /* for DELAY */
99#include <machine/bus.h>
100#include <machine/resource.h>
101#include <sys/bus.h>
102#include <sys/rman.h>
103
104#include <dev/mii/mii.h>
105#include <dev/mii/miivar.h>
106#include "miidevs.h"
107#include <dev/mii/brgphyreg.h>
108
109#include <dev/pci/pcireg.h>
110#include <dev/pci/pcivar.h>
111
112#include <dev/bge/if_bgereg.h>
113
114#include "opt_bge.h"
115
116#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
117#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
118
119MODULE_DEPEND(bge, pci, 1, 1, 1);
120MODULE_DEPEND(bge, ether, 1, 1, 1);
121MODULE_DEPEND(bge, miibus, 1, 1, 1);
122
123/* "device miibus" required. See GENERIC if you get errors here. */
124#include "miibus_if.h"
125
126/*
127 * Various supported device vendors/types and their names. Note: the
128 * spec seems to indicate that the hardware still has Alteon's vendor
 129 * ID burned into it, though it will always be overridden by the vendor
130 * ID in the EEPROM. Just to be safe, we cover all possibilities.
131 */
132#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
133
134static struct bge_type bge_devs[] = {
135 { ALT_VENDORID, ALT_DEVICEID_BCM5700,
136 "Broadcom BCM5700 Gigabit Ethernet" },
137 { ALT_VENDORID, ALT_DEVICEID_BCM5701,
138 "Broadcom BCM5701 Gigabit Ethernet" },
139 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
140 "Broadcom BCM5700 Gigabit Ethernet" },
141 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
142 "Broadcom BCM5701 Gigabit Ethernet" },
143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
144 "Broadcom BCM5702 Gigabit Ethernet" },
145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
146 "Broadcom BCM5702X Gigabit Ethernet" },
147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
148 "Broadcom BCM5703 Gigabit Ethernet" },
149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
150 "Broadcom BCM5703X Gigabit Ethernet" },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
152 "Broadcom BCM5704C Dual Gigabit Ethernet" },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
154 "Broadcom BCM5704S Dual Gigabit Ethernet" },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
156 "Broadcom BCM5705 Gigabit Ethernet" },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
158 "Broadcom BCM5705K Gigabit Ethernet" },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
160 "Broadcom BCM5705M Gigabit Ethernet" },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
162 "Broadcom BCM5705M Gigabit Ethernet" },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
164 "Broadcom BCM5714C Gigabit Ethernet" },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
166 "Broadcom BCM5721 Gigabit Ethernet" },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
168 "Broadcom BCM5750 Gigabit Ethernet" },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
170 "Broadcom BCM5750M Gigabit Ethernet" },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
172 "Broadcom BCM5751 Gigabit Ethernet" },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
174 "Broadcom BCM5751M Gigabit Ethernet" },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
176 "Broadcom BCM5752 Gigabit Ethernet" },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
178 "Broadcom BCM5782 Gigabit Ethernet" },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
180 "Broadcom BCM5788 Gigabit Ethernet" },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
182 "Broadcom BCM5789 Gigabit Ethernet" },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
184 "Broadcom BCM5901 Fast Ethernet" },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
186 "Broadcom BCM5901A2 Fast Ethernet" },
187 { SK_VENDORID, SK_DEVICEID_ALTIMA,
188 "SysKonnect Gigabit Ethernet" },
189 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
190 "Altima AC1000 Gigabit Ethernet" },
191 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
192 "Altima AC1002 Gigabit Ethernet" },
193 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
194 "Altima AC9100 Gigabit Ethernet" },
195 { 0, 0, NULL }
196};
197
198static int bge_probe (device_t);
199static int bge_attach (device_t);
200static int bge_detach (device_t);
201static int bge_suspend (device_t);
202static int bge_resume (device_t);
203static void bge_release_resources
204 (struct bge_softc *);
205static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
206static int bge_dma_alloc (device_t);
207static void bge_dma_free (struct bge_softc *);
208
209static void bge_txeof (struct bge_softc *);
210static void bge_rxeof (struct bge_softc *);
211
212static void bge_tick_locked (struct bge_softc *);
213static void bge_tick (void *);
214static void bge_stats_update (struct bge_softc *);
215static void bge_stats_update_regs
216 (struct bge_softc *);
217static int bge_encap (struct bge_softc *, struct mbuf *,
218 u_int32_t *);
219
220static void bge_intr (void *);
221static void bge_start_locked (struct ifnet *);
222static void bge_start (struct ifnet *);
223static int bge_ioctl (struct ifnet *, u_long, caddr_t);
224static void bge_init_locked (struct bge_softc *);
225static void bge_init (void *);
226static void bge_stop (struct bge_softc *);
227static void bge_watchdog (struct ifnet *);
228static void bge_shutdown (device_t);
229static int bge_ifmedia_upd (struct ifnet *);
230static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
231
232static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *);
233static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int);
234
235static void bge_setmulti (struct bge_softc *);
236
237static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *);
238static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *);
239static int bge_init_rx_ring_std (struct bge_softc *);
240static void bge_free_rx_ring_std (struct bge_softc *);
241static int bge_init_rx_ring_jumbo (struct bge_softc *);
242static void bge_free_rx_ring_jumbo (struct bge_softc *);
243static void bge_free_tx_ring (struct bge_softc *);
244static int bge_init_tx_ring (struct bge_softc *);
245
246static int bge_chipinit (struct bge_softc *);
247static int bge_blockinit (struct bge_softc *);
248
249#ifdef notdef
250static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
251static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int);
252static void bge_vpd_read (struct bge_softc *);
253#endif
254
255static u_int32_t bge_readmem_ind
256 (struct bge_softc *, int);
257static void bge_writemem_ind (struct bge_softc *, int, int);
258#ifdef notdef
259static u_int32_t bge_readreg_ind
260 (struct bge_softc *, int);
261#endif
262static void bge_writereg_ind (struct bge_softc *, int, int);
263
264static int bge_miibus_readreg (device_t, int, int);
265static int bge_miibus_writereg (device_t, int, int, int);
266static void bge_miibus_statchg (device_t);
267#ifdef DEVICE_POLLING
268static void bge_poll (struct ifnet *ifp, enum poll_cmd cmd,
269 int count);
270static void bge_poll_locked (struct ifnet *ifp, enum poll_cmd cmd,
271 int count);
272#endif
273
274static void bge_reset (struct bge_softc *);
275static void bge_link_upd (struct bge_softc *);
276
277static device_method_t bge_methods[] = {
278 /* Device interface */
279 DEVMETHOD(device_probe, bge_probe),
280 DEVMETHOD(device_attach, bge_attach),
281 DEVMETHOD(device_detach, bge_detach),
282 DEVMETHOD(device_shutdown, bge_shutdown),
283 DEVMETHOD(device_suspend, bge_suspend),
284 DEVMETHOD(device_resume, bge_resume),
285
286 /* bus interface */
287 DEVMETHOD(bus_print_child, bus_generic_print_child),
288 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
289
290 /* MII interface */
291 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
292 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
293 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
294
295 { 0, 0 }
296};
297
298static driver_t bge_driver = {
299 "bge",
300 bge_methods,
301 sizeof(struct bge_softc)
302};
303
304static devclass_t bge_devclass;
305
306DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
307DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
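/*
 * Note: the second DRIVER_MODULE() statement above registers the miibus
 * driver on the bge bus, so that a miibus instance (and the PHY driver
 * beneath it) can attach to each controller.
 */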
308
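/*
 * Indirect access: the controller's internal memory and registers are
 * reached through sliding windows in PCI configuration space.  An offset
 * is first written to the window base-address register (BGE_PCI_MEMWIN_BASEADDR
 * or BGE_PCI_REG_BASEADDR), and the selected 32-bit word is then transferred
 * through the corresponding data register (BGE_PCI_MEMWIN_DATA or
 * BGE_PCI_REG_DATA).  The four *_ind() routines below implement this.
 */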
309static u_int32_t
310bge_readmem_ind(sc, off)
311 struct bge_softc *sc;
312 int off;
313{
314 device_t dev;
315
316 dev = sc->bge_dev;
317
318 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
319 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
320}
321
322static void
323bge_writemem_ind(sc, off, val)
324 struct bge_softc *sc;
325 int off, val;
326{
327 device_t dev;
328
329 dev = sc->bge_dev;
330
331 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
332 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
333
334 return;
335}
336
337#ifdef notdef
338static u_int32_t
339bge_readreg_ind(sc, off)
340 struct bge_softc *sc;
341 int off;
342{
343 device_t dev;
344
345 dev = sc->bge_dev;
346
347 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
348 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
349}
350#endif
351
352static void
353bge_writereg_ind(sc, off, val)
354 struct bge_softc *sc;
355 int off, val;
356{
357 device_t dev;
358
359 dev = sc->bge_dev;
360
361 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
362 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
363
364 return;
365}
366
367/*
368 * Map a single buffer address.
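 * This is the callback passed to bus_dmamap_load(): it records the bus
 * address of the single mapped segment in the caller's bge_dmamap_arg and
 * clears bge_maxsegs to flag an error if more segments were returned than
 * the caller allowed for.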
369 */
370
371static void
372bge_dma_map_addr(arg, segs, nseg, error)
373 void *arg;
374 bus_dma_segment_t *segs;
375 int nseg;
376 int error;
377{
378 struct bge_dmamap_arg *ctx;
379
380 if (error)
381 return;
382
383 ctx = arg;
384
385 if (nseg > ctx->bge_maxsegs) {
386 ctx->bge_maxsegs = 0;
387 return;
388 }
389
390 ctx->bge_busaddr = segs->ds_addr;
391
392 return;
393}
394
395#ifdef notdef
396static u_int8_t
397bge_vpd_readbyte(sc, addr)
398 struct bge_softc *sc;
399 int addr;
400{
401 int i;
402 device_t dev;
403 u_int32_t val;
404
405 dev = sc->bge_dev;
406 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
407 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
408 DELAY(10);
409 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
410 break;
411 }
412
 413	if (i == BGE_TIMEOUT * 10) {
414 device_printf(sc->bge_dev, "VPD read timed out\n");
415 return(0);
416 }
417
418 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
419
420 return((val >> ((addr % 4) * 8)) & 0xFF);
421}
422
423static void
424bge_vpd_read_res(sc, res, addr)
425 struct bge_softc *sc;
426 struct vpd_res *res;
427 int addr;
428{
429 int i;
430 u_int8_t *ptr;
431
432 ptr = (u_int8_t *)res;
433 for (i = 0; i < sizeof(struct vpd_res); i++)
434 ptr[i] = bge_vpd_readbyte(sc, i + addr);
435
436 return;
437}
438
439static void
440bge_vpd_read(sc)
441 struct bge_softc *sc;
442{
443 int pos = 0, i;
444 struct vpd_res res;
445
446 if (sc->bge_vpd_prodname != NULL)
447 free(sc->bge_vpd_prodname, M_DEVBUF);
448 if (sc->bge_vpd_readonly != NULL)
449 free(sc->bge_vpd_readonly, M_DEVBUF);
450 sc->bge_vpd_prodname = NULL;
451 sc->bge_vpd_readonly = NULL;
452
453 bge_vpd_read_res(sc, &res, pos);
454
455 if (res.vr_id != VPD_RES_ID) {
456 device_printf(sc->bge_dev,
457 "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
458 res.vr_id);
459 return;
460 }
461
462 pos += sizeof(res);
463 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
464 for (i = 0; i < res.vr_len; i++)
465 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
466 sc->bge_vpd_prodname[i] = '\0';
467 pos += i;
468
469 bge_vpd_read_res(sc, &res, pos);
470
471 if (res.vr_id != VPD_RES_READ) {
472 device_printf(sc->bge_dev,
473 "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
474 res.vr_id);
475 return;
476 }
477
478 pos += sizeof(res);
479 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
 480	for (i = 0; i < res.vr_len; i++)
481 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
482
483 return;
484}
485#endif
486
487/*
488 * Read a byte of data stored in the EEPROM at address 'addr.' The
489 * BCM570x supports both the traditional bitbang interface and an
490 * auto access interface for reading the EEPROM. We use the auto
491 * access method.
492 */
493static u_int8_t
494bge_eeprom_getbyte(sc, addr, dest)
495 struct bge_softc *sc;
496 int addr;
497 u_int8_t *dest;
498{
499 int i;
500 u_int32_t byte = 0;
501
502 /*
503 * Enable use of auto EEPROM access so we can avoid
504 * having to use the bitbang method.
505 */
506 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
507
508 /* Reset the EEPROM, load the clock period. */
509 CSR_WRITE_4(sc, BGE_EE_ADDR,
510 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
511 DELAY(20);
512
513 /* Issue the read EEPROM command. */
514 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
515
516 /* Wait for completion */
517 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
518 DELAY(10);
519 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
520 break;
521 }
522
 523	if (i == BGE_TIMEOUT * 10) {
524 device_printf(sc->bge_dev, "EEPROM read timed out\n");
525 return(1);
526 }
527
528 /* Get result. */
529 byte = CSR_READ_4(sc, BGE_EE_DATA);
530
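	/*
	 * BGE_EE_DATA returns a whole 32-bit word; pick out the byte
	 * selected by the low two bits of the EEPROM address.
	 */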
531 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
532
533 return(0);
534}
535
536/*
537 * Read a sequence of bytes from the EEPROM.
538 */
539static int
540bge_read_eeprom(sc, dest, off, cnt)
541 struct bge_softc *sc;
542 caddr_t dest;
543 int off;
544 int cnt;
545{
546 int err = 0, i;
547 u_int8_t byte = 0;
548
549 for (i = 0; i < cnt; i++) {
550 err = bge_eeprom_getbyte(sc, off + i, &byte);
551 if (err)
552 break;
553 *(dest + i) = byte;
554 }
555
556 return(err ? 1 : 0);
557}
558
559static int
560bge_miibus_readreg(dev, phy, reg)
561 device_t dev;
562 int phy, reg;
563{
564 struct bge_softc *sc;
565 u_int32_t val, autopoll;
566 int i;
567
568 sc = device_get_softc(dev);
569
570 /*
571 * Broadcom's own driver always assumes the internal
572 * PHY is at GMII address 1. On some chips, the PHY responds
573 * to accesses at all addresses, which could cause us to
 574	 * bogusly attach the PHY 32 times at probe time. Always
575 * restricting the lookup to address 1 is simpler than
 576	 * trying to figure out which chip revisions should be
577 * special-cased.
578 */
579 if (phy != 1)
580 return(0);
581
582 /* Reading with autopolling on may trigger PCI errors */
583 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
584 if (autopoll & BGE_MIMODE_AUTOPOLL) {
585 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
586 DELAY(40);
587 }
588
589 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
590 BGE_MIPHY(phy)|BGE_MIREG(reg));
591
592 for (i = 0; i < BGE_TIMEOUT; i++) {
593 val = CSR_READ_4(sc, BGE_MI_COMM);
594 if (!(val & BGE_MICOMM_BUSY))
595 break;
596 }
597
598 if (i == BGE_TIMEOUT) {
599 if_printf(sc->bge_ifp, "PHY read timed out\n");
600 val = 0;
601 goto done;
602 }
603
604 val = CSR_READ_4(sc, BGE_MI_COMM);
605
606done:
607 if (autopoll & BGE_MIMODE_AUTOPOLL) {
608 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
609 DELAY(40);
610 }
611
612 if (val & BGE_MICOMM_READFAIL)
613 return(0);
614
615 return(val & 0xFFFF);
616}
617
618static int
619bge_miibus_writereg(dev, phy, reg, val)
620 device_t dev;
621 int phy, reg, val;
622{
623 struct bge_softc *sc;
624 u_int32_t autopoll;
625 int i;
626
627 sc = device_get_softc(dev);
628
 629	/* Accessing MI_COMM with autopolling on may trigger PCI errors */
630 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
631 if (autopoll & BGE_MIMODE_AUTOPOLL) {
632 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
633 DELAY(40);
634 }
635
636 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
637 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
638
639 for (i = 0; i < BGE_TIMEOUT; i++) {
640 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
641 break;
642 }
643
644 if (autopoll & BGE_MIMODE_AUTOPOLL) {
645 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
646 DELAY(40);
647 }
648
649 if (i == BGE_TIMEOUT) {
 650		if_printf(sc->bge_ifp, "PHY write timed out\n");
651 return(0);
652 }
653
654 return(0);
655}
656
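/*
 * MII status-change callback: reprogram the MAC to match the mode the PHY
 * negotiated, selecting GMII or MII port mode and half or full duplex.
 */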
657static void
658bge_miibus_statchg(dev)
659 device_t dev;
660{
661 struct bge_softc *sc;
662 struct mii_data *mii;
663
664 sc = device_get_softc(dev);
665 mii = device_get_softc(sc->bge_miibus);
666
667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
669 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
670 } else {
671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
672 }
673
674 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
675 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
676 } else {
677 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
678 }
679
680 return;
681}
682
683/*
 684 * Initialize a standard receive ring descriptor.
685 */
686static int
687bge_newbuf_std(sc, i, m)
688 struct bge_softc *sc;
689 int i;
690 struct mbuf *m;
691{
692 struct mbuf *m_new = NULL;
693 struct bge_rx_bd *r;
694 struct bge_dmamap_arg ctx;
695 int error;
696
697 if (m == NULL) {
698 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
699 if (m_new == NULL)
700 return(ENOBUFS);
701 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 } else {
703 m_new = m;
704 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
705 m_new->m_data = m_new->m_ext.ext_buf;
706 }
707
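	/*
	 * Trim ETHER_ALIGN (2) bytes from the front of the cluster so the
	 * IP header ends up 32-bit aligned.  Chips with the RX alignment
	 * bug cannot DMA to unaligned addresses, so the offset is skipped
	 * for them.
	 */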
708 if (!sc->bge_rx_alignment_bug)
709 m_adj(m_new, ETHER_ALIGN);
710 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
711 r = &sc->bge_ldata.bge_rx_std_ring[i];
712 ctx.bge_maxsegs = 1;
713 ctx.sc = sc;
714 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
715 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
716 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
717 if (error || ctx.bge_maxsegs == 0) {
718 if (m == NULL) {
719 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
720 m_freem(m_new);
721 }
722 return(ENOMEM);
723 }
724 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
725 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
726 r->bge_flags = BGE_RXBDFLAG_END;
727 r->bge_len = m_new->m_len;
728 r->bge_idx = i;
729
730 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
731 sc->bge_cdata.bge_rx_std_dmamap[i],
732 BUS_DMASYNC_PREREAD);
733
734 return(0);
735}
736
737/*
738 * Initialize a jumbo receive ring descriptor. This allocates
739 * a jumbo buffer from the pool managed internally by the driver.
740 */
741static int
742bge_newbuf_jumbo(sc, i, m)
743 struct bge_softc *sc;
744 int i;
745 struct mbuf *m;
746{
747 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
748 struct bge_extrx_bd *r;
749 struct mbuf *m_new = NULL;
750 int nsegs;
751 int error;
752
753 if (m == NULL) {
754 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
755 if (m_new == NULL)
756 return(ENOBUFS);
757
758 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
759 if (!(m_new->m_flags & M_EXT)) {
760 m_freem(m_new);
761 return(ENOBUFS);
762 }
763 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
764 } else {
765 m_new = m;
766 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
767 m_new->m_data = m_new->m_ext.ext_buf;
768 }
769
770 if (!sc->bge_rx_alignment_bug)
771 m_adj(m_new, ETHER_ALIGN);
772
773 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
774 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
775 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
776 if (error) {
777 if (m == NULL)
778 m_freem(m_new);
779 return(error);
780 }
781 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
782
783 /*
784 * Fill in the extended RX buffer descriptor.
785 */
786 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
787 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
788 r->bge_idx = i;
789 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
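	/*
	 * The cases below intentionally fall through, filling in address and
	 * length slots from the highest mapped segment down to segment 0, so
	 * only as many slots are used as bus_dmamap_load_mbuf_sg() returned.
	 */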
790 switch (nsegs) {
791 case 4:
792 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
793 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
794 r->bge_len3 = segs[3].ds_len;
795 case 3:
796 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
797 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
798 r->bge_len2 = segs[2].ds_len;
799 case 2:
800 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
801 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
802 r->bge_len1 = segs[1].ds_len;
803 case 1:
804 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
805 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
806 r->bge_len0 = segs[0].ds_len;
807 break;
808 default:
809 panic("%s: %d segments\n", __func__, nsegs);
810 }
811
 812	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
813 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
814 BUS_DMASYNC_PREREAD);
815
816 return (0);
817}
818
819/*
820 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 821 * that's 1MB of memory, which is a lot. For now, we fill only the first
822 * 256 ring entries and hope that our CPU is fast enough to keep up with
823 * the NIC.
824 */
825static int
826bge_init_rx_ring_std(sc)
827 struct bge_softc *sc;
828{
829 int i;
830
831 for (i = 0; i < BGE_SSLOTS; i++) {
832 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
833 return(ENOBUFS);
 834	}
835
836 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
837 sc->bge_cdata.bge_rx_std_ring_map,
838 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
839
840 sc->bge_std = i - 1;
841 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
842
843 return(0);
844}
845
846static void
847bge_free_rx_ring_std(sc)
848 struct bge_softc *sc;
849{
850 int i;
851
852 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
853 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
854 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
855 sc->bge_cdata.bge_rx_std_dmamap[i],
856 BUS_DMASYNC_POSTREAD);
857 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
858 sc->bge_cdata.bge_rx_std_dmamap[i]);
859 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
860 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
861 }
862 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
863 sizeof(struct bge_rx_bd));
864 }
865
866 return;
867}
868
869static int
870bge_init_rx_ring_jumbo(sc)
871 struct bge_softc *sc;
872{
873 struct bge_rcb *rcb;
874 int i;
875
876 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
877 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
878 return(ENOBUFS);
 879	}
880
881 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
882 sc->bge_cdata.bge_rx_jumbo_ring_map,
883 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
884
885 sc->bge_jumbo = i - 1;
886
887 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
888 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
889 BGE_RCB_FLAG_USE_EXT_RX_BD);
890 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
891
892 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
893
894 return(0);
895}
896
897static void
898bge_free_rx_ring_jumbo(sc)
899 struct bge_softc *sc;
900{
901 int i;
902
903 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
904 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
905 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
906 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
907 BUS_DMASYNC_POSTREAD);
908 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
909 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
910 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
911 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
912 }
913 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
914 sizeof(struct bge_extrx_bd));
915 }
916
917 return;
918}
919
920static void
921bge_free_tx_ring(sc)
922 struct bge_softc *sc;
923{
924 int i;
925
926 if (sc->bge_ldata.bge_tx_ring == NULL)
927 return;
928
929 for (i = 0; i < BGE_TX_RING_CNT; i++) {
930 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
931 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
932 sc->bge_cdata.bge_tx_dmamap[i],
933 BUS_DMASYNC_POSTWRITE);
934 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
935 sc->bge_cdata.bge_tx_dmamap[i]);
936 m_freem(sc->bge_cdata.bge_tx_chain[i]);
937 sc->bge_cdata.bge_tx_chain[i] = NULL;
938 }
939 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
940 sizeof(struct bge_tx_bd));
941 }
942
943 return;
944}
945
946static int
947bge_init_tx_ring(sc)
948 struct bge_softc *sc;
949{
950 sc->bge_txcnt = 0;
951 sc->bge_tx_saved_considx = 0;
952
953 /* Initialize transmit producer index for host-memory send ring. */
954 sc->bge_tx_prodidx = 0;
955 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
956
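	/*
	 * The doubled mailbox writes below follow the 5700 b2 erratum noted
	 * in the comments: apparently the first write to these mailboxes can
	 * be dropped on B-step parts, so each write is issued twice.
	 */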
957 /* 5700 b2 errata */
958 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
959 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
960
961 /* NIC-memory send ring not used; initialize to zero. */
962 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
963 /* 5700 b2 errata */
964 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
965 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
966
967 return(0);
968}
969
970static void
971bge_setmulti(sc)
972 struct bge_softc *sc;
973{
974 struct ifnet *ifp;
975 struct ifmultiaddr *ifma;
976 u_int32_t hashes[4] = { 0, 0, 0, 0 };
977 int h, i;
978
979 BGE_LOCK_ASSERT(sc);
980
981 ifp = sc->bge_ifp;
982
983 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
984 for (i = 0; i < 4; i++)
985 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
986 return;
987 }
988
989 /* First, zot all the existing filters. */
990 for (i = 0; i < 4; i++)
991 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
992
993 /* Now program new ones. */
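	/*
	 * Each address is hashed with little-endian CRC-32; the low 7 bits
	 * of the result index one of the 128 filter bits spread across the
	 * four 32-bit BGE_MAR registers.
	 */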
994 IF_ADDR_LOCK(ifp);
995 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
996 if (ifma->ifma_addr->sa_family != AF_LINK)
997 continue;
998 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
999 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1000 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1001 }
1002 IF_ADDR_UNLOCK(ifp);
1003
1004 for (i = 0; i < 4; i++)
1005 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1006
1007 return;
1008}
1009
1010/*
1011 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1012 * self-test results.
1013 */
1014static int
1015bge_chipinit(sc)
1016 struct bge_softc *sc;
1017{
1018 int i;
1019 u_int32_t dma_rw_ctl;
1020
1021 /* Set endian type before we access any non-PCI registers. */
1022 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1023
1024 /*
1025 * Check the 'ROM failed' bit on the RX CPU to see if
1026 * self-tests passed.
1027 */
1028 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1029 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1030 return(ENODEV);
1031 }
1032
1033 /* Clear the MAC control register */
1034 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1035
1036 /*
1037 * Clear the MAC statistics block in the NIC's
1038 * internal memory.
1039 */
1040 for (i = BGE_STATS_BLOCK;
1041 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1042 BGE_MEMWIN_WRITE(sc, i, 0);
1043
1044 for (i = BGE_STATUS_BLOCK;
1045 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1046 BGE_MEMWIN_WRITE(sc, i, 0);
1047
1048 /* Set up the PCI DMA control register. */
1049 if (sc->bge_pcie) {
1050 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1051 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1052 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1053 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1054 BGE_PCISTATE_PCI_BUSMODE) {
1055 /* Conventional PCI bus */
1056 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1057 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1058 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1059 (0x0F);
1060 } else {
1061 /* PCI-X bus */
1062 /*
1063 * The 5704 uses a different encoding of read/write
1064 * watermarks.
1065 */
1066 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1067 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1068 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1069 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1070 else
1071 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1072 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1073 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1074 (0x0F);
1075
1076 /*
1077 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1078 * for hardware bugs.
1079 */
1080 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1081 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1082 u_int32_t tmp;
1083
1084 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1085 if (tmp == 0x6 || tmp == 0x7)
1086 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1087 }
1088 }
1089
1090 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1091 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1092 sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1093 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1094 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1095 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1096
1097 /*
1098 * Set up general mode register.
1099 */
1100 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1101 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1102 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1103
1104 /*
1105 * Disable memory write invalidate. Apparently it is not supported
1106 * properly by these devices.
1107 */
1108 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1109
1110#ifdef __brokenalpha__
1111 /*
 1112	 * Must ensure that we do not cross an 8K (byte) boundary
1113 * for DMA reads. Our highest limit is 1K bytes. This is a
1114 * restriction on some ALPHA platforms with early revision
1115 * 21174 PCI chipsets, such as the AlphaPC 164lx
1116 */
1117 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1118 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1119#endif
1120
 1121	/* Set the timer prescaler (always 66MHz) */
1122 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1123
1124 return(0);
1125}
1126
1127static int
1128bge_blockinit(sc)
1129 struct bge_softc *sc;
1130{
1131 struct bge_rcb *rcb;
1132 bus_size_t vrcb;
1133 bge_hostaddr taddr;
1134 int i;
1135
1136 /*
1137 * Initialize the memory window pointer register so that
1138 * we can access the first 32K of internal NIC RAM. This will
1139 * allow us to set up the TX send ring RCBs and the RX return
1140 * ring RCBs, plus other things which live in NIC memory.
1141 */
1142 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1143
1144 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1145
1146 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1147 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1148 /* Configure mbuf memory pool */
1149 if (sc->bge_extram) {
1150 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1151 BGE_EXT_SSRAM);
1152 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1153 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1154 else
1155 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1156 } else {
1157 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1158 BGE_BUFFPOOL_1);
1159 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1160 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1161 else
1162 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1163 }
1164
1165 /* Configure DMA resource pool */
1166 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1167 BGE_DMA_DESCRIPTORS);
1168 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1169 }
1170
1171 /* Configure mbuf pool watermarks */
1172 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1173 sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1174 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1175 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1176 } else {
1177 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1178 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1179 }
1180 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1181
1182 /* Configure DMA resource watermarks */
1183 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1184 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1185
1186 /* Enable buffer manager */
1187 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1188 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1189 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1190 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1191
1192 /* Poll for buffer manager start indication */
1193 for (i = 0; i < BGE_TIMEOUT; i++) {
1194 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1195 break;
1196 DELAY(10);
1197 }
1198
1199 if (i == BGE_TIMEOUT) {
1200 device_printf(sc->bge_dev,
1201 "buffer manager failed to start\n");
1202 return(ENXIO);
1203 }
1204 }
1205
1206 /* Enable flow-through queues */
1207 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1208 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1209
1210 /* Wait until queue initialization is complete */
1211 for (i = 0; i < BGE_TIMEOUT; i++) {
1212 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1213 break;
1214 DELAY(10);
1215 }
1216
1217 if (i == BGE_TIMEOUT) {
1218 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1219 return(ENXIO);
1220 }
1221
1222 /* Initialize the standard RX ring control block */
1223 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1224 rcb->bge_hostaddr.bge_addr_lo =
1225 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1226 rcb->bge_hostaddr.bge_addr_hi =
1227 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1228 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1229 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1230 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1231 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1232 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1233 else
1234 rcb->bge_maxlen_flags =
1235 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1236 if (sc->bge_extram)
1237 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1238 else
1239 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1240 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1241 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1242
1243 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1244 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1245
1246 /*
1247 * Initialize the jumbo RX ring control block
1248 * We set the 'ring disabled' bit in the flags
1249 * field until we're actually ready to start
1250 * using this ring (i.e. once we set the MTU
1251 * high enough to require it).
1252 */
1253 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1254 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1255 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1256
1257 rcb->bge_hostaddr.bge_addr_lo =
1258 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1259 rcb->bge_hostaddr.bge_addr_hi =
1260 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1261 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1262 sc->bge_cdata.bge_rx_jumbo_ring_map,
1263 BUS_DMASYNC_PREREAD);
1264 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1265 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1266 if (sc->bge_extram)
1267 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1268 else
1269 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1270 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1271 rcb->bge_hostaddr.bge_addr_hi);
1272 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1273 rcb->bge_hostaddr.bge_addr_lo);
1274
1275 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1276 rcb->bge_maxlen_flags);
1277 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1278
1279 /* Set up dummy disabled mini ring RCB */
1280 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1281 rcb->bge_maxlen_flags =
1282 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1283 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1284 rcb->bge_maxlen_flags);
1285 }
1286
1287 /*
 1288	 * Set the BD ring replenish thresholds. The recommended
1289 * values are 1/8th the number of descriptors allocated to
1290 * each ring.
1291 */
1292 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1293 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1294
1295 /*
1296 * Disable all unused send rings by setting the 'ring disabled'
1297 * bit in the flags field of all the TX send ring control blocks.
1298 * These are located in NIC memory.
1299 */
1300 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1301 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1302 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1303 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1304 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1305 vrcb += sizeof(struct bge_rcb);
1306 }
1307
1308 /* Configure TX RCB 0 (we use only the first ring) */
1309 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1310 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1311 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1312 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1313 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1314 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1315 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1316 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1317 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1318 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1319
1320 /* Disable all unused RX return rings */
1321 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1322 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1323 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1324 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1325 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1326 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1327 BGE_RCB_FLAG_RING_DISABLED));
1328 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1329 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1330 (i * (sizeof(u_int64_t))), 0);
1331 vrcb += sizeof(struct bge_rcb);
1332 }
1333
1334 /* Initialize RX ring indexes */
1335 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1336 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1337 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1338
1339 /*
1340 * Set up RX return ring 0
1341 * Note that the NIC address for RX return rings is 0x00000000.
1342 * The return rings live entirely within the host, so the
1343 * nicaddr field in the RCB isn't used.
1344 */
1345 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1346 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1347 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1348 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1349 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1350 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1351 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1352
1353 /* Set random backoff seed for TX */
1354 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1355 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1356 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1357 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1358 BGE_TX_BACKOFF_SEED_MASK);
1359
1360 /* Set inter-packet gap */
1361 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1362
1363 /*
1364 * Specify which ring to use for packets that don't match
1365 * any RX rules.
1366 */
1367 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1368
1369 /*
1370 * Configure number of RX lists. One interrupt distribution
1371 * list, sixteen active lists, one bad frames class.
1372 */
1373 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1374
 1375	/* Initialize RX list placement stats mask. */
1376 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1377 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1378
1379 /* Disable host coalescing until we get it set up */
1380 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1381
1382 /* Poll to make sure it's shut down. */
1383 for (i = 0; i < BGE_TIMEOUT; i++) {
1384 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1385 break;
1386 DELAY(10);
1387 }
1388
1389 if (i == BGE_TIMEOUT) {
1390 device_printf(sc->bge_dev,
1391 "host coalescing engine failed to idle\n");
1392 return(ENXIO);
1393 }
1394
1395 /* Set up host coalescing defaults */
1396 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1397 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1398 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1399 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1400 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1401 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1402 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1403 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1404 }
1405 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1406 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1407
1408 /* Set up address of statistics block */
1409 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1410 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1411 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1412 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1413 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1414 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1415 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1416 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1417 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1418 }
1419
1420 /* Set up address of status block */
1421 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1422 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1423 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1424 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1425 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1426 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1427
1428 /* Turn on host coalescing state machine */
1429 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1430
1431 /* Turn on RX BD completion state machine and enable attentions */
1432 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1433 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1434
1435 /* Turn on RX list placement state machine */
1436 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1437
1438 /* Turn on RX list selector state machine. */
1439 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1440 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1441 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1442
1443 /* Turn on DMA, clear stats */
1444 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1445 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1446 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1447 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1448 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1449
1450 /* Set misc. local control, enable interrupts on attentions */
1451 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1452
1453#ifdef notdef
1454 /* Assert GPIO pins for PHY reset */
1455 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1456 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1457 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1458 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1459#endif
1460
1461 /* Turn on DMA completion state machine */
1462 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1463 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1464 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1465
1466 /* Turn on write DMA state machine */
1467 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1468 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1469
1470 /* Turn on read DMA state machine */
1471 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1472 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1473
1474 /* Turn on RX data completion state machine */
1475 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1476
1477 /* Turn on RX BD initiator state machine */
1478 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1479
1480 /* Turn on RX data and RX BD initiator state machine */
1481 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1482
1483 /* Turn on Mbuf cluster free state machine */
1484 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1485 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1486 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1487
1488 /* Turn on send BD completion state machine */
1489 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1490
1491 /* Turn on send data completion state machine */
1492 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1493
1494 /* Turn on send data initiator state machine */
1495 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1496
1497 /* Turn on send BD initiator state machine */
1498 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1499
1500 /* Turn on send BD selector state machine */
1501 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1502
1503 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1504 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1505 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1506
1507 /* ack/clear link change events */
1508 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1509 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1510 BGE_MACSTAT_LINK_CHANGED);
1511 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1512
1513 /* Enable PHY auto polling (for MII/GMII only) */
1514 if (sc->bge_tbi) {
1515 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1516 } else {
1517 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1518 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1519 sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
1520 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1521 BGE_EVTENB_MI_INTERRUPT);
1522 }
1523
1524 /*
1525 * Clear any pending link state attention.
1526 * Otherwise some link state change events may be lost until attention
1527 * is cleared by bge_intr() -> bge_link_upd() sequence.
1528 * It's not necessary on newer BCM chips - perhaps enabling link
1529 * state change attentions implies clearing pending attention.
1530 */
1531 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1532 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1533 BGE_MACSTAT_LINK_CHANGED);
1534
1535 /* Enable link state change attentions. */
1536 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1537
1538 return(0);
1539}
1540
1541/*
1542 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1543 * against our list and return its name if we find a match. Note
1544 * that since the Broadcom controller contains VPD support, we
1545 * can get the device name string from the controller itself instead
1546 * of the compiled-in string. This is a little slow, but it guarantees
1547 * we'll always announce the right product name.
1548 */
1549static int
1550bge_probe(dev)
1551 device_t dev;
1552{
1553 struct bge_type *t;
1554 struct bge_softc *sc;
1555 char *descbuf;
1556
1557 t = bge_devs;
1558
1559 sc = device_get_softc(dev);
1560 bzero(sc, sizeof(struct bge_softc));
1561 sc->bge_dev = dev;
1562
1563 while(t->bge_name != NULL) {
1564 if ((pci_get_vendor(dev) == t->bge_vid) &&
1565 (pci_get_device(dev) == t->bge_did)) {
1566#ifdef notdef
1567 bge_vpd_read(sc);
1568 device_set_desc(dev, sc->bge_vpd_prodname);
1569#endif
1570 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1571 if (descbuf == NULL)
1572 return(ENOMEM);
1573 snprintf(descbuf, BGE_DEVDESC_MAX,
1574 "%s, ASIC rev. %#04x", t->bge_name,
1575 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1576 device_set_desc_copy(dev, descbuf);
1577 if (pci_get_subvendor(dev) == DELL_VENDORID)
1578 sc->bge_no_3_led = 1;
1579 free(descbuf, M_TEMP);
1580 return(0);
1581 }
1582 t++;
1583 }
1584
1585 return(ENXIO);
1586}
1587
1588static void
1589bge_dma_free(sc)
1590 struct bge_softc *sc;
1591{
1592 int i;
1593
1594
1595 /* Destroy DMA maps for RX buffers */
1596
1597 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1598 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1599 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1600 sc->bge_cdata.bge_rx_std_dmamap[i]);
1601 }
1602
1603 /* Destroy DMA maps for jumbo RX buffers */
1604
1605 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1606 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1607 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1608 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1609 }
1610
1611 /* Destroy DMA maps for TX buffers */
1612
1613 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1614 if (sc->bge_cdata.bge_tx_dmamap[i])
1615 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1616 sc->bge_cdata.bge_tx_dmamap[i]);
1617 }
1618
1619 if (sc->bge_cdata.bge_mtag)
1620 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1621
1622
1623 /* Destroy standard RX ring */
1624
1625 if (sc->bge_cdata.bge_rx_std_ring_map)
1626 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1627 sc->bge_cdata.bge_rx_std_ring_map);
1628 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1629 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1630 sc->bge_ldata.bge_rx_std_ring,
1631 sc->bge_cdata.bge_rx_std_ring_map);
1632
1633 if (sc->bge_cdata.bge_rx_std_ring_tag)
1634 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1635
1636 /* Destroy jumbo RX ring */
1637
1638 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1639 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1640 sc->bge_cdata.bge_rx_jumbo_ring_map);
1641
1642 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1643 sc->bge_ldata.bge_rx_jumbo_ring)
1644 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1645 sc->bge_ldata.bge_rx_jumbo_ring,
1646 sc->bge_cdata.bge_rx_jumbo_ring_map);
1647
1648 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1649 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1650
1651 /* Destroy RX return ring */
1652
1653 if (sc->bge_cdata.bge_rx_return_ring_map)
1654 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1655 sc->bge_cdata.bge_rx_return_ring_map);
1656
1657 if (sc->bge_cdata.bge_rx_return_ring_map &&
1658 sc->bge_ldata.bge_rx_return_ring)
1659 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1660 sc->bge_ldata.bge_rx_return_ring,
1661 sc->bge_cdata.bge_rx_return_ring_map);
1662
1663 if (sc->bge_cdata.bge_rx_return_ring_tag)
1664 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1665
1666 /* Destroy TX ring */
1667
1668 if (sc->bge_cdata.bge_tx_ring_map)
1669 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1670 sc->bge_cdata.bge_tx_ring_map);
1671
1672 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1673 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1674 sc->bge_ldata.bge_tx_ring,
1675 sc->bge_cdata.bge_tx_ring_map);
1676
1677 if (sc->bge_cdata.bge_tx_ring_tag)
1678 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1679
1680 /* Destroy status block */
1681
1682 if (sc->bge_cdata.bge_status_map)
1683 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1684 sc->bge_cdata.bge_status_map);
1685
1686 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1687 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1688 sc->bge_ldata.bge_status_block,
1689 sc->bge_cdata.bge_status_map);
1690
1691 if (sc->bge_cdata.bge_status_tag)
1692 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1693
1694 /* Destroy statistics block */
1695
1696 if (sc->bge_cdata.bge_stats_map)
1697 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1698 sc->bge_cdata.bge_stats_map);
1699
1700 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1701 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1702 sc->bge_ldata.bge_stats,
1703 sc->bge_cdata.bge_stats_map);
1704
1705 if (sc->bge_cdata.bge_stats_tag)
1706 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1707
1708 /* Destroy the parent tag */
1709
1710 if (sc->bge_cdata.bge_parent_tag)
1711 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1712
1713 return;
1714}
1715
1716static int
1717bge_dma_alloc(dev)
1718 device_t dev;
1719{
1720 struct bge_softc *sc;
1721 int i, error;
1722 struct bge_dmamap_arg ctx;
1723
1724 sc = device_get_softc(dev);
1725
1726 /*
1727 * Allocate the parent bus DMA tag appropriate for PCI.
1728 */
1729 error = bus_dma_tag_create(NULL, /* parent */
1730 PAGE_SIZE, 0, /* alignment, boundary */
1731 BUS_SPACE_MAXADDR, /* lowaddr */
1732 BUS_SPACE_MAXADDR, /* highaddr */
1733 NULL, NULL, /* filter, filterarg */
1734 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1735 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1736 0, /* flags */
1737 NULL, NULL, /* lockfunc, lockarg */
1738 &sc->bge_cdata.bge_parent_tag);
1739
1740 if (error != 0) {
1741 device_printf(sc->bge_dev,
1742 "could not allocate parent dma tag\n");
1743 return (ENOMEM);
1744 }
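	/*
	 * Each DMA ring/block below is set up with the same three-step
	 * busdma pattern (a sketch; the concrete calls follow):
	 *
	 *	bus_dma_tag_create(parent, ..., ring_sz, 1, ring_sz, ...);
	 *	bus_dmamem_alloc(tag, &vaddr, BUS_DMA_NOWAIT, &map);
	 *	bus_dmamap_load(tag, map, vaddr, ring_sz,
	 *	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	 *
	 * bge_dma_map_addr() stores the single segment's bus address in
	 * ctx.bge_busaddr, which is then saved as the ring's paddr.
	 */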
1745
1746 /*
1747	 * Create tag for RX and TX mbufs.
1748 */
1749 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1750 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1751 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1752 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1753
1754 if (error) {
1755 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1756 return (ENOMEM);
1757 }
1758
1759 /* Create DMA maps for RX buffers */
1760
1761 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1762 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1763 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1764 if (error) {
1765 device_printf(sc->bge_dev,
1766 "can't create DMA map for RX\n");
1767 return(ENOMEM);
1768 }
1769 }
1770
1771 /* Create DMA maps for TX buffers */
1772
1773 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1774 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1775 &sc->bge_cdata.bge_tx_dmamap[i]);
1776 if (error) {
1777 device_printf(sc->bge_dev,
1778			    "can't create DMA map for TX\n");
1779 return(ENOMEM);
1780 }
1781 }
1782
1783 /* Create tag for standard RX ring */
1784
1785 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1786 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1787 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1788 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1789
1790 if (error) {
1791 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1792 return (ENOMEM);
1793 }
1794
1795 /* Allocate DMA'able memory for standard RX ring */
1796
1797 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1798 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1799 &sc->bge_cdata.bge_rx_std_ring_map);
1800 if (error)
1801 return (ENOMEM);
1802
1803 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1804
1805 /* Load the address of the standard RX ring */
1806
1807 ctx.bge_maxsegs = 1;
1808 ctx.sc = sc;
1809
1810 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1811 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1812 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1813
1814 if (error)
1815 return (ENOMEM);
1816
1817 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1818
1819 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1820 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1821
1822 /*
1823 * Create tag for jumbo mbufs.
1824 * This is really a bit of a kludge. We allocate a special
1825 * jumbo buffer pool which (thanks to the way our DMA
1826 * memory allocation works) will consist of contiguous
1827 * pages. This means that even though a jumbo buffer might
1828 * be larger than a page size, we don't really need to
1829 * map it into more than one DMA segment. However, the
1830 * default mbuf tag will result in multi-segment mappings,
1831 * so we have to create a special jumbo mbuf tag that
1832 * lets us get away with mapping the jumbo buffers as
1833 * a single segment. I think eventually the driver should
1834 * be changed so that it uses ordinary mbufs and cluster
1835 * buffers, i.e. jumbo frames can span multiple DMA
1836 * descriptors. But that's a project for another day.
1837 */
1838
1839 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1840 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1841 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1842 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1843
1844 if (error) {
1845 device_printf(sc->bge_dev,
1846 "could not allocate dma tag\n");
1847 return (ENOMEM);
1848 }
1849
1850 /* Create tag for jumbo RX ring */
1851 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1852 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1853 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1854 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1855
1856 if (error) {
1857 device_printf(sc->bge_dev,
1858 "could not allocate dma tag\n");
1859 return (ENOMEM);
1860 }
1861
1862 /* Allocate DMA'able memory for jumbo RX ring */
1863 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1864 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1865 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1866 &sc->bge_cdata.bge_rx_jumbo_ring_map);
1867 if (error)
1868 return (ENOMEM);
1869
1870 /* Load the address of the jumbo RX ring */
1871 ctx.bge_maxsegs = 1;
1872 ctx.sc = sc;
1873
1874 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1875 sc->bge_cdata.bge_rx_jumbo_ring_map,
1876 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1877 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1878
1879 if (error)
1880 return (ENOMEM);
1881
1882 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1883
1884 /* Create DMA maps for jumbo RX buffers */
1885
1886 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1887 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1888 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1889 if (error) {
1890 device_printf(sc->bge_dev,
1891				    "can't create DMA map for jumbo RX\n");
1892 return(ENOMEM);
1893 }
1894 }
1895
1896 }
1897
1898 /* Create tag for RX return ring */
1899
1900 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1901 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1902 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1903 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1904
1905 if (error) {
1906 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1907 return (ENOMEM);
1908 }
1909
1910 /* Allocate DMA'able memory for RX return ring */
1911
1912 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1913 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1914 &sc->bge_cdata.bge_rx_return_ring_map);
1915 if (error)
1916 return (ENOMEM);
1917
1918 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1919 BGE_RX_RTN_RING_SZ(sc));
1920
1921 /* Load the address of the RX return ring */
1922
1923 ctx.bge_maxsegs = 1;
1924 ctx.sc = sc;
1925
1926 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1927 sc->bge_cdata.bge_rx_return_ring_map,
1928 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1929 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1930
1931 if (error)
1932 return (ENOMEM);
1933
1934 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1935
1936 /* Create tag for TX ring */
1937
1938 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1939 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1940 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1941 &sc->bge_cdata.bge_tx_ring_tag);
1942
1943 if (error) {
1944 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1945 return (ENOMEM);
1946 }
1947
1948 /* Allocate DMA'able memory for TX ring */
1949
1950 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1951 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1952 &sc->bge_cdata.bge_tx_ring_map);
1953 if (error)
1954 return (ENOMEM);
1955
1956 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1957
1958 /* Load the address of the TX ring */
1959
1960 ctx.bge_maxsegs = 1;
1961 ctx.sc = sc;
1962
1963 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1964 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1965 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1966
1967 if (error)
1968 return (ENOMEM);
1969
1970 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1971
1972 /* Create tag for status block */
1973
1974 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1975 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1976 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1977 NULL, NULL, &sc->bge_cdata.bge_status_tag);
1978
1979 if (error) {
1980 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1981 return (ENOMEM);
1982 }
1983
1984 /* Allocate DMA'able memory for status block */
1985
1986 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1987 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1988 &sc->bge_cdata.bge_status_map);
1989 if (error)
1990 return (ENOMEM);
1991
1992 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1993
1994 /* Load the address of the status block */
1995
1996 ctx.sc = sc;
1997 ctx.bge_maxsegs = 1;
1998
1999 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2000 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2001 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2002
2003 if (error)
2004 return (ENOMEM);
2005
2006 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2007
2008 /* Create tag for statistics block */
2009
2010 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2011 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2012 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2013 &sc->bge_cdata.bge_stats_tag);
2014
2015 if (error) {
2016 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2017 return (ENOMEM);
2018 }
2019
2020 /* Allocate DMA'able memory for statistics block */
2021
2022 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2023 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2024 &sc->bge_cdata.bge_stats_map);
2025 if (error)
2026 return (ENOMEM);
2027
2028 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2029
2030	/* Load the address of the statistics block */
2031
2032 ctx.sc = sc;
2033 ctx.bge_maxsegs = 1;
2034
2035 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2036 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2037 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2038
2039 if (error)
2040 return (ENOMEM);
2041
2042 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2043
2044 return(0);
2045}
2046
2047static int
2048bge_attach(dev)
2049 device_t dev;
2050{
2051 struct ifnet *ifp;
2052 struct bge_softc *sc;
2053 u_int32_t hwcfg = 0;
2054 u_int32_t mac_tmp = 0;
2055 u_char eaddr[6];
2056 int error = 0, rid;
2057
2058 sc = device_get_softc(dev);
2059 sc->bge_dev = dev;
2060
2061 /*
2062 * Map control/status registers.
2063 */
2064 pci_enable_busmaster(dev);
2065
2066 rid = BGE_PCI_BAR0;
2067 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2068 RF_ACTIVE|PCI_RF_DENSE);
2069
2070 if (sc->bge_res == NULL) {
2071 device_printf (sc->bge_dev, "couldn't map memory\n");
2072 error = ENXIO;
2073 goto fail;
2074 }
2075
2076 sc->bge_btag = rman_get_bustag(sc->bge_res);
2077 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2078
2079 /* Allocate interrupt */
2080 rid = 0;
2081
2082 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2083 RF_SHAREABLE | RF_ACTIVE);
2084
2085 if (sc->bge_irq == NULL) {
2086 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2087 error = ENXIO;
2088 goto fail;
2089 }
2090
2091 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2092
2093 /* Save ASIC rev. */
2094
2095 sc->bge_chipid =
2096 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2097 BGE_PCIMISCCTL_ASICREV;
2098 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2099 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2100
2101 /*
2102 * Treat the 5714 and the 5752 like the 5750 until we have more info
2103	 * on these chips.
2104 */
2105 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2106 sc->bge_asicrev == BGE_ASICREV_BCM5752)
2107 sc->bge_asicrev = BGE_ASICREV_BCM5750;
2108
2109 /*
2110	 * XXX: Broadcom Linux driver. Not in specs or errata.
2111 * PCI-Express?
2112 */
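	/*
	 * This walks one link of the PCI capability list by hand: byte 1
	 * of the dword at the MSI capability is the "next capability"
	 * pointer.  If it points at BGE_PCIE_CAPID_REG and the capability
	 * ID found there matches, the device is assumed to be PCI Express.
	 */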
2113 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2114 u_int32_t v;
2115
2116 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2117 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2118 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2119 if ((v & 0xff) == BGE_PCIE_CAPID)
2120 sc->bge_pcie = 1;
2121 }
2122 }
2123
2124 /* Try to reset the chip. */
2125 bge_reset(sc);
2126
2127 if (bge_chipinit(sc)) {
2128 device_printf(sc->bge_dev, "chip initialization failed\n");
2129 bge_release_resources(sc);
2130 error = ENXIO;
2131 goto fail;
2132 }
2133
2134 /*
2135	 * Get the station address from NIC memory if present, else from the EEPROM.
2136 */
2137 mac_tmp = bge_readmem_ind(sc, 0x0c14);
2138 if ((mac_tmp >> 16) == 0x484b) {
2139 eaddr[0] = (u_char)(mac_tmp >> 8);
2140 eaddr[1] = (u_char)mac_tmp;
2141 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2142 eaddr[2] = (u_char)(mac_tmp >> 24);
2143 eaddr[3] = (u_char)(mac_tmp >> 16);
2144 eaddr[4] = (u_char)(mac_tmp >> 8);
2145 eaddr[5] = (u_char)mac_tmp;
2146 } else if (bge_read_eeprom(sc, eaddr,
2147 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2148 device_printf(sc->bge_dev, "failed to read station address\n");
2149 bge_release_resources(sc);
2150 error = ENXIO;
2151 goto fail;
2152 }
2153
2154	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2155 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2156 sc->bge_asicrev == BGE_ASICREV_BCM5750)
2157 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2158 else
2159 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2160
2161 if (bge_dma_alloc(dev)) {
2162 device_printf(sc->bge_dev,
2163 "failed to allocate DMA resources\n");
2164 bge_release_resources(sc);
2165 error = ENXIO;
2166 goto fail;
2167 }
2168
2169 /* Set default tuneable values. */
2170 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2171 sc->bge_rx_coal_ticks = 150;
2172 sc->bge_tx_coal_ticks = 150;
2173 sc->bge_rx_max_coal_bds = 64;
2174 sc->bge_tx_max_coal_bds = 128;
2175
2176 /* Set up ifnet structure */
2177 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2178 if (ifp == NULL) {
2179 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2180 bge_release_resources(sc);
2181 error = ENXIO;
2182 goto fail;
2183 }
2184 ifp->if_softc = sc;
2185 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2186 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2187 ifp->if_ioctl = bge_ioctl;
2188 ifp->if_start = bge_start;
2189 ifp->if_watchdog = bge_watchdog;
2190 ifp->if_init = bge_init;
2191 ifp->if_mtu = ETHERMTU;
2192 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2193 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2194 IFQ_SET_READY(&ifp->if_snd);
2195 ifp->if_hwassist = BGE_CSUM_FEATURES;
2196 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2197 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2198 ifp->if_capenable = ifp->if_capabilities;
2199#ifdef DEVICE_POLLING
2200 ifp->if_capabilities |= IFCAP_POLLING;
2201#endif
2202
2203 /*
2204 * 5700 B0 chips do not support checksumming correctly due
2205 * to hardware bugs.
2206 */
2207 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2208 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2209		ifp->if_capenable &= ~IFCAP_HWCSUM;
2210 ifp->if_hwassist = 0;
2211 }
2212
2213 /*
2214 * Figure out what sort of media we have by checking the
2215 * hardware config word in the first 32k of NIC internal memory,
2216 * or fall back to examining the EEPROM if necessary.
2217 * Note: on some BCM5700 cards, this value appears to be unset.
2218 * If that's the case, we have to rely on identifying the NIC
2219 * by its PCI subsystem ID, as we do below for the SysKonnect
2220 * SK-9D41.
2221 */
2222 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2223 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2224 else {
2225 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2226 sizeof(hwcfg))) {
2227 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2228 bge_release_resources(sc);
2229 error = ENXIO;
2230 goto fail;
2231 }
2232 hwcfg = ntohl(hwcfg);
2233 }
2234
2235 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2236 sc->bge_tbi = 1;
2237
2238 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2239 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2240 sc->bge_tbi = 1;
2241
2242 if (sc->bge_tbi) {
2243 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2244 bge_ifmedia_upd, bge_ifmedia_sts);
2245 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2246 ifmedia_add(&sc->bge_ifmedia,
2247 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2248 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2249 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2250 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2251 } else {
2252 /*
2253 * Do transceiver setup.
2254 */
2255 if (mii_phy_probe(dev, &sc->bge_miibus,
2256 bge_ifmedia_upd, bge_ifmedia_sts)) {
2257 device_printf(sc->bge_dev, "MII without any PHY!\n");
2258 bge_release_resources(sc);
2259 error = ENXIO;
2260 goto fail;
2261 }
2262 }
2263
2264 /*
2265 * When using the BCM5701 in PCI-X mode, data corruption has
2266 * been observed in the first few bytes of some received packets.
2267 * Aligning the packet buffer in memory eliminates the corruption.
2268 * Unfortunately, this misaligns the packet payloads. On platforms
2269 * which do not support unaligned accesses, we will realign the
2270 * payloads by copying the received packets.
2271 */
2272 switch (sc->bge_chipid) {
2273 case BGE_CHIPID_BCM5701_A0:
2274 case BGE_CHIPID_BCM5701_B0:
2275 case BGE_CHIPID_BCM5701_B2:
2276 case BGE_CHIPID_BCM5701_B5:
2277 /* If in PCI-X mode, work around the alignment bug. */
2278 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2279 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2280 BGE_PCISTATE_PCI_BUSSPEED)
2281 sc->bge_rx_alignment_bug = 1;
2282 break;
2283 }
2284
2285 /*
2286 * Call MI attach routine.
2287 */
2288 ether_ifattach(ifp, eaddr);
2289 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2290
2291 /*
2292 * Hookup IRQ last.
2293 */
2294 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2295 bge_intr, sc, &sc->bge_intrhand);
2296
2297 if (error) {
2298 bge_detach(dev);
2299 device_printf(sc->bge_dev, "couldn't set up irq\n");
2300 }
2301
2302fail:
2303 return(error);
2304}
2305
2306static int
2307bge_detach(dev)
2308 device_t dev;
2309{
2310 struct bge_softc *sc;
2311 struct ifnet *ifp;
2312
2313 sc = device_get_softc(dev);
2314 ifp = sc->bge_ifp;
2315
2316#ifdef DEVICE_POLLING
2317 if (ifp->if_capenable & IFCAP_POLLING)
2318 ether_poll_deregister(ifp);
2319#endif
2320
2321 BGE_LOCK(sc);
2322 bge_stop(sc);
2323 bge_reset(sc);
2324 BGE_UNLOCK(sc);
2325
2326 ether_ifdetach(ifp);
2327
2328 if (sc->bge_tbi) {
2329 ifmedia_removeall(&sc->bge_ifmedia);
2330 } else {
2331 bus_generic_detach(dev);
2332 device_delete_child(dev, sc->bge_miibus);
2333 }
2334
2335 bge_release_resources(sc);
2336
2337 return(0);
2338}
2339
2340static void
2341bge_release_resources(sc)
2342 struct bge_softc *sc;
2343{
2344 device_t dev;
2345
2346 dev = sc->bge_dev;
2347
2348 if (sc->bge_vpd_prodname != NULL)
2349 free(sc->bge_vpd_prodname, M_DEVBUF);
2350
2351 if (sc->bge_vpd_readonly != NULL)
2352 free(sc->bge_vpd_readonly, M_DEVBUF);
2353
2354 if (sc->bge_intrhand != NULL)
2355 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2356
2357 if (sc->bge_irq != NULL)
2358 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2359
2360 if (sc->bge_res != NULL)
2361 bus_release_resource(dev, SYS_RES_MEMORY,
2362 BGE_PCI_BAR0, sc->bge_res);
2363
2364 if (sc->bge_ifp != NULL)
2365 if_free(sc->bge_ifp);
2366
2367 bge_dma_free(sc);
2368
2369 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2370 BGE_LOCK_DESTROY(sc);
2371
2372 return;
2373}
2374
2375static void
2376bge_reset(sc)
2377 struct bge_softc *sc;
2378{
2379 device_t dev;
2380 u_int32_t cachesize, command, pcistate, reset;
2381 int i, val = 0;
2382
2383 dev = sc->bge_dev;
2384
2385 /* Save some important PCI state. */
2386 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2387 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2388 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2389
2390 pci_write_config(dev, BGE_PCI_MISC_CTL,
2391 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2392 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2393
2394 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2395
2396 /* XXX: Broadcom Linux driver. */
2397 if (sc->bge_pcie) {
2398 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2399 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2400 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2401 /* Prevent PCIE link training during global reset */
2402 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2403 reset |= (1<<29);
2404 }
2405 }
2406
2407 /* Issue global reset */
2408 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2409
2410 DELAY(1000);
2411
2412 /* XXX: Broadcom Linux driver. */
2413 if (sc->bge_pcie) {
2414 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2415 uint32_t v;
2416
2417 DELAY(500000); /* wait for link training to complete */
2418 v = pci_read_config(dev, 0xc4, 4);
2419 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2420 }
2421 /* Set PCIE max payload size and clear error status. */
2422 pci_write_config(dev, 0xd8, 0xf5000, 4);
2423 }
2424
2425 /* Reset some of the PCI state that got zapped by reset */
2426 pci_write_config(dev, BGE_PCI_MISC_CTL,
2427 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2428 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2429 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2430 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2431 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2432
2433 /* Enable memory arbiter. */
2434 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2435 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2436 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2437
2438 /*
2439 * Prevent PXE restart: write a magic number to the
2440 * general communications memory at 0xB50.
2441 */
2442 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2443 /*
2444 * Poll the value location we just wrote until
2445 * we see the 1's complement of the magic number.
2446 * This indicates that the firmware initialization
2447 * is complete.
2448 */
2449 for (i = 0; i < BGE_TIMEOUT; i++) {
2450 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2451 if (val == ~BGE_MAGIC_NUMBER)
2452 break;
2453 DELAY(10);
2454 }
2455
2456 if (i == BGE_TIMEOUT) {
2457 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2458 return;
2459 }
2460
2461 /*
2462 * XXX Wait for the value of the PCISTATE register to
2463 * return to its original pre-reset state. This is a
2464 * fairly good indicator of reset completion. If we don't
2465 * wait for the reset to fully complete, trying to read
2466 * from the device's non-PCI registers may yield garbage
2467 * results.
2468 */
2469 for (i = 0; i < BGE_TIMEOUT; i++) {
2470 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2471 break;
2472 DELAY(10);
2473 }
2474
2475 /* Fix up byte swapping */
2476 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2477 BGE_MODECTL_BYTESWAP_DATA);
2478
2479 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2480
2481 /*
2482 * The 5704 in TBI mode apparently needs some special
2483	 * adjustment to ensure the SERDES drive level is set
2484 * to 1.2V.
2485 */
2486 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2487 uint32_t serdescfg;
2488 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2489 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2490 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2491 }
2492
2493 /* XXX: Broadcom Linux driver. */
2494 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2495 uint32_t v;
2496
2497 v = CSR_READ_4(sc, 0x7c00);
2498 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2499 }
2500 DELAY(10000);
2501
2502 return;
2503}
2504
2505/*
2506 * Frame reception handling. This is called if there's a frame
2507 * on the receive return list.
2508 *
2509 * Note: we have to be able to handle two possibilities here:
2510 * 1) the frame is from the jumbo receive ring
2511 * 2) the frame is from the standard receive ring
2512 */
2513
2514static void
2515bge_rxeof(sc)
2516 struct bge_softc *sc;
2517{
2518 struct ifnet *ifp;
2519 int stdcnt = 0, jumbocnt = 0;
2520
2521 BGE_LOCK_ASSERT(sc);
2522
2523 ifp = sc->bge_ifp;
2524
2525 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2526 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2527 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2528 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2529 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2530 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2531 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2532 sc->bge_cdata.bge_rx_jumbo_ring_map,
2533 BUS_DMASYNC_POSTREAD);
2534 }
2535
2536 while(sc->bge_rx_saved_considx !=
2537 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2538 struct bge_rx_bd *cur_rx;
2539 u_int32_t rxidx;
2540 struct ether_header *eh;
2541 struct mbuf *m = NULL;
2542 u_int16_t vlan_tag = 0;
2543 int have_tag = 0;
2544
2545#ifdef DEVICE_POLLING
2546 if (ifp->if_capenable & IFCAP_POLLING) {
2547 if (sc->rxcycles <= 0)
2548 break;
2549 sc->rxcycles--;
2550 }
2551#endif
2552
2553 cur_rx =
2554 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2555
2556 rxidx = cur_rx->bge_idx;
2557 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2558
2559 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2560 have_tag = 1;
2561 vlan_tag = cur_rx->bge_vlan_tag;
2562 }
2563
2564 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2565 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2566 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2567 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2568 BUS_DMASYNC_POSTREAD);
2569 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2570 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2571 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2572 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2573 jumbocnt++;
2574 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2575 ifp->if_ierrors++;
2576 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2577 continue;
2578 }
2579 if (bge_newbuf_jumbo(sc,
2580 sc->bge_jumbo, NULL) == ENOBUFS) {
2581 ifp->if_ierrors++;
2582 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2583 continue;
2584 }
2585 } else {
2586 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2587 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2588 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2589 BUS_DMASYNC_POSTREAD);
2590 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2591 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2592 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2593 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2594 stdcnt++;
2595 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2596 ifp->if_ierrors++;
2597 bge_newbuf_std(sc, sc->bge_std, m);
2598 continue;
2599 }
2600 if (bge_newbuf_std(sc, sc->bge_std,
2601 NULL) == ENOBUFS) {
2602 ifp->if_ierrors++;
2603 bge_newbuf_std(sc, sc->bge_std, m);
2604 continue;
2605 }
2606 }
2607
2608 ifp->if_ipackets++;
2609#ifndef __NO_STRICT_ALIGNMENT
2610 /*
2611 * For architectures with strict alignment we must make sure
2612 * the payload is aligned.
2613 */
2614 if (sc->bge_rx_alignment_bug) {
2615 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2616 cur_rx->bge_len);
2617 m->m_data += ETHER_ALIGN;
2618 }
2619#endif
2620 eh = mtod(m, struct ether_header *);
2621 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2622 m->m_pkthdr.rcvif = ifp;
2623
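		/*
		 * RX checksum offload: bge_ip_csum holds the hardware's
		 * IP header checksum, where 0xffff (hence the XOR test)
		 * apparently indicates a header that verified; the TCP/UDP
		 * checksum is handed up as csum_data for the stack to
		 * finish validating against the pseudo-header.
		 */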
2624 if (ifp->if_capenable & IFCAP_RXCSUM) {
2625 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2626 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2627 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2628 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2629 }
2630 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2631 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2632 m->m_pkthdr.csum_data =
2633 cur_rx->bge_tcp_udp_csum;
2634 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2635 }
2636 }
2637
2638 /*
2639 * If we received a packet with a vlan tag,
2640 * attach that information to the packet.
2641 */
2642 if (have_tag) {
2643 VLAN_INPUT_TAG(ifp, m, vlan_tag);
2644 if (m == NULL)
2645 continue;
2646 }
2647
2648 BGE_UNLOCK(sc);
2649 (*ifp->if_input)(ifp, m);
2650 BGE_LOCK(sc);
2651 }
2652
2653 if (stdcnt > 0)
2654 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2655 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2656 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2657 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2658 if (jumbocnt > 0)
2659 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2660 sc->bge_cdata.bge_rx_jumbo_ring_map,
2661 BUS_DMASYNC_PREWRITE);
2662 }
2663
2664 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2665 if (stdcnt)
2666 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2667 if (jumbocnt)
2668 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2669
2670 return;
2671}
2672
2673static void
2674bge_txeof(sc)
2675 struct bge_softc *sc;
2676{
2677 struct bge_tx_bd *cur_tx = NULL;
2678 struct ifnet *ifp;
2679
2680 BGE_LOCK_ASSERT(sc);
2681
2682 ifp = sc->bge_ifp;
2683
2684 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2685 sc->bge_cdata.bge_tx_ring_map,
2686 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2687 /*
2688 * Go through our tx ring and free mbufs for those
2689 * frames that have been sent.
2690 */
2691 while (sc->bge_tx_saved_considx !=
2692 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2693 u_int32_t idx = 0;
2694
2695 idx = sc->bge_tx_saved_considx;
2696 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2697 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2698 ifp->if_opackets++;
2699 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2700 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2701 sc->bge_cdata.bge_tx_dmamap[idx],
2702 BUS_DMASYNC_POSTWRITE);
2703 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2704 sc->bge_cdata.bge_tx_dmamap[idx]);
2705 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2706 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2707 }
2708 sc->bge_txcnt--;
2709 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2710 ifp->if_timer = 0;
2711 }
2712
2713 if (cur_tx != NULL)
2714 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2715
2716 return;
2717}
2718
2719#ifdef DEVICE_POLLING
2720static void
2721bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2722{
2723 struct bge_softc *sc = ifp->if_softc;
2724
2725 BGE_LOCK(sc);
2726 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2727 bge_poll_locked(ifp, cmd, count);
2728 BGE_UNLOCK(sc);
2729}
2730
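/*
 * Locked polling body: "count" caps how many RX descriptors bge_rxeof()
 * may consume on this pass (via sc->rxcycles).  For POLL_AND_CHECK_STATUS
 * the status block is also synced and its status word read-and-cleared,
 * so link changes are still noticed while the interrupt is masked.
 */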
2731static void
2732bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2733{
2734 struct bge_softc *sc = ifp->if_softc;
2735
2736 BGE_LOCK_ASSERT(sc);
2737
2738 sc->rxcycles = count;
2739 bge_rxeof(sc);
2740 bge_txeof(sc);
2741 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2742 bge_start_locked(ifp);
2743
2744 if (cmd == POLL_AND_CHECK_STATUS) {
2745 uint32_t statusword;
2746
2747 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2748 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2749
2750 statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2751
2752 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2753 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2754 statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2755 bge_link_upd(sc);
2756
2757 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2758 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2759 }
2760}
2761#endif /* DEVICE_POLLING */
2762
2763static void
2764bge_intr(xsc)
2765 void *xsc;
2766{
2767 struct bge_softc *sc;
2768 struct ifnet *ifp;
2769 uint32_t statusword;
2770
2771 sc = xsc;
2772
2773 BGE_LOCK(sc);
2774
2775 ifp = sc->bge_ifp;
2776
2777#ifdef DEVICE_POLLING
2778 if (ifp->if_capenable & IFCAP_POLLING) {
2779 BGE_UNLOCK(sc);
2780 return;
2781 }
2782#endif
2783
2784 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2785 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2786
2787 statusword =
2788 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2789
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82
83#include <net/if.h>
84#include <net/if_arp.h>
85#include <net/ethernet.h>
86#include <net/if_dl.h>
87#include <net/if_media.h>
88
89#include <net/bpf.h>
90
91#include <net/if_types.h>
92#include <net/if_vlan_var.h>
93
94#include <netinet/in_systm.h>
95#include <netinet/in.h>
96#include <netinet/ip.h>
97
98#include <machine/clock.h> /* for DELAY */
99#include <machine/bus.h>
100#include <machine/resource.h>
101#include <sys/bus.h>
102#include <sys/rman.h>
103
104#include <dev/mii/mii.h>
105#include <dev/mii/miivar.h>
106#include "miidevs.h"
107#include <dev/mii/brgphyreg.h>
108
109#include <dev/pci/pcireg.h>
110#include <dev/pci/pcivar.h>
111
112#include <dev/bge/if_bgereg.h>
113
114#include "opt_bge.h"
115
116#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
117#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
118
119MODULE_DEPEND(bge, pci, 1, 1, 1);
120MODULE_DEPEND(bge, ether, 1, 1, 1);
121MODULE_DEPEND(bge, miibus, 1, 1, 1);
122
123/* "device miibus" required. See GENERIC if you get errors here. */
124#include "miibus_if.h"
125
126/*
127 * Various supported device vendors/types and their names. Note: the
128 * spec seems to indicate that the hardware still has Alteon's vendor
129 * ID burned into it, though it will always be overridden by the vendor
130 * ID in the EEPROM. Just to be safe, we cover all possibilities.
131 */
132#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
133
134static struct bge_type bge_devs[] = {
135 { ALT_VENDORID, ALT_DEVICEID_BCM5700,
136 "Broadcom BCM5700 Gigabit Ethernet" },
137 { ALT_VENDORID, ALT_DEVICEID_BCM5701,
138 "Broadcom BCM5701 Gigabit Ethernet" },
139 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
140 "Broadcom BCM5700 Gigabit Ethernet" },
141 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
142 "Broadcom BCM5701 Gigabit Ethernet" },
143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
144 "Broadcom BCM5702 Gigabit Ethernet" },
145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
146 "Broadcom BCM5702X Gigabit Ethernet" },
147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
148 "Broadcom BCM5703 Gigabit Ethernet" },
149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
150 "Broadcom BCM5703X Gigabit Ethernet" },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
152 "Broadcom BCM5704C Dual Gigabit Ethernet" },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
154 "Broadcom BCM5704S Dual Gigabit Ethernet" },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
156 "Broadcom BCM5705 Gigabit Ethernet" },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
158 "Broadcom BCM5705K Gigabit Ethernet" },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
160 "Broadcom BCM5705M Gigabit Ethernet" },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
162 "Broadcom BCM5705M Gigabit Ethernet" },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
164 "Broadcom BCM5714C Gigabit Ethernet" },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
166 "Broadcom BCM5721 Gigabit Ethernet" },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
168 "Broadcom BCM5750 Gigabit Ethernet" },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
170 "Broadcom BCM5750M Gigabit Ethernet" },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
172 "Broadcom BCM5751 Gigabit Ethernet" },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
174 "Broadcom BCM5751M Gigabit Ethernet" },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
176 "Broadcom BCM5752 Gigabit Ethernet" },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
178 "Broadcom BCM5782 Gigabit Ethernet" },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
180 "Broadcom BCM5788 Gigabit Ethernet" },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
182 "Broadcom BCM5789 Gigabit Ethernet" },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
184 "Broadcom BCM5901 Fast Ethernet" },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
186 "Broadcom BCM5901A2 Fast Ethernet" },
187 { SK_VENDORID, SK_DEVICEID_ALTIMA,
188 "SysKonnect Gigabit Ethernet" },
189 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
190 "Altima AC1000 Gigabit Ethernet" },
191 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
192 "Altima AC1002 Gigabit Ethernet" },
193 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
194 "Altima AC9100 Gigabit Ethernet" },
195 { 0, 0, NULL }
196};
197
198static int bge_probe (device_t);
199static int bge_attach (device_t);
200static int bge_detach (device_t);
201static int bge_suspend (device_t);
202static int bge_resume (device_t);
203static void bge_release_resources
204 (struct bge_softc *);
205static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
206static int bge_dma_alloc (device_t);
207static void bge_dma_free (struct bge_softc *);
208
209static void bge_txeof (struct bge_softc *);
210static void bge_rxeof (struct bge_softc *);
211
212static void bge_tick_locked (struct bge_softc *);
213static void bge_tick (void *);
214static void bge_stats_update (struct bge_softc *);
215static void bge_stats_update_regs
216 (struct bge_softc *);
217static int bge_encap (struct bge_softc *, struct mbuf *,
218 u_int32_t *);
219
220static void bge_intr (void *);
221static void bge_start_locked (struct ifnet *);
222static void bge_start (struct ifnet *);
223static int bge_ioctl (struct ifnet *, u_long, caddr_t);
224static void bge_init_locked (struct bge_softc *);
225static void bge_init (void *);
226static void bge_stop (struct bge_softc *);
227static void bge_watchdog (struct ifnet *);
228static void bge_shutdown (device_t);
229static int bge_ifmedia_upd (struct ifnet *);
230static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
231
232static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *);
233static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int);
234
235static void bge_setmulti (struct bge_softc *);
236
237static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *);
238static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *);
239static int bge_init_rx_ring_std (struct bge_softc *);
240static void bge_free_rx_ring_std (struct bge_softc *);
241static int bge_init_rx_ring_jumbo (struct bge_softc *);
242static void bge_free_rx_ring_jumbo (struct bge_softc *);
243static void bge_free_tx_ring (struct bge_softc *);
244static int bge_init_tx_ring (struct bge_softc *);
245
246static int bge_chipinit (struct bge_softc *);
247static int bge_blockinit (struct bge_softc *);
248
249#ifdef notdef
250static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
251static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int);
252static void bge_vpd_read (struct bge_softc *);
253#endif
254
255static u_int32_t bge_readmem_ind
256 (struct bge_softc *, int);
257static void bge_writemem_ind (struct bge_softc *, int, int);
258#ifdef notdef
259static u_int32_t bge_readreg_ind
260 (struct bge_softc *, int);
261#endif
262static void bge_writereg_ind (struct bge_softc *, int, int);
263
264static int bge_miibus_readreg (device_t, int, int);
265static int bge_miibus_writereg (device_t, int, int, int);
266static void bge_miibus_statchg (device_t);
267#ifdef DEVICE_POLLING
268static void bge_poll (struct ifnet *ifp, enum poll_cmd cmd,
269 int count);
270static void bge_poll_locked (struct ifnet *ifp, enum poll_cmd cmd,
271 int count);
272#endif
273
274static void bge_reset (struct bge_softc *);
275static void bge_link_upd (struct bge_softc *);
276
277static device_method_t bge_methods[] = {
278 /* Device interface */
279 DEVMETHOD(device_probe, bge_probe),
280 DEVMETHOD(device_attach, bge_attach),
281 DEVMETHOD(device_detach, bge_detach),
282 DEVMETHOD(device_shutdown, bge_shutdown),
283 DEVMETHOD(device_suspend, bge_suspend),
284 DEVMETHOD(device_resume, bge_resume),
285
286 /* bus interface */
287 DEVMETHOD(bus_print_child, bus_generic_print_child),
288 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
289
290 /* MII interface */
291 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
292 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
293 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
294
295 { 0, 0 }
296};
297
298static driver_t bge_driver = {
299 "bge",
300 bge_methods,
301 sizeof(struct bge_softc)
302};
303
304static devclass_t bge_devclass;
305
306DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
307DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
308
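/*
 * Indirect access to NIC-internal memory: write the target offset to the
 * BGE_PCI_MEMWIN_BASEADDR config register, then read or write the selected
 * 32-bit word through BGE_PCI_MEMWIN_DATA.  The helpers below wrap that
 * two-step sequence.
 */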
309static u_int32_t
310bge_readmem_ind(sc, off)
311 struct bge_softc *sc;
312 int off;
313{
314 device_t dev;
315
316 dev = sc->bge_dev;
317
318 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
319 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
320}
321
322static void
323bge_writemem_ind(sc, off, val)
324 struct bge_softc *sc;
325 int off, val;
326{
327 device_t dev;
328
329 dev = sc->bge_dev;
330
331 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
332 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
333
334 return;
335}
336
337#ifdef notdef
338static u_int32_t
339bge_readreg_ind(sc, off)
340 struct bge_softc *sc;
341 int off;
342{
343 device_t dev;
344
345 dev = sc->bge_dev;
346
347 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
348 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
349}
350#endif
351
352static void
353bge_writereg_ind(sc, off, val)
354 struct bge_softc *sc;
355 int off, val;
356{
357 device_t dev;
358
359 dev = sc->bge_dev;
360
361 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
362 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
363
364 return;
365}
366
367/*
368 * Map a single buffer address.
369 */
370
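/*
 * This is the bus_dmamap_load() callback used throughout bge_dma_alloc():
 * the caller passes a struct bge_dmamap_arg and expects a single segment,
 * whose bus address is recorded in ctx->bge_busaddr.  If the load produced
 * more segments than the caller allowed for, bge_maxsegs is zeroed so the
 * caller can detect the failure.
 */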
371static void
372bge_dma_map_addr(arg, segs, nseg, error)
373 void *arg;
374 bus_dma_segment_t *segs;
375 int nseg;
376 int error;
377{
378 struct bge_dmamap_arg *ctx;
379
380 if (error)
381 return;
382
383 ctx = arg;
384
385 if (nseg > ctx->bge_maxsegs) {
386 ctx->bge_maxsegs = 0;
387 return;
388 }
389
390 ctx->bge_busaddr = segs->ds_addr;
391
392 return;
393}
394
395#ifdef notdef
396static u_int8_t
397bge_vpd_readbyte(sc, addr)
398 struct bge_softc *sc;
399 int addr;
400{
401 int i;
402 device_t dev;
403 u_int32_t val;
404
405 dev = sc->bge_dev;
406 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
407 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
408 DELAY(10);
409 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
410 break;
411 }
412
413	if (i == BGE_TIMEOUT * 10) {
414 device_printf(sc->bge_dev, "VPD read timed out\n");
415 return(0);
416 }
417
418 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
419
420 return((val >> ((addr % 4) * 8)) & 0xFF);
421}
422
423static void
424bge_vpd_read_res(sc, res, addr)
425 struct bge_softc *sc;
426 struct vpd_res *res;
427 int addr;
428{
429 int i;
430 u_int8_t *ptr;
431
432 ptr = (u_int8_t *)res;
433 for (i = 0; i < sizeof(struct vpd_res); i++)
434 ptr[i] = bge_vpd_readbyte(sc, i + addr);
435
436 return;
437}
438
439static void
440bge_vpd_read(sc)
441 struct bge_softc *sc;
442{
443 int pos = 0, i;
444 struct vpd_res res;
445
446 if (sc->bge_vpd_prodname != NULL)
447 free(sc->bge_vpd_prodname, M_DEVBUF);
448 if (sc->bge_vpd_readonly != NULL)
449 free(sc->bge_vpd_readonly, M_DEVBUF);
450 sc->bge_vpd_prodname = NULL;
451 sc->bge_vpd_readonly = NULL;
452
453 bge_vpd_read_res(sc, &res, pos);
454
455 if (res.vr_id != VPD_RES_ID) {
456 device_printf(sc->bge_dev,
457 "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
458 res.vr_id);
459 return;
460 }
461
462 pos += sizeof(res);
463 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
464 for (i = 0; i < res.vr_len; i++)
465 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
466 sc->bge_vpd_prodname[i] = '\0';
467 pos += i;
468
469 bge_vpd_read_res(sc, &res, pos);
470
471 if (res.vr_id != VPD_RES_READ) {
472 device_printf(sc->bge_dev,
473 "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
474 res.vr_id);
475 return;
476 }
477
478 pos += sizeof(res);
479 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
480	for (i = 0; i < res.vr_len; i++)
481 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
482
483 return;
484}
485#endif
486
487/*
488 * Read a byte of data stored in the EEPROM at address 'addr.' The
489 * BCM570x supports both the traditional bitbang interface and an
490 * auto access interface for reading the EEPROM. We use the auto
491 * access method.
492 */
493static u_int8_t
494bge_eeprom_getbyte(sc, addr, dest)
495 struct bge_softc *sc;
496 int addr;
497 u_int8_t *dest;
498{
499 int i;
500 u_int32_t byte = 0;
501
502 /*
503 * Enable use of auto EEPROM access so we can avoid
504 * having to use the bitbang method.
505 */
506 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
507
508 /* Reset the EEPROM, load the clock period. */
509 CSR_WRITE_4(sc, BGE_EE_ADDR,
510 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
511 DELAY(20);
512
513 /* Issue the read EEPROM command. */
514 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
515
516 /* Wait for completion */
517 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
518 DELAY(10);
519 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
520 break;
521 }
522
523	if (i == BGE_TIMEOUT * 10) {
524 device_printf(sc->bge_dev, "EEPROM read timed out\n");
525 return(1);
526 }
527
528 /* Get result. */
529 byte = CSR_READ_4(sc, BGE_EE_DATA);
530
531 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
532
533 return(0);
534}
535
536/*
537 * Read a sequence of bytes from the EEPROM.
538 */
539static int
540bge_read_eeprom(sc, dest, off, cnt)
541 struct bge_softc *sc;
542 caddr_t dest;
543 int off;
544 int cnt;
545{
546 int err = 0, i;
547 u_int8_t byte = 0;
548
549 for (i = 0; i < cnt; i++) {
550 err = bge_eeprom_getbyte(sc, off + i, &byte);
551 if (err)
552 break;
553 *(dest + i) = byte;
554 }
555
556 return(err ? 1 : 0);
557}
558
559static int
560bge_miibus_readreg(dev, phy, reg)
561 device_t dev;
562 int phy, reg;
563{
564 struct bge_softc *sc;
565 u_int32_t val, autopoll;
566 int i;
567
568 sc = device_get_softc(dev);
569
570 /*
571 * Broadcom's own driver always assumes the internal
572 * PHY is at GMII address 1. On some chips, the PHY responds
573 * to accesses at all addresses, which could cause us to
574	 * bogusly attach the PHY 32 times at probe time. Always
575 * restricting the lookup to address 1 is simpler than
576	 * trying to figure out which chip revisions should be
577 * special-cased.
578 */
579 if (phy != 1)
580 return(0);
581
582 /* Reading with autopolling on may trigger PCI errors */
583 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
584 if (autopoll & BGE_MIMODE_AUTOPOLL) {
585 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
586 DELAY(40);
587 }
588
589 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
590 BGE_MIPHY(phy)|BGE_MIREG(reg));
591
592 for (i = 0; i < BGE_TIMEOUT; i++) {
593 val = CSR_READ_4(sc, BGE_MI_COMM);
594 if (!(val & BGE_MICOMM_BUSY))
595 break;
596 }
597
598 if (i == BGE_TIMEOUT) {
599 if_printf(sc->bge_ifp, "PHY read timed out\n");
600 val = 0;
601 goto done;
602 }
603
604 val = CSR_READ_4(sc, BGE_MI_COMM);
605
606done:
607 if (autopoll & BGE_MIMODE_AUTOPOLL) {
608 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
609 DELAY(40);
610 }
611
612 if (val & BGE_MICOMM_READFAIL)
613 return(0);
614
615 return(val & 0xFFFF);
616}
617
618static int
619bge_miibus_writereg(dev, phy, reg, val)
620 device_t dev;
621 int phy, reg, val;
622{
623 struct bge_softc *sc;
624 u_int32_t autopoll;
625 int i;
626
627 sc = device_get_softc(dev);
628
629 /* Reading with autopolling on may trigger PCI errors */
630 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
631 if (autopoll & BGE_MIMODE_AUTOPOLL) {
632 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
633 DELAY(40);
634 }
635
636 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
637 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
638
639 for (i = 0; i < BGE_TIMEOUT; i++) {
640 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
641 break;
642 }
643
644 if (autopoll & BGE_MIMODE_AUTOPOLL) {
645 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
646 DELAY(40);
647 }
648
649 if (i == BGE_TIMEOUT) {
650		if_printf(sc->bge_ifp, "PHY write timed out\n");
651 return(0);
652 }
653
654 return(0);
655}
656
657static void
658bge_miibus_statchg(dev)
659 device_t dev;
660{
661 struct bge_softc *sc;
662 struct mii_data *mii;
663
664 sc = device_get_softc(dev);
665 mii = device_get_softc(sc->bge_miibus);
666
667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
669 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
670 } else {
671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
672 }
673
674 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
675 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
676 } else {
677 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
678 }
679
680 return;
681}
682
683/*
684 * Initialize a standard receive ring descriptor.
685 */
686static int
687bge_newbuf_std(sc, i, m)
688 struct bge_softc *sc;
689 int i;
690 struct mbuf *m;
691{
692 struct mbuf *m_new = NULL;
693 struct bge_rx_bd *r;
694 struct bge_dmamap_arg ctx;
695 int error;
696
697 if (m == NULL) {
698 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
699 if (m_new == NULL)
700 return(ENOBUFS);
701 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 } else {
703 m_new = m;
704 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
705 m_new->m_data = m_new->m_ext.ext_buf;
706 }
707
708 if (!sc->bge_rx_alignment_bug)
709 m_adj(m_new, ETHER_ALIGN);
710 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
711 r = &sc->bge_ldata.bge_rx_std_ring[i];
712 ctx.bge_maxsegs = 1;
713 ctx.sc = sc;
714 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
715 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
716 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
717 if (error || ctx.bge_maxsegs == 0) {
718 if (m == NULL) {
719 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
720 m_freem(m_new);
721 }
722 return(ENOMEM);
723 }
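	/* Descriptors carry 64-bit bus addresses split into hi/lo words. */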
724 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
725 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
726 r->bge_flags = BGE_RXBDFLAG_END;
727 r->bge_len = m_new->m_len;
728 r->bge_idx = i;
729
730 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
731 sc->bge_cdata.bge_rx_std_dmamap[i],
732 BUS_DMASYNC_PREREAD);
733
734 return(0);
735}
736
737/*
738 * Initialize a jumbo receive ring descriptor. This allocates
739 * a 9k jumbo mbuf cluster (MJUM9BYTES) from the system mbuf pool.
740 */
741static int
742bge_newbuf_jumbo(sc, i, m)
743 struct bge_softc *sc;
744 int i;
745 struct mbuf *m;
746{
747 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
748 struct bge_extrx_bd *r;
749 struct mbuf *m_new = NULL;
750 int nsegs;
751 int error;
752
753 if (m == NULL) {
754 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
755 if (m_new == NULL)
756 return(ENOBUFS);
757
758 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
759 if (!(m_new->m_flags & M_EXT)) {
760 m_freem(m_new);
761 return(ENOBUFS);
762 }
763 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
764 } else {
765 m_new = m;
766 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
767 m_new->m_data = m_new->m_ext.ext_buf;
768 }
769
770 if (!sc->bge_rx_alignment_bug)
771 m_adj(m_new, ETHER_ALIGN);
772
773 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
774 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
775 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
776 if (error) {
777 if (m == NULL)
778 m_freem(m_new);
779 return(error);
780 }
781 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
782
783 /*
784 * Fill in the extended RX buffer descriptor.
785 */
786 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
787 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
788 r->bge_idx = i;
789 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
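	/*
	 * Deliberate fall-through: each case records its segment's address
	 * and length and then falls into the next lower case, so an
	 * nsegs-segment mapping fills descriptors nsegs-1 down to 0.
	 */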
790 switch (nsegs) {
791 case 4:
792 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
793 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
794 r->bge_len3 = segs[3].ds_len;
795 case 3:
796 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
797 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
798 r->bge_len2 = segs[2].ds_len;
799 case 2:
800 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
801 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
802 r->bge_len1 = segs[1].ds_len;
803 case 1:
804 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
805 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
806 r->bge_len0 = segs[0].ds_len;
807 break;
808 default:
809 panic("%s: %d segments\n", __func__, nsegs);
810 }
811
812	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
813 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
814 BUS_DMASYNC_PREREAD);
815
816 return (0);
817}
818
819/*
820 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
821 * that's 1MB of memory, which is a lot. For now, we fill only the first
822 * 256 ring entries and hope that our CPU is fast enough to keep up with
823 * the NIC.
824 */
825static int
826bge_init_rx_ring_std(sc)
827 struct bge_softc *sc;
828{
829 int i;
830
831 for (i = 0; i < BGE_SSLOTS; i++) {
832 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
833 return(ENOBUFS);
834	}
835
836 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
837 sc->bge_cdata.bge_rx_std_ring_map,
838 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
839
840 sc->bge_std = i - 1;
841 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
842
843 return(0);
844}
845
846static void
847bge_free_rx_ring_std(sc)
848 struct bge_softc *sc;
849{
850 int i;
851
852 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
853 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
854 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
855 sc->bge_cdata.bge_rx_std_dmamap[i],
856 BUS_DMASYNC_POSTREAD);
857 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
858 sc->bge_cdata.bge_rx_std_dmamap[i]);
859 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
860 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
861 }
862 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
863 sizeof(struct bge_rx_bd));
864 }
865
866 return;
867}
868
869static int
870bge_init_rx_ring_jumbo(sc)
871 struct bge_softc *sc;
872{
873 struct bge_rcb *rcb;
874 int i;
875
876 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
877 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
878 return(ENOBUFS);
879	}
880
881 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
882 sc->bge_cdata.bge_rx_jumbo_ring_map,
883 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
884
885 sc->bge_jumbo = i - 1;
886
887 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
888 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
889 BGE_RCB_FLAG_USE_EXT_RX_BD);
890 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
891
892 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
893
894 return(0);
895}
896
897static void
898bge_free_rx_ring_jumbo(sc)
899 struct bge_softc *sc;
900{
901 int i;
902
903 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
904 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
905 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
906 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
907 BUS_DMASYNC_POSTREAD);
908 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
909 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
910 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
911 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
912 }
913 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
914 sizeof(struct bge_extrx_bd));
915 }
916
917 return;
918}
919
920static void
921bge_free_tx_ring(sc)
922 struct bge_softc *sc;
923{
924 int i;
925
926 if (sc->bge_ldata.bge_tx_ring == NULL)
927 return;
928
929 for (i = 0; i < BGE_TX_RING_CNT; i++) {
930 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
931 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
932 sc->bge_cdata.bge_tx_dmamap[i],
933 BUS_DMASYNC_POSTWRITE);
934 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
935 sc->bge_cdata.bge_tx_dmamap[i]);
936 m_freem(sc->bge_cdata.bge_tx_chain[i]);
937 sc->bge_cdata.bge_tx_chain[i] = NULL;
938 }
939 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
940 sizeof(struct bge_tx_bd));
941 }
942
943 return;
944}
945
946static int
947bge_init_tx_ring(sc)
948 struct bge_softc *sc;
949{
950 sc->bge_txcnt = 0;
951 sc->bge_tx_saved_considx = 0;
952
953 /* Initialize transmit producer index for host-memory send ring. */
954 sc->bge_tx_prodidx = 0;
955 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
956
957 /* 5700 b2 errata */
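	/*
	 * The errata workaround is simply to issue the mailbox write a
	 * second time: on the affected 5700 revisions the first write
	 * can apparently be dropped.
	 */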
958 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
959 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
960
961 /* NIC-memory send ring not used; initialize to zero. */
962 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
963 /* 5700 b2 errata */
964 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
965 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
966
967 return(0);
968}
969
970static void
971bge_setmulti(sc)
972 struct bge_softc *sc;
973{
974 struct ifnet *ifp;
975 struct ifmultiaddr *ifma;
976 u_int32_t hashes[4] = { 0, 0, 0, 0 };
977 int h, i;
978
979 BGE_LOCK_ASSERT(sc);
980
981 ifp = sc->bge_ifp;
982
983 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
984 for (i = 0; i < 4; i++)
985 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
986 return;
987 }
988
989 /* First, zot all the existing filters. */
990 for (i = 0; i < 4; i++)
991 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
992
993 /* Now program new ones. */
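	/*
	 * The multicast filter is a 128-bit hash table spread across the
	 * four 32-bit BGE_MAR registers.  Each address is hashed with a
	 * little-endian CRC-32 and the low 7 bits of the result select a
	 * filter bit: bits 6:5 pick the register, bits 4:0 the bit
	 * within it.
	 */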
994 IF_ADDR_LOCK(ifp);
995 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
996 if (ifma->ifma_addr->sa_family != AF_LINK)
997 continue;
998 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
999 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1000 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1001 }
1002 IF_ADDR_UNLOCK(ifp);
1003
1004 for (i = 0; i < 4; i++)
1005 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1006
1007 return;
1008}
1009
1010/*
1011 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1012 * self-test results.
1013 */
1014static int
1015bge_chipinit(sc)
1016 struct bge_softc *sc;
1017{
1018 int i;
1019 u_int32_t dma_rw_ctl;
1020
1021 /* Set endian type before we access any non-PCI registers. */
1022 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1023
1024 /*
1025 * Check the 'ROM failed' bit on the RX CPU to see if
1026 * self-tests passed.
1027 */
1028 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1029 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1030 return(ENODEV);
1031 }
1032
1033 /* Clear the MAC control register */
1034 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1035
1036 /*
1037 * Clear the MAC statistics block in the NIC's
1038 * internal memory.
1039 */
1040 for (i = BGE_STATS_BLOCK;
1041 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1042 BGE_MEMWIN_WRITE(sc, i, 0);
1043
1044 for (i = BGE_STATUS_BLOCK;
1045 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1046 BGE_MEMWIN_WRITE(sc, i, 0);
1047
1048 /* Set up the PCI DMA control register. */
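	/*
	 * Roughly speaking, the read/write watermark fields set below
	 * control how much data the chip accumulates before starting a
	 * DMA burst.  The encoding differs between PCI Express,
	 * conventional PCI and PCI-X attachments (and again on the
	 * 5704), hence the bus-specific cases that follow.
	 */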
1049 if (sc->bge_pcie) {
1050 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1051 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1052 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1053 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1054 BGE_PCISTATE_PCI_BUSMODE) {
1055 /* Conventional PCI bus */
1056 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1057 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1058 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1059 (0x0F);
1060 } else {
1061 /* PCI-X bus */
1062 /*
1063 * The 5704 uses a different encoding of read/write
1064 * watermarks.
1065 */
1066 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1067 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1068 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1069 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1070 else
1071 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1072 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1073 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1074 (0x0F);
1075
1076 /*
1077 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1078 * for hardware bugs.
1079 */
1080 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1081 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1082 u_int32_t tmp;
1083
1084 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1085 if (tmp == 0x6 || tmp == 0x7)
1086 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1087 }
1088 }
1089
1090 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1091 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1092 sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1093 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1094 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1095 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1096
1097 /*
1098 * Set up general mode register.
1099 */
1100 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1101 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1102 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1103
1104 /*
1105 * Disable memory write invalidate. Apparently it is not supported
1106 * properly by these devices.
1107 */
1108 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1109
1110#ifdef __brokenalpha__
1111 /*
1112 * Must ensure that we do not cross an 8K (bytes) boundary
1113 * for DMA reads. Our highest limit is 1K bytes. This is a
1114 * restriction on some ALPHA platforms with early revision
1115 * 21174 PCI chipsets, such as the AlphaPC 164lx
1116 */
1117 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1118 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1119#endif
1120
1121	/* Set the timer prescaler (always 66 MHz) */
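	/*
	 * Writing 65 presumably selects a divisor of 66 (prescaler + 1),
	 * so the 66 MHz core clock yields a 1 usec timer tick, which is
	 * the unit the coalescing registers are programmed in.
	 */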
1122 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1123
1124 return(0);
1125}
1126
1127static int
1128bge_blockinit(sc)
1129 struct bge_softc *sc;
1130{
1131 struct bge_rcb *rcb;
1132 bus_size_t vrcb;
1133 bge_hostaddr taddr;
1134 int i;
1135
1136 /*
1137 * Initialize the memory window pointer register so that
1138 * we can access the first 32K of internal NIC RAM. This will
1139 * allow us to set up the TX send ring RCBs and the RX return
1140 * ring RCBs, plus other things which live in NIC memory.
1141 */
1142 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1143
1144 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1145
1146 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1147 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1148 /* Configure mbuf memory pool */
1149 if (sc->bge_extram) {
1150 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1151 BGE_EXT_SSRAM);
1152 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1153 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1154 else
1155 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1156 } else {
1157 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1158 BGE_BUFFPOOL_1);
1159 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1160 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1161 else
1162 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1163 }
1164
1165 /* Configure DMA resource pool */
1166 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1167 BGE_DMA_DESCRIPTORS);
1168 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1169 }
1170
1171 /* Configure mbuf pool watermarks */
1172 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1173 sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1174 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1175 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1176 } else {
1177 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1178 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1179 }
1180 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1181
1182 /* Configure DMA resource watermarks */
1183 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1184 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1185
1186 /* Enable buffer manager */
1187 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1188 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1189 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1190 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1191
1192 /* Poll for buffer manager start indication */
1193 for (i = 0; i < BGE_TIMEOUT; i++) {
1194 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1195 break;
1196 DELAY(10);
1197 }
1198
1199 if (i == BGE_TIMEOUT) {
1200 device_printf(sc->bge_dev,
1201 "buffer manager failed to start\n");
1202 return(ENXIO);
1203 }
1204 }
1205
1206 /* Enable flow-through queues */
1207 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1208 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1209
1210 /* Wait until queue initialization is complete */
1211 for (i = 0; i < BGE_TIMEOUT; i++) {
1212 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1213 break;
1214 DELAY(10);
1215 }
1216
1217 if (i == BGE_TIMEOUT) {
1218 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1219 return(ENXIO);
1220 }
1221
1222 /* Initialize the standard RX ring control block */
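	/*
	 * A ring control block (RCB) describes a ring to the chip: a
	 * 64-bit host address split into hi/lo words, a combined
	 * maximum-length/flags word and an address in NIC-local memory.
	 */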
1223 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1224 rcb->bge_hostaddr.bge_addr_lo =
1225 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1226 rcb->bge_hostaddr.bge_addr_hi =
1227 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1228 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1229 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1230 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1231 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1232 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1233 else
1234 rcb->bge_maxlen_flags =
1235 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1236 if (sc->bge_extram)
1237 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1238 else
1239 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1240 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1241 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1242
1243 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1244 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1245
1246 /*
1247 * Initialize the jumbo RX ring control block
1248 * We set the 'ring disabled' bit in the flags
1249 * field until we're actually ready to start
1250 * using this ring (i.e. once we set the MTU
1251 * high enough to require it).
1252 */
1253 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1254 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1255 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1256
1257 rcb->bge_hostaddr.bge_addr_lo =
1258 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1259 rcb->bge_hostaddr.bge_addr_hi =
1260 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1261 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1262 sc->bge_cdata.bge_rx_jumbo_ring_map,
1263 BUS_DMASYNC_PREREAD);
1264 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1265 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1266 if (sc->bge_extram)
1267 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1268 else
1269 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1270 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1271 rcb->bge_hostaddr.bge_addr_hi);
1272 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1273 rcb->bge_hostaddr.bge_addr_lo);
1274
1275 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1276 rcb->bge_maxlen_flags);
1277 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1278
1279 /* Set up dummy disabled mini ring RCB */
1280 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1281 rcb->bge_maxlen_flags =
1282 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1283 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1284 rcb->bge_maxlen_flags);
1285 }
1286
1287 /*
1288 * Set the BD ring replenish thresholds. The recommended
1289 * values are 1/8th the number of descriptors allocated to
1290 * each ring.
1291 */
1292 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1293 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1294
1295 /*
1296 * Disable all unused send rings by setting the 'ring disabled'
1297 * bit in the flags field of all the TX send ring control blocks.
1298 * These are located in NIC memory.
1299 */
1300 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1301 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1302 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1303 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1304 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1305 vrcb += sizeof(struct bge_rcb);
1306 }
1307
1308 /* Configure TX RCB 0 (we use only the first ring) */
1309 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1310 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1311 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1312 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1313 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1314 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1315 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1316 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1317 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1318 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1319
1320 /* Disable all unused RX return rings */
1321 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1322 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1323 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1324 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1325 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1326 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1327 BGE_RCB_FLAG_RING_DISABLED));
1328 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1329 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1330 (i * (sizeof(u_int64_t))), 0);
1331 vrcb += sizeof(struct bge_rcb);
1332 }
1333
1334 /* Initialize RX ring indexes */
1335 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1336 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1337 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1338
1339 /*
1340 * Set up RX return ring 0
1341 * Note that the NIC address for RX return rings is 0x00000000.
1342 * The return rings live entirely within the host, so the
1343 * nicaddr field in the RCB isn't used.
1344 */
1345 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1346 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1347 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1348 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1349 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1350 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1351 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1352
1353 /* Set random backoff seed for TX */
1354 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1355 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1356 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1357 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1358 BGE_TX_BACKOFF_SEED_MASK);
1359
1360 /* Set inter-packet gap */
1361 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1362
1363 /*
1364 * Specify which ring to use for packets that don't match
1365 * any RX rules.
1366 */
1367 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1368
1369 /*
1370 * Configure number of RX lists. One interrupt distribution
1371 * list, sixteen active lists, one bad frames class.
1372 */
1373 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1374
1375	/* Initialize RX list placement stats mask. */
1376 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1377 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1378
1379 /* Disable host coalescing until we get it set up */
1380 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1381
1382 /* Poll to make sure it's shut down. */
1383 for (i = 0; i < BGE_TIMEOUT; i++) {
1384 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1385 break;
1386 DELAY(10);
1387 }
1388
1389 if (i == BGE_TIMEOUT) {
1390 device_printf(sc->bge_dev,
1391 "host coalescing engine failed to idle\n");
1392 return(ENXIO);
1393 }
1394
1395 /* Set up host coalescing defaults */
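	/*
	 * The host coalescing engine holds off the interrupt until
	 * either the tick value (in usec) or the buffer descriptor
	 * count for a given direction is reached, whichever comes
	 * first.
	 */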
1396 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1397 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1398 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1399 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1400 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1401 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1402 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1403 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1404 }
1405 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1406 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1407
1408 /* Set up address of statistics block */
1409 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1410 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1411 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1412 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1413 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1414 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1415 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1416 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1417 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1418 }
1419
1420 /* Set up address of status block */
1421 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1422 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1423 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1424 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1425 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1426 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1427
1428 /* Turn on host coalescing state machine */
1429 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1430
1431 /* Turn on RX BD completion state machine and enable attentions */
1432 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1433 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1434
1435 /* Turn on RX list placement state machine */
1436 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1437
1438 /* Turn on RX list selector state machine. */
1439 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1440 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1441 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1442
1443 /* Turn on DMA, clear stats */
1444 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1445 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1446 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1447 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1448 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1449
1450 /* Set misc. local control, enable interrupts on attentions */
1451 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1452
1453#ifdef notdef
1454 /* Assert GPIO pins for PHY reset */
1455 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1456 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1457 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1458 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1459#endif
1460
1461 /* Turn on DMA completion state machine */
1462 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1463 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1464 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1465
1466 /* Turn on write DMA state machine */
1467 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1468 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1469
1470 /* Turn on read DMA state machine */
1471 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1472 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1473
1474 /* Turn on RX data completion state machine */
1475 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1476
1477 /* Turn on RX BD initiator state machine */
1478 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1479
1480 /* Turn on RX data and RX BD initiator state machine */
1481 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1482
1483 /* Turn on Mbuf cluster free state machine */
1484 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1485 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1486 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1487
1488 /* Turn on send BD completion state machine */
1489 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1490
1491 /* Turn on send data completion state machine */
1492 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1493
1494 /* Turn on send data initiator state machine */
1495 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1496
1497 /* Turn on send BD initiator state machine */
1498 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1499
1500 /* Turn on send BD selector state machine */
1501 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1502
1503 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1504 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1505 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1506
1507 /* ack/clear link change events */
1508 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1509 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1510 BGE_MACSTAT_LINK_CHANGED);
1511 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1512
1513 /* Enable PHY auto polling (for MII/GMII only) */
1514 if (sc->bge_tbi) {
1515 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1516 } else {
1517 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1518 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1519 sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
1520 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1521 BGE_EVTENB_MI_INTERRUPT);
1522 }
1523
1524 /*
1525 * Clear any pending link state attention.
1526 * Otherwise some link state change events may be lost until attention
1527 * is cleared by bge_intr() -> bge_link_upd() sequence.
1528 * It's not necessary on newer BCM chips - perhaps enabling link
1529 * state change attentions implies clearing pending attention.
1530 */
1531 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1532 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1533 BGE_MACSTAT_LINK_CHANGED);
1534
1535 /* Enable link state change attentions. */
1536 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1537
1538 return(0);
1539}
1540
1541/*
1542 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1543 * against our list and return its name if we find a match. Note
1544 * that since the Broadcom controller contains VPD support, we
1545 * can get the device name string from the controller itself instead
1546 * of the compiled-in string. This is a little slow, but it guarantees
1547 * we'll always announce the right product name.
1548 */
1549static int
1550bge_probe(dev)
1551 device_t dev;
1552{
1553 struct bge_type *t;
1554 struct bge_softc *sc;
1555 char *descbuf;
1556
1557 t = bge_devs;
1558
1559 sc = device_get_softc(dev);
1560 bzero(sc, sizeof(struct bge_softc));
1561 sc->bge_dev = dev;
1562
1563 while(t->bge_name != NULL) {
1564 if ((pci_get_vendor(dev) == t->bge_vid) &&
1565 (pci_get_device(dev) == t->bge_did)) {
1566#ifdef notdef
1567 bge_vpd_read(sc);
1568 device_set_desc(dev, sc->bge_vpd_prodname);
1569#endif
1570 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1571 if (descbuf == NULL)
1572 return(ENOMEM);
1573 snprintf(descbuf, BGE_DEVDESC_MAX,
1574 "%s, ASIC rev. %#04x", t->bge_name,
1575 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1576 device_set_desc_copy(dev, descbuf);
1577 if (pci_get_subvendor(dev) == DELL_VENDORID)
1578 sc->bge_no_3_led = 1;
1579 free(descbuf, M_TEMP);
1580 return(0);
1581 }
1582 t++;
1583 }
1584
1585 return(ENXIO);
1586}
1587
1588static void
1589bge_dma_free(sc)
1590 struct bge_softc *sc;
1591{
1592 int i;
1593
1594
1595 /* Destroy DMA maps for RX buffers */
1596
1597 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1598 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1599 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1600 sc->bge_cdata.bge_rx_std_dmamap[i]);
1601 }
1602
1603 /* Destroy DMA maps for jumbo RX buffers */
1604
1605 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1606 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1607 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1608 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1609 }
1610
1611 /* Destroy DMA maps for TX buffers */
1612
1613 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1614 if (sc->bge_cdata.bge_tx_dmamap[i])
1615 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1616 sc->bge_cdata.bge_tx_dmamap[i]);
1617 }
1618
1619 if (sc->bge_cdata.bge_mtag)
1620 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
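
	/* Destroy DMA tag for jumbo RX mbufs (created in bge_dma_alloc()) */

	if (sc->bge_cdata.bge_mtag_jumbo)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);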
1621
1622
1623 /* Destroy standard RX ring */
1624
1625 if (sc->bge_cdata.bge_rx_std_ring_map)
1626 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1627 sc->bge_cdata.bge_rx_std_ring_map);
1628 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1629 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1630 sc->bge_ldata.bge_rx_std_ring,
1631 sc->bge_cdata.bge_rx_std_ring_map);
1632
1633 if (sc->bge_cdata.bge_rx_std_ring_tag)
1634 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1635
1636 /* Destroy jumbo RX ring */
1637
1638 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1639 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1640 sc->bge_cdata.bge_rx_jumbo_ring_map);
1641
1642 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1643 sc->bge_ldata.bge_rx_jumbo_ring)
1644 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1645 sc->bge_ldata.bge_rx_jumbo_ring,
1646 sc->bge_cdata.bge_rx_jumbo_ring_map);
1647
1648 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1649 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1650
1651 /* Destroy RX return ring */
1652
1653 if (sc->bge_cdata.bge_rx_return_ring_map)
1654 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1655 sc->bge_cdata.bge_rx_return_ring_map);
1656
1657 if (sc->bge_cdata.bge_rx_return_ring_map &&
1658 sc->bge_ldata.bge_rx_return_ring)
1659 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1660 sc->bge_ldata.bge_rx_return_ring,
1661 sc->bge_cdata.bge_rx_return_ring_map);
1662
1663 if (sc->bge_cdata.bge_rx_return_ring_tag)
1664 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1665
1666 /* Destroy TX ring */
1667
1668 if (sc->bge_cdata.bge_tx_ring_map)
1669 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1670 sc->bge_cdata.bge_tx_ring_map);
1671
1672 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1673 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1674 sc->bge_ldata.bge_tx_ring,
1675 sc->bge_cdata.bge_tx_ring_map);
1676
1677 if (sc->bge_cdata.bge_tx_ring_tag)
1678 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1679
1680 /* Destroy status block */
1681
1682 if (sc->bge_cdata.bge_status_map)
1683 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1684 sc->bge_cdata.bge_status_map);
1685
1686 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1687 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1688 sc->bge_ldata.bge_status_block,
1689 sc->bge_cdata.bge_status_map);
1690
1691 if (sc->bge_cdata.bge_status_tag)
1692 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1693
1694 /* Destroy statistics block */
1695
1696 if (sc->bge_cdata.bge_stats_map)
1697 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1698 sc->bge_cdata.bge_stats_map);
1699
1700 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1701 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1702 sc->bge_ldata.bge_stats,
1703 sc->bge_cdata.bge_stats_map);
1704
1705 if (sc->bge_cdata.bge_stats_tag)
1706 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1707
1708 /* Destroy the parent tag */
1709
1710 if (sc->bge_cdata.bge_parent_tag)
1711 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1712
1713 return;
1714}
1715
1716static int
1717bge_dma_alloc(dev)
1718 device_t dev;
1719{
1720 struct bge_softc *sc;
1721 int i, error;
1722 struct bge_dmamap_arg ctx;
1723
1724 sc = device_get_softc(dev);
1725
1726 /*
1727 * Allocate the parent bus DMA tag appropriate for PCI.
1728 */
1729 error = bus_dma_tag_create(NULL, /* parent */
1730 PAGE_SIZE, 0, /* alignment, boundary */
1731 BUS_SPACE_MAXADDR, /* lowaddr */
1732 BUS_SPACE_MAXADDR, /* highaddr */
1733 NULL, NULL, /* filter, filterarg */
1734 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1735 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1736 0, /* flags */
1737 NULL, NULL, /* lockfunc, lockarg */
1738 &sc->bge_cdata.bge_parent_tag);
1739
1740 if (error != 0) {
1741 device_printf(sc->bge_dev,
1742 "could not allocate parent dma tag\n");
1743 return (ENOMEM);
1744 }
1745
1746 /*
1747 * Create tag for RX mbufs.
1748 */
1749 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1750 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1751 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1752 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1753
1754 if (error) {
1755 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1756 return (ENOMEM);
1757 }
1758
1759 /* Create DMA maps for RX buffers */
1760
1761 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1762 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1763 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1764 if (error) {
1765 device_printf(sc->bge_dev,
1766 "can't create DMA map for RX\n");
1767 return(ENOMEM);
1768 }
1769 }
1770
1771 /* Create DMA maps for TX buffers */
1772
1773 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1774 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1775 &sc->bge_cdata.bge_tx_dmamap[i]);
1776 if (error) {
1777 device_printf(sc->bge_dev,
1778			    "can't create DMA map for TX\n");
1779 return(ENOMEM);
1780 }
1781 }
1782
1783 /* Create tag for standard RX ring */
1784
1785 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1786 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1787 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1788 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1789
1790 if (error) {
1791 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1792 return (ENOMEM);
1793 }
1794
1795 /* Allocate DMA'able memory for standard RX ring */
1796
1797 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1798 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1799 &sc->bge_cdata.bge_rx_std_ring_map);
1800 if (error)
1801 return (ENOMEM);
1802
1803 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1804
1805 /* Load the address of the standard RX ring */
1806
1807 ctx.bge_maxsegs = 1;
1808 ctx.sc = sc;
1809
1810 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1811 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1812 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1813
1814 if (error)
1815 return (ENOMEM);
1816
1817 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1818
1819 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1820 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1821
1822 /*
1823 * Create tag for jumbo mbufs.
1824 * This is really a bit of a kludge. We allocate a special
1825 * jumbo buffer pool which (thanks to the way our DMA
1826 * memory allocation works) will consist of contiguous
1827 * pages. This means that even though a jumbo buffer might
1828 * be larger than a page size, we don't really need to
1829 * map it into more than one DMA segment. However, the
1830 * default mbuf tag will result in multi-segment mappings,
1831 * so we have to create a special jumbo mbuf tag that
1832 * lets us get away with mapping the jumbo buffers as
1833 * a single segment. I think eventually the driver should
1834 * be changed so that it uses ordinary mbufs and cluster
1835 * buffers, i.e. jumbo frames can span multiple DMA
1836 * descriptors. But that's a project for another day.
1837 */
1838
1839 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1840 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1841 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1842 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1843
1844 if (error) {
1845 device_printf(sc->bge_dev,
1846 "could not allocate dma tag\n");
1847 return (ENOMEM);
1848 }
1849
1850 /* Create tag for jumbo RX ring */
1851 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1852 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1853 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1854 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1855
1856 if (error) {
1857 device_printf(sc->bge_dev,
1858 "could not allocate dma tag\n");
1859 return (ENOMEM);
1860 }
1861
1862 /* Allocate DMA'able memory for jumbo RX ring */
1863 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1864 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1865 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1866 &sc->bge_cdata.bge_rx_jumbo_ring_map);
1867 if (error)
1868 return (ENOMEM);
1869
1870 /* Load the address of the jumbo RX ring */
1871 ctx.bge_maxsegs = 1;
1872 ctx.sc = sc;
1873
1874 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1875 sc->bge_cdata.bge_rx_jumbo_ring_map,
1876 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1877 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1878
1879 if (error)
1880 return (ENOMEM);
1881
1882 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1883
1884 /* Create DMA maps for jumbo RX buffers */
1885
1886 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1887 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1888 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1889 if (error) {
1890 device_printf(sc->bge_dev,
1891 "can't create DMA map for RX\n");
1892 return(ENOMEM);
1893 }
1894 }
1895
1896 }
1897
1898 /* Create tag for RX return ring */
1899
1900 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1901 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1902 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1903 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1904
1905 if (error) {
1906 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1907 return (ENOMEM);
1908 }
1909
1910 /* Allocate DMA'able memory for RX return ring */
1911
1912 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1913 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1914 &sc->bge_cdata.bge_rx_return_ring_map);
1915 if (error)
1916 return (ENOMEM);
1917
1918 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1919 BGE_RX_RTN_RING_SZ(sc));
1920
1921 /* Load the address of the RX return ring */
1922
1923 ctx.bge_maxsegs = 1;
1924 ctx.sc = sc;
1925
1926 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1927 sc->bge_cdata.bge_rx_return_ring_map,
1928 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1929 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1930
1931 if (error)
1932 return (ENOMEM);
1933
1934 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1935
1936 /* Create tag for TX ring */
1937
1938 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1939 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1940 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1941 &sc->bge_cdata.bge_tx_ring_tag);
1942
1943 if (error) {
1944 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1945 return (ENOMEM);
1946 }
1947
1948 /* Allocate DMA'able memory for TX ring */
1949
1950 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1951 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1952 &sc->bge_cdata.bge_tx_ring_map);
1953 if (error)
1954 return (ENOMEM);
1955
1956 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1957
1958 /* Load the address of the TX ring */
1959
1960 ctx.bge_maxsegs = 1;
1961 ctx.sc = sc;
1962
1963 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1964 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1965 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1966
1967 if (error)
1968 return (ENOMEM);
1969
1970 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1971
1972 /* Create tag for status block */
1973
1974 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1975 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1976 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1977 NULL, NULL, &sc->bge_cdata.bge_status_tag);
1978
1979 if (error) {
1980 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1981 return (ENOMEM);
1982 }
1983
1984 /* Allocate DMA'able memory for status block */
1985
1986 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1987 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1988 &sc->bge_cdata.bge_status_map);
1989 if (error)
1990 return (ENOMEM);
1991
1992 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1993
1994 /* Load the address of the status block */
1995
1996 ctx.sc = sc;
1997 ctx.bge_maxsegs = 1;
1998
1999 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2000 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2001 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2002
2003 if (error)
2004 return (ENOMEM);
2005
2006 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2007
2008 /* Create tag for statistics block */
2009
2010 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2011 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2012 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2013 &sc->bge_cdata.bge_stats_tag);
2014
2015 if (error) {
2016 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2017 return (ENOMEM);
2018 }
2019
2020 /* Allocate DMA'able memory for statistics block */
2021
2022 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2023 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2024 &sc->bge_cdata.bge_stats_map);
2025 if (error)
2026 return (ENOMEM);
2027
2028 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2029
2030	/* Load the address of the statistics block */
2031
2032 ctx.sc = sc;
2033 ctx.bge_maxsegs = 1;
2034
2035 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2036 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2037 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2038
2039 if (error)
2040 return (ENOMEM);
2041
2042 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2043
2044 return(0);
2045}
2046
2047static int
2048bge_attach(dev)
2049 device_t dev;
2050{
2051 struct ifnet *ifp;
2052 struct bge_softc *sc;
2053 u_int32_t hwcfg = 0;
2054 u_int32_t mac_tmp = 0;
2055 u_char eaddr[6];
2056 int error = 0, rid;
2057
2058 sc = device_get_softc(dev);
2059 sc->bge_dev = dev;
2060
2061 /*
2062 * Map control/status registers.
2063 */
2064 pci_enable_busmaster(dev);
2065
2066 rid = BGE_PCI_BAR0;
2067 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2068 RF_ACTIVE|PCI_RF_DENSE);
2069
2070 if (sc->bge_res == NULL) {
2071 device_printf (sc->bge_dev, "couldn't map memory\n");
2072 error = ENXIO;
2073 goto fail;
2074 }
2075
2076 sc->bge_btag = rman_get_bustag(sc->bge_res);
2077 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2078
2079 /* Allocate interrupt */
2080 rid = 0;
2081
2082 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2083 RF_SHAREABLE | RF_ACTIVE);
2084
2085 if (sc->bge_irq == NULL) {
2086 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2087 error = ENXIO;
2088 goto fail;
2089 }
2090
2091 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2092
2093 /* Save ASIC rev. */
2094
2095 sc->bge_chipid =
2096 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2097 BGE_PCIMISCCTL_ASICREV;
2098 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2099 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2100
2101 /*
2102 * Treat the 5714 and the 5752 like the 5750 until we have more info
2103 * on this chip.
2104 */
2105 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2106 sc->bge_asicrev == BGE_ASICREV_BCM5752)
2107 sc->bge_asicrev = BGE_ASICREV_BCM5750;
2108
2109 /*
2110	 * XXX: Broadcom Linux driver. Not in specs or errata.
2111 * PCI-Express?
2112 */
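	/*
	 * This walks the capability list by hand: byte 1 of the MSI
	 * capability header is the next-capability pointer; if it points
	 * at BGE_PCIE_CAPID_REG, the capability ID found there is
	 * compared against the PCI Express capability ID.
	 */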
2113 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2114 u_int32_t v;
2115
2116 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2117 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2118 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2119 if ((v & 0xff) == BGE_PCIE_CAPID)
2120 sc->bge_pcie = 1;
2121 }
2122 }
2123
2124 /* Try to reset the chip. */
2125 bge_reset(sc);
2126
2127 if (bge_chipinit(sc)) {
2128 device_printf(sc->bge_dev, "chip initialization failed\n");
2129 bge_release_resources(sc);
2130 error = ENXIO;
2131 goto fail;
2132 }
2133
2134 /*
2135 * Get station address from the EEPROM.
2136 */
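	/*
	 * The firmware normally leaves a copy of the station address in
	 * NIC memory at offset 0x0c14, preceded by the 16-bit ASCII
	 * signature 'HK' (0x484b); if the signature is missing, fall
	 * back to reading the address from the EEPROM.
	 */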
2137 mac_tmp = bge_readmem_ind(sc, 0x0c14);
2138 if ((mac_tmp >> 16) == 0x484b) {
2139 eaddr[0] = (u_char)(mac_tmp >> 8);
2140 eaddr[1] = (u_char)mac_tmp;
2141 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2142 eaddr[2] = (u_char)(mac_tmp >> 24);
2143 eaddr[3] = (u_char)(mac_tmp >> 16);
2144 eaddr[4] = (u_char)(mac_tmp >> 8);
2145 eaddr[5] = (u_char)mac_tmp;
2146 } else if (bge_read_eeprom(sc, eaddr,
2147 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2148 device_printf(sc->bge_dev, "failed to read station address\n");
2149 bge_release_resources(sc);
2150 error = ENXIO;
2151 goto fail;
2152 }
2153
2154 /* 5705 limits RX return ring to 512 entries. */
2155 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2156 sc->bge_asicrev == BGE_ASICREV_BCM5750)
2157 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2158 else
2159 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2160
2161 if (bge_dma_alloc(dev)) {
2162 device_printf(sc->bge_dev,
2163 "failed to allocate DMA resources\n");
2164 bge_release_resources(sc);
2165 error = ENXIO;
2166 goto fail;
2167 }
2168
2169 /* Set default tuneable values. */
2170 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2171 sc->bge_rx_coal_ticks = 150;
2172 sc->bge_tx_coal_ticks = 150;
2173 sc->bge_rx_max_coal_bds = 64;
2174 sc->bge_tx_max_coal_bds = 128;
2175
2176 /* Set up ifnet structure */
2177 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2178 if (ifp == NULL) {
2179 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2180 bge_release_resources(sc);
2181 error = ENXIO;
2182 goto fail;
2183 }
2184 ifp->if_softc = sc;
2185 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2186 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2187 ifp->if_ioctl = bge_ioctl;
2188 ifp->if_start = bge_start;
2189 ifp->if_watchdog = bge_watchdog;
2190 ifp->if_init = bge_init;
2191 ifp->if_mtu = ETHERMTU;
2192 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2193 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2194 IFQ_SET_READY(&ifp->if_snd);
2195 ifp->if_hwassist = BGE_CSUM_FEATURES;
2196 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2197 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2198 ifp->if_capenable = ifp->if_capabilities;
2199#ifdef DEVICE_POLLING
2200 ifp->if_capabilities |= IFCAP_POLLING;
2201#endif
2202
2203 /*
2204 * 5700 B0 chips do not support checksumming correctly due
2205 * to hardware bugs.
2206 */
2207 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2208 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2209		ifp->if_capenable &= ~IFCAP_HWCSUM;
2210 ifp->if_hwassist = 0;
2211 }
2212
2213 /*
2214 * Figure out what sort of media we have by checking the
2215 * hardware config word in the first 32k of NIC internal memory,
2216 * or fall back to examining the EEPROM if necessary.
2217 * Note: on some BCM5700 cards, this value appears to be unset.
2218 * If that's the case, we have to rely on identifying the NIC
2219 * by its PCI subsystem ID, as we do below for the SysKonnect
2220 * SK-9D41.
2221 */
2222 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2223 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2224 else {
2225 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2226 sizeof(hwcfg))) {
2227 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2228 bge_release_resources(sc);
2229 error = ENXIO;
2230 goto fail;
2231 }
2232 hwcfg = ntohl(hwcfg);
2233 }
2234
2235 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2236 sc->bge_tbi = 1;
2237
2238 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2239 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2240 sc->bge_tbi = 1;
2241
2242 if (sc->bge_tbi) {
2243 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2244 bge_ifmedia_upd, bge_ifmedia_sts);
2245 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2246 ifmedia_add(&sc->bge_ifmedia,
2247 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2248 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2249 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2250 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2251 } else {
2252 /*
2253 * Do transceiver setup.
2254 */
2255 if (mii_phy_probe(dev, &sc->bge_miibus,
2256 bge_ifmedia_upd, bge_ifmedia_sts)) {
2257 device_printf(sc->bge_dev, "MII without any PHY!\n");
2258 bge_release_resources(sc);
2259 error = ENXIO;
2260 goto fail;
2261 }
2262 }
2263
2264 /*
2265 * When using the BCM5701 in PCI-X mode, data corruption has
2266 * been observed in the first few bytes of some received packets.
2267 * Aligning the packet buffer in memory eliminates the corruption.
2268 * Unfortunately, this misaligns the packet payloads. On platforms
2269 * which do not support unaligned accesses, we will realign the
2270 * payloads by copying the received packets.
2271 */
2272 switch (sc->bge_chipid) {
2273 case BGE_CHIPID_BCM5701_A0:
2274 case BGE_CHIPID_BCM5701_B0:
2275 case BGE_CHIPID_BCM5701_B2:
2276 case BGE_CHIPID_BCM5701_B5:
2277 /* If in PCI-X mode, work around the alignment bug. */
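		/*
		 * BGE_PCISTATE_PCI_BUSMODE is set on a conventional PCI
		 * bus, so the test below applies the workaround only when
		 * the chip is in PCI-X mode with the bus-speed bit set.
		 */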
2278 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2279 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2280 BGE_PCISTATE_PCI_BUSSPEED)
2281 sc->bge_rx_alignment_bug = 1;
2282 break;
2283 }
2284
2285 /*
2286 * Call MI attach routine.
2287 */
2288 ether_ifattach(ifp, eaddr);
2289 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2290
2291 /*
2292 * Hookup IRQ last.
2293 */
2294 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2295 bge_intr, sc, &sc->bge_intrhand);
2296
2297 if (error) {
2298 bge_detach(dev);
2299 device_printf(sc->bge_dev, "couldn't set up irq\n");
2300 }
2301
2302fail:
2303 return(error);
2304}
2305
2306static int
2307bge_detach(dev)
2308 device_t dev;
2309{
2310 struct bge_softc *sc;
2311 struct ifnet *ifp;
2312
2313 sc = device_get_softc(dev);
2314 ifp = sc->bge_ifp;
2315
2316#ifdef DEVICE_POLLING
2317 if (ifp->if_capenable & IFCAP_POLLING)
2318 ether_poll_deregister(ifp);
2319#endif
2320
2321 BGE_LOCK(sc);
2322 bge_stop(sc);
2323 bge_reset(sc);
2324 BGE_UNLOCK(sc);
2325
2326 ether_ifdetach(ifp);
2327
2328 if (sc->bge_tbi) {
2329 ifmedia_removeall(&sc->bge_ifmedia);
2330 } else {
2331 bus_generic_detach(dev);
2332 device_delete_child(dev, sc->bge_miibus);
2333 }
2334
2335 bge_release_resources(sc);
2336
2337 return(0);
2338}
2339
2340static void
2341bge_release_resources(sc)
2342 struct bge_softc *sc;
2343{
2344 device_t dev;
2345
2346 dev = sc->bge_dev;
2347
2348 if (sc->bge_vpd_prodname != NULL)
2349 free(sc->bge_vpd_prodname, M_DEVBUF);
2350
2351 if (sc->bge_vpd_readonly != NULL)
2352 free(sc->bge_vpd_readonly, M_DEVBUF);
2353
2354 if (sc->bge_intrhand != NULL)
2355 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2356
2357 if (sc->bge_irq != NULL)
2358 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2359
2360 if (sc->bge_res != NULL)
2361 bus_release_resource(dev, SYS_RES_MEMORY,
2362 BGE_PCI_BAR0, sc->bge_res);
2363
2364 if (sc->bge_ifp != NULL)
2365 if_free(sc->bge_ifp);
2366
2367 bge_dma_free(sc);
2368
2369 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2370 BGE_LOCK_DESTROY(sc);
2371
2372 return;
2373}
2374
2375static void
2376bge_reset(sc)
2377 struct bge_softc *sc;
2378{
2379 device_t dev;
2380 u_int32_t cachesize, command, pcistate, reset;
2381 int i, val = 0;
2382
2383 dev = sc->bge_dev;
2384
2385 /* Save some important PCI state. */
2386 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2387 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2388 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2389
2390 pci_write_config(dev, BGE_PCI_MISC_CTL,
2391 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2392 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2393
2394 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2395
2396 /* XXX: Broadcom Linux driver. */
2397 if (sc->bge_pcie) {
2398 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2399 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2400 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2401 /* Prevent PCIE link training during global reset */
2402 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2403 reset |= (1<<29);
2404 }
2405 }
2406
2407 /* Issue global reset */
2408 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2409
2410 DELAY(1000);
2411
2412 /* XXX: Broadcom Linux driver. */
2413 if (sc->bge_pcie) {
2414 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2415 uint32_t v;
2416
2417 DELAY(500000); /* wait for link training to complete */
2418 v = pci_read_config(dev, 0xc4, 4);
2419 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2420 }
2421 /* Set PCIE max payload size and clear error status. */
2422 pci_write_config(dev, 0xd8, 0xf5000, 4);
2423 }
2424
2425 /* Reset some of the PCI state that got zapped by reset */
2426 pci_write_config(dev, BGE_PCI_MISC_CTL,
2427 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2428 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2429 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2430 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2431 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2432
2433 /* Enable memory arbiter. */
2434 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2435 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2436 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2437
2438 /*
2439 * Prevent PXE restart: write a magic number to the
2440 * general communications memory at 0xB50.
2441 */
2442 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2443 /*
2444 * Poll the value location we just wrote until
2445 * we see the 1's complement of the magic number.
2446 * This indicates that the firmware initialization
2447 * is complete.
2448 */
2449 for (i = 0; i < BGE_TIMEOUT; i++) {
2450 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2451 if (val == ~BGE_MAGIC_NUMBER)
2452 break;
2453 DELAY(10);
2454 }
2455
2456 if (i == BGE_TIMEOUT) {
2457 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2458 return;
2459 }
2460
2461 /*
2462 * XXX Wait for the value of the PCISTATE register to
2463 * return to its original pre-reset state. This is a
2464 * fairly good indicator of reset completion. If we don't
2465 * wait for the reset to fully complete, trying to read
2466 * from the device's non-PCI registers may yield garbage
2467 * results.
2468 */
2469 for (i = 0; i < BGE_TIMEOUT; i++) {
2470 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2471 break;
2472 DELAY(10);
2473 }
2474
2475 /* Fix up byte swapping */
2476 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2477 BGE_MODECTL_BYTESWAP_DATA);
2478
2479 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2480
2481 /*
2482 * The 5704 in TBI mode apparently needs some special
2483	 * adjustment to ensure the SERDES drive level is set
2484 * to 1.2V.
2485 */
2486 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2487 uint32_t serdescfg;
2488 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2489 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2490 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2491 }
2492
2493 /* XXX: Broadcom Linux driver. */
2494 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2495 uint32_t v;
2496
2497 v = CSR_READ_4(sc, 0x7c00);
2498 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2499 }
2500 DELAY(10000);
2501
2502 return;
2503}
2504
2505/*
2506 * Frame reception handling. This is called if there's a frame
2507 * on the receive return list.
2508 *
2509 * Note: we have to be able to handle two possibilities here:
2510 * 1) the frame is from the jumbo receive ring
2511 * 2) the frame is from the standard receive ring
2512 */
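/*
 * Whether a completed frame came from the jumbo or the standard ring is
 * indicated by the BGE_RXBDFLAG_JUMBO_RING flag in its return descriptor.
 */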
2513
2514static void
2515bge_rxeof(sc)
2516 struct bge_softc *sc;
2517{
2518 struct ifnet *ifp;
2519 int stdcnt = 0, jumbocnt = 0;
2520
2521 BGE_LOCK_ASSERT(sc);
2522
2523 ifp = sc->bge_ifp;
2524
2525 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2526 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2527 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2528 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2529 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2530 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2531 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2532 sc->bge_cdata.bge_rx_jumbo_ring_map,
2533 BUS_DMASYNC_POSTREAD);
2534 }
2535
2536 while(sc->bge_rx_saved_considx !=
2537 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2538 struct bge_rx_bd *cur_rx;
2539 u_int32_t rxidx;
2540 struct ether_header *eh;
2541 struct mbuf *m = NULL;
2542 u_int16_t vlan_tag = 0;
2543 int have_tag = 0;
2544
2545#ifdef DEVICE_POLLING
2546 if (ifp->if_capenable & IFCAP_POLLING) {
2547 if (sc->rxcycles <= 0)
2548 break;
2549 sc->rxcycles--;
2550 }
2551#endif
2552
2553 cur_rx =
2554 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2555
2556 rxidx = cur_rx->bge_idx;
2557 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2558
2559 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2560 have_tag = 1;
2561 vlan_tag = cur_rx->bge_vlan_tag;
2562 }
2563
2564 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2565 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2566 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2567 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2568 BUS_DMASYNC_POSTREAD);
2569 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2570 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2571 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2572 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2573 jumbocnt++;
2574 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2575 ifp->if_ierrors++;
2576 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2577 continue;
2578 }
2579 if (bge_newbuf_jumbo(sc,
2580 sc->bge_jumbo, NULL) == ENOBUFS) {
2581 ifp->if_ierrors++;
2582 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2583 continue;
2584 }
2585 } else {
2586 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2587 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2588 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2589 BUS_DMASYNC_POSTREAD);
2590 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2591 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2592 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2593 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2594 stdcnt++;
2595 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2596 ifp->if_ierrors++;
2597 bge_newbuf_std(sc, sc->bge_std, m);
2598 continue;
2599 }
2600 if (bge_newbuf_std(sc, sc->bge_std,
2601 NULL) == ENOBUFS) {
2602 ifp->if_ierrors++;
2603 bge_newbuf_std(sc, sc->bge_std, m);
2604 continue;
2605 }
2606 }
2607
2608 ifp->if_ipackets++;
2609#ifndef __NO_STRICT_ALIGNMENT
2610 /*
2611 * For architectures with strict alignment we must make sure
2612 * the payload is aligned.
2613 */
2614 if (sc->bge_rx_alignment_bug) {
2615 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2616 cur_rx->bge_len);
2617 m->m_data += ETHER_ALIGN;
2618 }
2619#endif
2620 eh = mtod(m, struct ether_header *);
2621 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2622 m->m_pkthdr.rcvif = ifp;
2623
2624 if (ifp->if_capenable & IFCAP_RXCSUM) {
2625 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2626 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2627 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2628 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2629 }
2630 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2631 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2632 m->m_pkthdr.csum_data =
2633 cur_rx->bge_tcp_udp_csum;
2634 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2635 }
2636 }
2637
2638 /*
2639 * If we received a packet with a vlan tag,
2640 * attach that information to the packet.
2641 */
2642 if (have_tag) {
2643 VLAN_INPUT_TAG(ifp, m, vlan_tag);
2644 if (m == NULL)
2645 continue;
2646 }
2647
2648 BGE_UNLOCK(sc);
2649 (*ifp->if_input)(ifp, m);
2650 BGE_LOCK(sc);
2651 }
2652
2653 if (stdcnt > 0)
2654 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2655 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2656 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2657 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2658 if (jumbocnt > 0)
2659 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2660 sc->bge_cdata.bge_rx_jumbo_ring_map,
2661 BUS_DMASYNC_PREWRITE);
2662 }
2663
2664 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2665 if (stdcnt)
2666 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2667 if (jumbocnt)
2668 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2669
2670 return;
2671}
2672
2673static void
2674bge_txeof(sc)
2675 struct bge_softc *sc;
2676{
2677 struct bge_tx_bd *cur_tx = NULL;
2678 struct ifnet *ifp;
2679
2680 BGE_LOCK_ASSERT(sc);
2681
2682 ifp = sc->bge_ifp;
2683
2684 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2685 sc->bge_cdata.bge_tx_ring_map,
2686 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2687 /*
2688 * Go through our tx ring and free mbufs for those
2689 * frames that have been sent.
2690 */
2691 while (sc->bge_tx_saved_considx !=
2692 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2693 u_int32_t idx = 0;
2694
2695 idx = sc->bge_tx_saved_considx;
2696 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2697 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2698 ifp->if_opackets++;
2699 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2700 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2701 sc->bge_cdata.bge_tx_dmamap[idx],
2702 BUS_DMASYNC_POSTWRITE);
2703 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2704 sc->bge_cdata.bge_tx_dmamap[idx]);
2705 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2706 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2707 }
2708 sc->bge_txcnt--;
2709 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2710 ifp->if_timer = 0;
2711 }
2712
2713 if (cur_tx != NULL)
2714 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2715
2716 return;
2717}
2718
2719#ifdef DEVICE_POLLING
2720static void
2721bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2722{
2723 struct bge_softc *sc = ifp->if_softc;
2724
2725 BGE_LOCK(sc);
2726 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2727 bge_poll_locked(ifp, cmd, count);
2728 BGE_UNLOCK(sc);
2729}
2730
2731static void
2732bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2733{
2734 struct bge_softc *sc = ifp->if_softc;
2735
2736 BGE_LOCK_ASSERT(sc);
2737
2738 sc->rxcycles = count;
2739 bge_rxeof(sc);
2740 bge_txeof(sc);
2741 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2742 bge_start_locked(ifp);
2743
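	/*
	 * For a POLL_AND_CHECK_STATUS command, also sample the status
	 * block for link-change events, mirroring what bge_intr() does
	 * when interrupts are in use.
	 */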
2744 if (cmd == POLL_AND_CHECK_STATUS) {
2745 uint32_t statusword;
2746
2747 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2748 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2749
2750 statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2751
2752 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2753 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2754 statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2755 bge_link_upd(sc);
2756
2757 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2758 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2759 }
2760}
2761#endif /* DEVICE_POLLING */
2762
2763static void
2764bge_intr(xsc)
2765 void *xsc;
2766{
2767 struct bge_softc *sc;
2768 struct ifnet *ifp;
2769 uint32_t statusword;
2770
2771 sc = xsc;
2772
2773 BGE_LOCK(sc);
2774
2775 ifp = sc->bge_ifp;
2776
2777#ifdef DEVICE_POLLING
2778 if (ifp->if_capenable & IFCAP_POLLING) {
2779 BGE_UNLOCK(sc);
2780 return;
2781 }
2782#endif
2783
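	/*
	 * The NIC updates the status block by DMA, so sync it for reading,
	 * then fetch and clear the status word in a single atomic step
	 * before the block is handed back to the hardware.
	 */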
2784 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2785 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2786
2787 statusword =
2788 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2789
2790 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2791 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2792
2793#ifdef notdef
2794 /* Avoid this for now -- checking this register is expensive. */
2795 /* Make sure this is really our interrupt. */
2796 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2797 return;
2798#endif
2799 /* Ack interrupt and stop others from occurring. */
2800 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2801
2802 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2803 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2801 statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2804 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || sc->bge_link_evt)
2805 bge_link_upd(sc);
2806
2807 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2808 /* Check RX return ring producer/consumer */
2809 bge_rxeof(sc);
2810
2811 /* Check TX ring producer/consumer */
2812 bge_txeof(sc);
2813 }
2814
2815 /* Re-enable interrupts. */
2816 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2817
2818 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2819 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2820 bge_start_locked(ifp);
2821
2822 BGE_UNLOCK(sc);
2823
2824 return;
2825}
2826
2827static void
2828bge_tick_locked(sc)
2829 struct bge_softc *sc;
2830{
2831 struct mii_data *mii = NULL;
2829 struct ifnet *ifp;
2832
2833 BGE_LOCK_ASSERT(sc);
2834
2833 ifp = sc->bge_ifp;
2834
2835 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2836 sc->bge_asicrev == BGE_ASICREV_BCM5750)
2837 bge_stats_update_regs(sc);
2838 else
2839 bge_stats_update(sc);
2840
2841 if (!sc->bge_tbi) {
2842 mii = device_get_softc(sc->bge_miibus);
2843 mii_tick(mii);
2844 } else {
2845 /*
2846 * Since in TBI mode auto-polling can't be used we should poll
2847 * link status manually. Here we register pending link event
2848 * and trigger interrupt.
2849 */
2850#ifdef DEVICE_POLLING
2851 /* In polling mode we poll link state in bge_poll_locked() */
2852 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2853#endif
2854 {
2855 sc->bge_link_evt++;
2856 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2857 }
2844 }
2845
2846 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2847}
2848
2849static void
2850bge_tick(xsc)
2851 void *xsc;
2852{
2853 struct bge_softc *sc;
2854
2855 sc = xsc;
2856
2857 BGE_LOCK(sc);
2858 bge_tick_locked(sc);
2859 BGE_UNLOCK(sc);
2860}
2861
2862static void
2863bge_stats_update_regs(sc)
2864 struct bge_softc *sc;
2865{
2866 struct ifnet *ifp;
2867 struct bge_mac_stats_regs stats;
2868 u_int32_t *s;
2869 u_long cnt; /* current register value */
2870 int i;
2871
2872 ifp = sc->bge_ifp;
2873
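	/*
	 * Snapshot the MAC statistics registers into a local structure,
	 * one 32-bit word at a time, starting at BGE_RX_STATS.
	 */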
2874 s = (u_int32_t *)&stats;
2875 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2876 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2877 s++;
2878 }
2879
2880 cnt = stats.dot3StatsSingleCollisionFrames +
2881 stats.dot3StatsMultipleCollisionFrames +
2882 stats.dot3StatsExcessiveCollisions +
2883 stats.dot3StatsLateCollisions;
2884 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2885 cnt - sc->bge_tx_collisions : cnt;
2886 sc->bge_tx_collisions = cnt;
2887}
2888
2889static void
2890bge_stats_update(sc)
2891 struct bge_softc *sc;
2892{
2893 struct ifnet *ifp;
2894 bus_size_t stats;
2895 u_long cnt; /* current register value */
2896
2897 ifp = sc->bge_ifp;
2898
2899 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2900
2901#define READ_STAT(sc, stats, stat) \
2902 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
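/*
 * READ_STAT() fetches the low 32 bits of a counter from the statistics
 * block that the NIC maintains in its on-board memory (reached through
 * the PCI memory window).  The "cnt >= saved ? cnt - saved : cnt"
 * pattern below presumably guards against the counters being reset
 * underneath us, e.g. across a chip reset.
 */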
2903
2904 cnt = READ_STAT(sc, stats,
2905 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2906 cnt += READ_STAT(sc, stats,
2907 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2908 cnt += READ_STAT(sc, stats,
2909 txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2910 cnt += READ_STAT(sc, stats,
2911 txstats.dot3StatsLateCollisions.bge_addr_lo);
2912 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2913 cnt - sc->bge_tx_collisions : cnt;
2914 sc->bge_tx_collisions = cnt;
2915
2916 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2917 ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2918 cnt - sc->bge_rx_discards : cnt;
2919 sc->bge_rx_discards = cnt;
2920
2921 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2922 ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2923 cnt - sc->bge_tx_discards : cnt;
2924 sc->bge_tx_discards = cnt;
2925
2926#undef READ_STAT
2927}
2928
2929/*
2930 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2931 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2932 * but when such padded frames employ the bge IP/TCP checksum offload,
2933 * the hardware checksum assist gives incorrect results (possibly
2934 * from incorporating its own padding into the UDP/TCP checksum; who knows).
2935 * If we pad such runts with zeros, the onboard checksum comes out correct.
2936 */
2937static __inline int
2938bge_cksum_pad(struct mbuf *m)
2939{
2940 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2941 struct mbuf *last;
2942
2943 /* If there's only the packet-header and we can pad there, use it. */
2944 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2945 M_TRAILINGSPACE(m) >= padlen) {
2946 last = m;
2947 } else {
2948 /*
2949 * Walk packet chain to find last mbuf. We will either
2950 * pad there, or append a new mbuf and pad it.
2951 */
2952 for (last = m; last->m_next != NULL; last = last->m_next);
2953 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2954 /* Allocate new empty mbuf, pad it. Compact later. */
2955 struct mbuf *n;
2956
2957 MGET(n, M_DONTWAIT, MT_DATA);
2958 if (n == NULL)
2959 return (ENOBUFS);
2960 n->m_len = 0;
2961 last->m_next = n;
2962 last = n;
2963 }
2964 }
2965
2966 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
2967 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2968 last->m_len += padlen;
2969 m->m_pkthdr.len += padlen;
2970
2971 return (0);
2972}
2973
2974/*
2975 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2976 * pointers to descriptors.
2977 */
2978static int
2979bge_encap(sc, m_head, txidx)
2980 struct bge_softc *sc;
2981 struct mbuf *m_head;
2982 uint32_t *txidx;
2983{
2984 bus_dma_segment_t segs[BGE_NSEG_NEW];
2985 bus_dmamap_t map;
2986 struct bge_tx_bd *d = NULL;
2987 struct m_tag *mtag;
2988 uint32_t idx = *txidx;
2989 uint16_t csum_flags = 0;
2990 int nsegs, i, error;
2991
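	/*
	 * Map any checksum-offload requests carried on this mbuf onto the
	 * corresponding BGE descriptor flags.  Short frames are zero-padded
	 * first (see bge_cksum_pad() above) to keep the hardware checksum
	 * assist from producing bad results.
	 */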
2992 if (m_head->m_pkthdr.csum_flags) {
2993 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2994 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2995 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
2996 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2997 if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2998 bge_cksum_pad(m_head) != 0)
2999 return (ENOBUFS);
3000 }
3001 if (m_head->m_flags & M_LASTFRAG)
3002 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3003 else if (m_head->m_flags & M_FRAG)
3004 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3005 }
3006
3007 mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3008
3009 map = sc->bge_cdata.bge_tx_dmamap[idx];
3010 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
3011 m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3012 if (error) {
3013 if (error == EFBIG) {
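			/*
			 * The mbuf chain needs more DMA segments than the
			 * tag allows; collapse it into a single mbuf with
			 * m_defrag() and retry the load once.
			 */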
3014 struct mbuf *m0;
3015
3016 m0 = m_defrag(m_head, M_DONTWAIT);
3017 if (m0 == NULL)
3018 return (ENOBUFS);
3019 m_head = m0;
3020 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
3021 map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3022 }
3023 if (error)
3024 return (error);
3025 }
3026
3027 /*
3028 * Sanity check: avoid coming within 16 descriptors
3029 * of the end of the ring.
3030 */
3031 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3032 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3033 return (ENOBUFS);
3034 }
3035
3036 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3037
3038 for (i = 0; ; i++) {
3039 d = &sc->bge_ldata.bge_tx_ring[idx];
3040 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3041 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3042 d->bge_len = segs[i].ds_len;
3043 d->bge_flags = csum_flags;
3044 if (i == nsegs - 1)
3045 break;
3046 BGE_INC(idx, BGE_TX_RING_CNT);
3047 }
3048
3049 /* Mark the last segment as end of packet... */
3050 d->bge_flags |= BGE_TXBDFLAG_END;
3051 /* ... and put VLAN tag into first segment. */
3052 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3053 if (mtag != NULL) {
3054 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3055 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3056 } else
3057 d->bge_vlan_tag = 0;
3058
3059 /*
3060 * Ensure that the map for this transmission
3061 * is placed at the array index of the last descriptor
3062 * in this chain.
3063 */
3064 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3065 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3066 sc->bge_cdata.bge_tx_chain[idx] = m_head;
3067 sc->bge_txcnt += nsegs;
3068
3069 BGE_INC(idx, BGE_TX_RING_CNT);
3070 *txidx = idx;
3071
3072 return (0);
3073}
3074
3075/*
3076 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3077 * to the mbuf data regions directly in the transmit descriptors.
3078 */
3079static void
3080bge_start_locked(ifp)
3081 struct ifnet *ifp;
3082{
3083 struct bge_softc *sc;
3084 struct mbuf *m_head = NULL;
3085 uint32_t prodidx;
3086 int count = 0;
3087
3088 sc = ifp->if_softc;
3089
3090 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3091 return;
3092
3093 prodidx = sc->bge_tx_prodidx;
3094
3095 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3096 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3097 if (m_head == NULL)
3098 break;
3099
3100 /*
3101 * XXX
3102 * The code inside the if() block is never reached since we
3103 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3104 * requests to checksum TCP/UDP in a fragmented packet.
3105 *
3106 * XXX
3107 * safety overkill. If this is a fragmented packet chain
3108 * with delayed TCP/UDP checksums, then only encapsulate
3109 * it if we have enough descriptors to handle the entire
3110 * chain at once.
3111 * (paranoia -- may not actually be needed)
3112 */
3113 if (m_head->m_flags & M_FIRSTFRAG &&
3114 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3115 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3116 m_head->m_pkthdr.csum_data + 16) {
3117 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3118 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3119 break;
3120 }
3121 }
3122
3123 /*
3124 * Pack the data into the transmit ring. If we
3125 * don't have room, set the OACTIVE flag and wait
3126 * for the NIC to drain the ring.
3127 */
3128 if (bge_encap(sc, m_head, &prodidx)) {
3129 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3130 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3131 break;
3132 }
3133 ++count;
3134
3135 /*
3136 * If there's a BPF listener, bounce a copy of this frame
3137 * to him.
3138 */
3139 BPF_MTAP(ifp, m_head);
3140 }
3141
3142 if (count == 0) {
3143 /* no packets were dequeued */
3144 return;
3145 }
3146
3147 /* Transmit */
3148 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3149 /* 5700 b2 errata: the TX producer index mailbox must be written twice. */
3150 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3151 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3152
3153 sc->bge_tx_prodidx = prodidx;
3154
3155 /*
3156 * Set a timeout in case the chip goes out to lunch.
3157 */
3158 ifp->if_timer = 5;
3159
3160 return;
3161}
3162
3163/*
3164 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3165 * to the mbuf data regions directly in the transmit descriptors.
3166 */
3167static void
3168bge_start(ifp)
3169 struct ifnet *ifp;
3170{
3171 struct bge_softc *sc;
3172
3173 sc = ifp->if_softc;
3174 BGE_LOCK(sc);
3175 bge_start_locked(ifp);
3176 BGE_UNLOCK(sc);
3177}
3178
3179static void
3180bge_init_locked(sc)
3181 struct bge_softc *sc;
3182{
3183 struct ifnet *ifp;
3184 u_int16_t *m;
3185
3186 BGE_LOCK_ASSERT(sc);
3187
3188 ifp = sc->bge_ifp;
3189
3190 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3191 return;
3192
3193 /* Cancel pending I/O and flush buffers. */
3194 bge_stop(sc);
3195 bge_reset(sc);
3196 bge_chipinit(sc);
3197
3198 /*
3199 * Init the various state machines, ring
3200 * control blocks and firmware.
3201 */
3202 if (bge_blockinit(sc)) {
3203 device_printf(sc->bge_dev, "initialization failure\n");
3204 return;
3205 }
3206
3207 ifp = sc->bge_ifp;
3208
3209 /* Specify MTU. */
3210 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3211 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3212
3213 /* Load our MAC address. */
3214 m = (u_int16_t *)IF_LLADDR(sc->bge_ifp);
3215 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3216 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
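	/*
	 * The station address is programmed as three 16-bit words in
	 * network byte order (hence the htons() calls), packed into the
	 * two BGE_MAC_ADDR1 registers.
	 */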
3217
3218 /* Enable or disable promiscuous mode as needed. */
3219 if (ifp->if_flags & IFF_PROMISC) {
3220 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3221 } else {
3222 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3223 }
3224
3225 /* Program multicast filter. */
3226 bge_setmulti(sc);
3227
3228 /* Init RX ring. */
3229 bge_init_rx_ring_std(sc);
3230
3231 /*
3232 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3233 * memory to ensure that the chip has in fact read the first
3234 * entry of the ring.
3235 */
3236 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3237 u_int32_t v, i;
3238 for (i = 0; i < 10; i++) {
3239 DELAY(20);
3240 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3241 if (v == (MCLBYTES - ETHER_ALIGN))
3242 break;
3243 }
3244 if (i == 10)
3245 device_printf (sc->bge_dev,
3246 "5705 A0 chip failed to load RX ring\n");
3247 }
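	/*
	 * Note: the loop above re-reads the first standard RX descriptor
	 * from NIC-local memory; once the chip has fetched the ring entry,
	 * the word at offset 8 presumably reads back as the buffer length
	 * programmed by the RX ring setup code (MCLBYTES - ETHER_ALIGN).
	 */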
3248
3249 /* Init jumbo RX ring. */
3250 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3251 bge_init_rx_ring_jumbo(sc);
3252
3253 /* Init our RX return ring index */
3254 sc->bge_rx_saved_considx = 0;
3255
3256 /* Init TX ring. */
3257 bge_init_tx_ring(sc);
3258
3259 /* Turn on transmitter */
3260 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3261
3262 /* Turn on receiver */
3263 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3264
3265 /* Tell firmware we're alive. */
3266 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3267
3268#ifdef DEVICE_POLLING
3269 /* Disable interrupts if we are polling. */
3270 if (ifp->if_capenable & IFCAP_POLLING) {
3271 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3272 BGE_PCIMISCCTL_MASK_PCI_INTR);
3273 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3274 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3275 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3276 } else
3277#endif
3278
3279 /* Enable host interrupts. */
3280 {
3281 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3282 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3283 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3284 }
3285
3286 bge_ifmedia_upd(ifp);
3287
3288 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3289 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3290
3291 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3292}
3293
3294static void
3295bge_init(xsc)
3296 void *xsc;
3297{
3298 struct bge_softc *sc = xsc;
3299
3300 BGE_LOCK(sc);
3301 bge_init_locked(sc);
3302 BGE_UNLOCK(sc);
3303
3304 return;
3305}
3306
3307/*
3308 * Set media options.
3309 */
3310static int
3311bge_ifmedia_upd(ifp)
3312 struct ifnet *ifp;
3313{
3314 struct bge_softc *sc;
3315 struct mii_data *mii;
3316 struct ifmedia *ifm;
3317
3318 sc = ifp->if_softc;
3319 ifm = &sc->bge_ifmedia;
3320
3321 /* If this is a 1000baseX NIC, enable the TBI port. */
3322 if (sc->bge_tbi) {
3323 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3324 return(EINVAL);
3325 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3326 case IFM_AUTO:
3327#ifndef BGE_FAKE_AUTONEG
3328 /*
3329 * The BCM5704 ASIC appears to have a special
3330 * mechanism for programming the autoneg
3331 * advertisement registers in TBI mode.
3332 */
3333 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3334 uint32_t sgdig;
3335 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3336 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3337 sgdig |= BGE_SGDIGCFG_AUTO|
3338 BGE_SGDIGCFG_PAUSE_CAP|
3339 BGE_SGDIGCFG_ASYM_PAUSE;
3340 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3341 sgdig|BGE_SGDIGCFG_SEND);
3342 DELAY(5);
3343 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3344 }
3345#endif
3346 break;
3347 case IFM_1000_SX:
3348 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3349 BGE_CLRBIT(sc, BGE_MAC_MODE,
3350 BGE_MACMODE_HALF_DUPLEX);
3351 } else {
3352 BGE_SETBIT(sc, BGE_MAC_MODE,
3353 BGE_MACMODE_HALF_DUPLEX);
3354 }
3355 break;
3356 default:
3357 return(EINVAL);
3358 }
3359 return(0);
3360 }
3361
3362 mii = device_get_softc(sc->bge_miibus);
3363 if (mii->mii_instance) {
3364 struct mii_softc *miisc;
3365 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3366 miisc = LIST_NEXT(miisc, mii_list))
3367 mii_phy_reset(miisc);
3368 }
3369 mii_mediachg(mii);
3370
3371 return(0);
3372}
3373
3374/*
3375 * Report current media status.
3376 */
3377static void
3378bge_ifmedia_sts(ifp, ifmr)
3379 struct ifnet *ifp;
3380 struct ifmediareq *ifmr;
3381{
3382 struct bge_softc *sc;
3383 struct mii_data *mii;
3384
3385 sc = ifp->if_softc;
3386
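	/*
	 * In TBI (fiber) mode there is no copper PHY to query through MII,
	 * so link and duplex state are reported directly from the MAC
	 * status and mode registers.
	 */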
3387 if (sc->bge_tbi) {
3388 ifmr->ifm_status = IFM_AVALID;
3389 ifmr->ifm_active = IFM_ETHER;
3390 if (CSR_READ_4(sc, BGE_MAC_STS) &
3391 BGE_MACSTAT_TBI_PCS_SYNCHED)
3392 ifmr->ifm_status |= IFM_ACTIVE;
3393 ifmr->ifm_active |= IFM_1000_SX;
3394 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3395 ifmr->ifm_active |= IFM_HDX;
3396 else
3397 ifmr->ifm_active |= IFM_FDX;
3398 return;
3399 }
3400
3401 mii = device_get_softc(sc->bge_miibus);
3402 mii_pollstat(mii);
3403 ifmr->ifm_active = mii->mii_media_active;
3404 ifmr->ifm_status = mii->mii_media_status;
3405
3406 return;
3407}
3408
3409static int
3410bge_ioctl(ifp, command, data)
3411 struct ifnet *ifp;
3412 u_long command;
3413 caddr_t data;
3414{
3415 struct bge_softc *sc = ifp->if_softc;
3416 struct ifreq *ifr = (struct ifreq *) data;
3417 int mask, error = 0;
3418 struct mii_data *mii;
3419
3420 switch(command) {
3421 case SIOCSIFMTU:
3422 /* Disallow jumbo frames on 5705. */
3423 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3424 sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3425 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3426 error = EINVAL;
3427 else {
3428 ifp->if_mtu = ifr->ifr_mtu;
3429 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3430 bge_init(sc);
3431 }
3432 break;
3433 case SIOCSIFFLAGS:
3434 BGE_LOCK(sc);
3435 if (ifp->if_flags & IFF_UP) {
3436 /*
3437 * If only the state of the PROMISC flag changed,
3438 * then just use the 'set promisc mode' command
3439 * instead of reinitializing the entire NIC. Doing
3440 * a full re-init means reloading the firmware and
3441 * waiting for it to start up, which may take a
3442 * second or two. Similarly for ALLMULTI.
3443 */
3444 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3445 ifp->if_flags & IFF_PROMISC &&
3446 !(sc->bge_if_flags & IFF_PROMISC)) {
3447 BGE_SETBIT(sc, BGE_RX_MODE,
3448 BGE_RXMODE_RX_PROMISC);
3449 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3450 !(ifp->if_flags & IFF_PROMISC) &&
3451 sc->bge_if_flags & IFF_PROMISC) {
3452 BGE_CLRBIT(sc, BGE_RX_MODE,
3453 BGE_RXMODE_RX_PROMISC);
3454 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3455 (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3456 bge_setmulti(sc);
3457 } else
3458 bge_init_locked(sc);
3459 } else {
3460 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3461 bge_stop(sc);
3462 }
3463 }
3464 sc->bge_if_flags = ifp->if_flags;
3465 BGE_UNLOCK(sc);
3466 error = 0;
3467 break;
3468 case SIOCADDMULTI:
3469 case SIOCDELMULTI:
3470 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3471 BGE_LOCK(sc);
3472 bge_setmulti(sc);
3473 BGE_UNLOCK(sc);
3474 error = 0;
3475 }
3476 break;
3477 case SIOCSIFMEDIA:
3478 case SIOCGIFMEDIA:
3479 if (sc->bge_tbi) {
3480 error = ifmedia_ioctl(ifp, ifr,
3481 &sc->bge_ifmedia, command);
3482 } else {
3483 mii = device_get_softc(sc->bge_miibus);
3484 error = ifmedia_ioctl(ifp, ifr,
3485 &mii->mii_media, command);
3486 }
3487 break;
3488 case SIOCSIFCAP:
3489 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3490#ifdef DEVICE_POLLING
3491 if (mask & IFCAP_POLLING) {
3492 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3493 error = ether_poll_register(bge_poll, ifp);
3494 if (error)
3495 return(error);
3496 BGE_LOCK(sc);
3497 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3498 BGE_PCIMISCCTL_MASK_PCI_INTR);
3499 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3500 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3501 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3502 ifp->if_capenable |= IFCAP_POLLING;
3503 BGE_UNLOCK(sc);
3504 } else {
3505 error = ether_poll_deregister(ifp);
3506 /* Enable interrupt even in error case */
3507 BGE_LOCK(sc);
3508 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3509 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3510 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3511 BGE_PCIMISCCTL_MASK_PCI_INTR);
3512 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3513 ifp->if_capenable &= ~IFCAP_POLLING;
3514 BGE_UNLOCK(sc);
3515 }
3516 }
3517#endif
3518 if (mask & IFCAP_HWCSUM) {
3519 ifp->if_capenable ^= IFCAP_HWCSUM;
3520 if (IFCAP_HWCSUM & ifp->if_capenable &&
3521 IFCAP_HWCSUM & ifp->if_capabilities)
3522 ifp->if_hwassist = BGE_CSUM_FEATURES;
3523 else
3524 ifp->if_hwassist = 0;
3525 VLAN_CAPABILITIES(ifp);
3526 }
3527 break;
3528 default:
3529 error = ether_ioctl(ifp, command, data);
3530 break;
3531 }
3532
3533 return(error);
3534}
3535
3536static void
3537bge_watchdog(ifp)
3538 struct ifnet *ifp;
3539{
3540 struct bge_softc *sc;
3541
3542 sc = ifp->if_softc;
3543
3544 if_printf(ifp, "watchdog timeout -- resetting\n");
3545
3546 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3547 bge_init(sc);
3548
3549 ifp->if_oerrors++;
3550
3551 return;
3552}
3553
3554/*
3555 * Stop the adapter and free any mbufs allocated to the
3556 * RX and TX lists.
3557 */
3558static void
3559bge_stop(sc)
3560 struct bge_softc *sc;
3561{
3562 struct ifnet *ifp;
3563 struct ifmedia_entry *ifm;
3564 struct mii_data *mii = NULL;
3565 int mtmp, itmp;
3566
3567 BGE_LOCK_ASSERT(sc);
3568
3569 ifp = sc->bge_ifp;
3570
3571 if (!sc->bge_tbi)
3572 mii = device_get_softc(sc->bge_miibus);
3573
3574 callout_stop(&sc->bge_stat_ch);
3575
3576 /*
3577 * Disable all of the receiver blocks
3578 */
3579 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3580 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3581 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3582 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3583 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3584 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3585 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3586 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3587 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3588
3589 /*
3590 * Disable all of the transmit blocks
3591 */
3592 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3593 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3594 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3595 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3596 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3597 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3598 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3599 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3600 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3601
3602 /*
3603 * Shut down all of the memory managers and related
3604 * state machines.
3605 */
3606 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3607 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3608 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3609 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3610 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3611 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3612 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3613 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3614 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3615 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3616 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3617 }
3618
3619 /* Disable host interrupts. */
3620 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3621 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3622
3623 /*
3624 * Tell firmware we're shutting down.
3625 */
3626 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3627
3628 /* Free the RX lists. */
3629 bge_free_rx_ring_std(sc);
3630
3631 /* Free jumbo RX list. */
3632 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3633 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3634 bge_free_rx_ring_jumbo(sc);
3635
3636 /* Free TX buffers. */
3637 bge_free_tx_ring(sc);
3638
3639 /*
3640 * Isolate/power down the PHY, but leave the media selection
3641 * unchanged so that things will be put back to normal when
3642 * we bring the interface back up.
3643 */
3644 if (!sc->bge_tbi) {
3645 itmp = ifp->if_flags;
3646 ifp->if_flags |= IFF_UP;
3647 /*
3648 * If we are called from bge_detach(), mii is already NULL.
3649 */
3650 if (mii != NULL) {
3651 ifm = mii->mii_media.ifm_cur;
3652 mtmp = ifm->ifm_media;
3653 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3654 mii_mediachg(mii);
3655 ifm->ifm_media = mtmp;
3656 }
3657 ifp->if_flags = itmp;
3658 }
3659
3660 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3661
3662 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3663
3664 return;
3665}
3666
3667/*
3668 * Stop all chip I/O so that the kernel's probe routines don't
3669 * get confused by errant DMAs when rebooting.
3670 */
3671static void
3672bge_shutdown(dev)
3673 device_t dev;
3674{
3675 struct bge_softc *sc;
3676
3677 sc = device_get_softc(dev);
3678
3679 BGE_LOCK(sc);
3680 bge_stop(sc);
3681 bge_reset(sc);
3682 BGE_UNLOCK(sc);
3683
3684 return;
3685}
3686
3687static int
3688bge_suspend(device_t dev)
3689{
3690 struct bge_softc *sc;
3691
3692 sc = device_get_softc(dev);
3693 BGE_LOCK(sc);
3694 bge_stop(sc);
3695 BGE_UNLOCK(sc);
3696
3697 return (0);
3698}
3699
3700static int
3701bge_resume(device_t dev)
3702{
3703 struct bge_softc *sc;
3704 struct ifnet *ifp;
3705
3706 sc = device_get_softc(dev);
3707 BGE_LOCK(sc);
3708 ifp = sc->bge_ifp;
3709 if (ifp->if_flags & IFF_UP) {
3710 bge_init_locked(sc);
3711 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3712 bge_start_locked(ifp);
3713 }
3714 BGE_UNLOCK(sc);
3715
3716 return (0);
3717}
3718
3719static void
3720bge_link_upd(sc)
3721 struct bge_softc *sc;
3722{
3723 struct mii_data *mii;
3724 uint32_t link, status;
3725
3726 BGE_LOCK_ASSERT(sc);
3727
3742 /* Clear 'pending link event' flag */
3743 sc->bge_link_evt = 0;
3744
3728 /*
3729 * Process link state changes.
3730 * Grrr. The link status word in the status block does
3731 * not work correctly on the BCM5700 rev AX and BX chips,
3732 * according to all available information. Hence, we have
3733 * to enable MII interrupts in order to properly obtain
3734 * async link changes. Unfortunately, this also means that
3735 * we have to read the MAC status register to detect link
3736 * changes, thereby adding an additional register access to
3737 * the interrupt handler.
3738 *
3739 * XXX: perhaps the link state detection procedure used for
3740 * BGE_CHIPID_BCM5700_B1 can be used for other BCM5700 revisions.
3741 */
3742
3743 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3744 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) {
3745 status = CSR_READ_4(sc, BGE_MAC_STS);
3746 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3747 callout_stop(&sc->bge_stat_ch);
3748 bge_tick_locked(sc);
3749
3750 mii = device_get_softc(sc->bge_miibus);
3751 if (!sc->bge_link &&
3752 mii->mii_media_status & IFM_ACTIVE &&
3753 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3754 sc->bge_link++;
3755 if (bootverbose)
3756 if_printf(sc->bge_ifp, "link UP\n");
3757 } else if (sc->bge_link &&
3758 (!(mii->mii_media_status & IFM_ACTIVE) ||
3759 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3760 sc->bge_link = 0;
3761 if (bootverbose)
3762 if_printf(sc->bge_ifp, "link DOWN\n");
3763 }
3764
3765 /* Clear the interrupt */
3766 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3767 BGE_EVTENB_MI_INTERRUPT);
3768 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3769 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3770 BRGPHY_INTRS);
3771 }
3772 return;
3773 }
3774
3775 if (sc->bge_tbi) {
3776 /*
3777 * Sometimes PCS encoding errors are detected in
3778 * TBI mode (on fiber NICs), and for some reason
3779 * the chip will signal them as link changes.
3780 * If we get a link change event, but the 'PCS
3781 * encoding error' bit in the MAC status register
3782 * is set, don't bother doing a link check.
3783 * This avoids spurious "link UP" messages
3784 * that sometimes appear on fiber NICs during
3785 * periods of heavy traffic. (There should be no
3786 * effect on copper NICs.)
3787 */
3788 status = CSR_READ_4(sc, BGE_MAC_STS);
3789 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
3790 BGE_MACSTAT_MI_COMPLETE))) {
3791 if (!sc->bge_link &&
3792 (status & BGE_MACSTAT_TBI_PCS_SYNCHED)) {
3793 sc->bge_link++;
3794 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3795 BGE_CLRBIT(sc, BGE_MAC_MODE,
3796 BGE_MACMODE_TBI_SEND_CFGS);
3797 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3798 if (bootverbose)
3799 if_printf(sc->bge_ifp, "link UP\n");
3800 } else if (sc->bge_link) {
3801 sc->bge_link = 0;
3802 if (bootverbose)
3803 if_printf(sc->bge_ifp, "link DOWN\n");
3804 }
3805 }
3806 } else {
3807 /*
3808 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3809 * bit in the status word always set. Work around this bug by
3810 * reading the PHY link status directly.
3811 */
3812 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3813
3814 if (link != sc->bge_link ||
3815 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3816 callout_stop(&sc->bge_stat_ch);
3817 bge_tick_locked(sc);
3818
3819 mii = device_get_softc(sc->bge_miibus);
3820 if (!sc->bge_link &&
3821 mii->mii_media_status & IFM_ACTIVE &&
3822 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3823 sc->bge_link++;
3824 if (bootverbose)
3825 if_printf(sc->bge_ifp, "link UP\n");
3826 } else if (sc->bge_link &&
3827 (!(mii->mii_media_status & IFM_ACTIVE) ||
3828 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3829 sc->bge_link = 0;
3830 if (bootverbose)
3831 if_printf(sc->bge_ifp, "link DOWN\n");
3832 }
3833 }
3834 }
3835
3836 /* Clear the interrupt */
3837 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3838 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3839 BGE_MACSTAT_LINK_CHANGED);
3840}
3793 status = CSR_READ_4(sc, BGE_MAC_STS);
3794 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3795 if (!sc->bge_link) {
3796 sc->bge_link++;
3797 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3798 BGE_CLRBIT(sc, BGE_MAC_MODE,
3799 BGE_MACMODE_TBI_SEND_CFGS);
3800 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3801 if (bootverbose)
3802 if_printf(sc->bge_ifp, "link UP\n");
3803 if_link_state_change(sc->bge_ifp, LINK_STATE_UP);
3804 }
3805 } else if (sc->bge_link) {
3806 sc->bge_link = 0;
3807 if (bootverbose)
3808 if_printf(sc->bge_ifp, "link DOWN\n");
3809 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3810 }
3811 } else {
3812 /*
3813 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3814 * bit in the status word always set. Work around this bug by
3815 * reading the PHY link status directly.
3816 */
3817 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3818
3819 if (link != sc->bge_link ||
3820 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3821 callout_stop(&sc->bge_stat_ch);
3822 bge_tick_locked(sc);
3823
3824 mii = device_get_softc(sc->bge_miibus);
3825 if (!sc->bge_link &&
3826 mii->mii_media_status & IFM_ACTIVE &&
3827 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3828 sc->bge_link++;
3829 if (bootverbose)
3830 if_printf(sc->bge_ifp, "link UP\n");
3831 } else if (sc->bge_link &&
3832 (!(mii->mii_media_status & IFM_ACTIVE) ||
3833 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3834 sc->bge_link = 0;
3835 if (bootverbose)
3836 if_printf(sc->bge_ifp, "link DOWN\n");
3837 }
3838 }
3839 }
3840
3841 /* Clear the interrupt */
3842 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3843 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3844 BGE_MACSTAT_LINK_CHANGED);
3845}
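
For context, the sketch below illustrates the dispatch pattern the comments above describe: the interrupt path reads the MAC status register once and only enters the link-update routine when a link change, a configuration change, or an MII interrupt is pending. It is illustrative only and is not taken from if_bge.c; the handler name example_bge_intr() and the call target bge_link_upd() are assumptions, and it presumes the driver's struct bge_softc plus the CSR_READ_4()/BGE_MACSTAT_* macros already used above.

/*
 * Illustrative sketch only -- not part of if_bge.c.  The handler name
 * and the name of the link-update routine are assumed.
 */
static void
example_bge_intr(struct bge_softc *sc)
{
	uint32_t status;

	/* Read the MAC status register once per interrupt. */
	status = CSR_READ_4(sc, BGE_MAC_STS);

	/*
	 * Take the link-update path on a link or configuration change,
	 * or when the PHY raised an MII interrupt (needed on BCM5700
	 * rev AX/BX, where the status-block link bit is unreliable).
	 */
	if (status & (BGE_MACSTAT_LINK_CHANGED | BGE_MACSTAT_CFG_CHANGED |
	    BGE_MACSTAT_MI_INTERRUPT))
		bge_link_upd(sc);	/* assumed name of the routine above */
}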