1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 135359 2004-09-17 04:58:17Z wpaul $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#include <sys/param.h>
70#include <sys/endian.h>
71#include <sys/systm.h>
72#include <sys/sockio.h>
73#include <sys/mbuf.h>
74#include <sys/malloc.h>
75#include <sys/kernel.h>
76#include <sys/module.h>
77#include <sys/socket.h>
78#include <sys/queue.h>
79
80#include <net/if.h>
81#include <net/if_arp.h>
82#include <net/ethernet.h>
83#include <net/if_dl.h>
84#include <net/if_media.h>
85
86#include <net/bpf.h>
87
88#include <net/if_types.h>
89#include <net/if_vlan_var.h>
90
91#include <netinet/in_systm.h>
92#include <netinet/in.h>
93#include <netinet/ip.h>
94
95#include <machine/clock.h> /* for DELAY */
96#include <machine/bus_memio.h>
97#include <machine/bus.h>
98#include <machine/resource.h>
99#include <sys/bus.h>
100#include <sys/rman.h>
101
102#include <dev/mii/mii.h>
103#include <dev/mii/miivar.h>
104#include "miidevs.h"
105#include <dev/mii/brgphyreg.h>
106
107#include <dev/pci/pcireg.h>
108#include <dev/pci/pcivar.h>
109
110#include <dev/bge/if_bgereg.h>
111
112#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
113
114MODULE_DEPEND(bge, pci, 1, 1, 1);
115MODULE_DEPEND(bge, ether, 1, 1, 1);
116MODULE_DEPEND(bge, miibus, 1, 1, 1);
117
118/* "controller miibus0" required. See GENERIC if you get errors here. */
119#include "miibus_if.h"
120
121/*
122 * Various supported device vendors/types and their names. Note: the
123 * spec seems to indicate that the hardware still has Alteon's vendor
124 * ID burned into it, though it will always be overridden by the vendor
125 * ID in the EEPROM. Just to be safe, we cover all possibilities.
126 */
127#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
128
129static struct bge_type bge_devs[] = {
130 { ALT_VENDORID, ALT_DEVICEID_BCM5700,
131 "Broadcom BCM5700 Gigabit Ethernet" },
132 { ALT_VENDORID, ALT_DEVICEID_BCM5701,
133 "Broadcom BCM5701 Gigabit Ethernet" },
134 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
135 "Broadcom BCM5700 Gigabit Ethernet" },
136 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
137 "Broadcom BCM5701 Gigabit Ethernet" },
138 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
139 "Broadcom BCM5702 Gigabit Ethernet" },
140 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
141 "Broadcom BCM5702X Gigabit Ethernet" },
142 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
143 "Broadcom BCM5703 Gigabit Ethernet" },
144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
145 "Broadcom BCM5703X Gigabit Ethernet" },
146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
147 "Broadcom BCM5704C Dual Gigabit Ethernet" },
148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
149 "Broadcom BCM5704S Dual Gigabit Ethernet" },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
151 "Broadcom BCM5705 Gigabit Ethernet" },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
153 "Broadcom BCM5705K Gigabit Ethernet" },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
155 "Broadcom BCM5705M Gigabit Ethernet" },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
157 "Broadcom BCM5705M Gigabit Ethernet" },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
159 "Broadcom BCM5782 Gigabit Ethernet" },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
161 "Broadcom BCM5788 Gigabit Ethernet" },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
163 "Broadcom BCM5901 Fast Ethernet" },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
165 "Broadcom BCM5901A2 Fast Ethernet" },
166 { SK_VENDORID, SK_DEVICEID_ALTIMA,
167 "SysKonnect Gigabit Ethernet" },
168 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
169 "Altima AC1000 Gigabit Ethernet" },
170 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
171 "Altima AC1002 Gigabit Ethernet" },
172 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
173 "Altima AC9100 Gigabit Ethernet" },
174 { 0, 0, NULL }
175};
176
177static int bge_probe (device_t);
178static int bge_attach (device_t);
179static int bge_detach (device_t);
180static void bge_release_resources
181 (struct bge_softc *);
182static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
183static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
184 bus_size_t, int);
185static int bge_dma_alloc (device_t);
186static void bge_dma_free (struct bge_softc *);
187
188static void bge_txeof (struct bge_softc *);
189static void bge_rxeof (struct bge_softc *);
190
191static void bge_tick_locked (struct bge_softc *);
192static void bge_tick (void *);
193static void bge_stats_update (struct bge_softc *);
194static void bge_stats_update_regs
195 (struct bge_softc *);
196static int bge_encap (struct bge_softc *, struct mbuf *,
197 u_int32_t *);
198
199static void bge_intr (void *);
200static void bge_start_locked (struct ifnet *);
201static void bge_start (struct ifnet *);
202static int bge_ioctl (struct ifnet *, u_long, caddr_t);
203static void bge_init_locked (struct bge_softc *);
204static void bge_init (void *);
205static void bge_stop (struct bge_softc *);
206static void bge_watchdog (struct ifnet *);
207static void bge_shutdown (device_t);
208static int bge_ifmedia_upd (struct ifnet *);
209static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
210
211static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *);
212static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int);
213
214static void bge_setmulti (struct bge_softc *);
215
216static void bge_handle_events (struct bge_softc *);
217static int bge_alloc_jumbo_mem (struct bge_softc *);
218static void bge_free_jumbo_mem (struct bge_softc *);
219static void *bge_jalloc (struct bge_softc *);
220static void bge_jfree (void *, void *);
221static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *);
222static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *);
223static int bge_init_rx_ring_std (struct bge_softc *);
224static void bge_free_rx_ring_std (struct bge_softc *);
225static int bge_init_rx_ring_jumbo (struct bge_softc *);
226static void bge_free_rx_ring_jumbo (struct bge_softc *);
227static void bge_free_tx_ring (struct bge_softc *);
228static int bge_init_tx_ring (struct bge_softc *);
229
230static int bge_chipinit (struct bge_softc *);
231static int bge_blockinit (struct bge_softc *);
232
233#ifdef notdef
234static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
235static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int);
236static void bge_vpd_read (struct bge_softc *);
237#endif
238
239static u_int32_t bge_readmem_ind
240 (struct bge_softc *, int);
241static void bge_writemem_ind (struct bge_softc *, int, int);
242#ifdef notdef
243static u_int32_t bge_readreg_ind
244 (struct bge_softc *, int);
245#endif
246static void bge_writereg_ind (struct bge_softc *, int, int);
247
248static int bge_miibus_readreg (device_t, int, int);
249static int bge_miibus_writereg (device_t, int, int, int);
250static void bge_miibus_statchg (device_t);
251
252static void bge_reset (struct bge_softc *);
253
254static device_method_t bge_methods[] = {
255 /* Device interface */
256 DEVMETHOD(device_probe, bge_probe),
257 DEVMETHOD(device_attach, bge_attach),
258 DEVMETHOD(device_detach, bge_detach),
259 DEVMETHOD(device_shutdown, bge_shutdown),
260
261 /* bus interface */
262 DEVMETHOD(bus_print_child, bus_generic_print_child),
263 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
264
265 /* MII interface */
266 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
267 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
268 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
269
270 { 0, 0 }
271};
272
273static driver_t bge_driver = {
274 "bge",
275 bge_methods,
276 sizeof(struct bge_softc)
277};
278
279static devclass_t bge_devclass;
280
281DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
282DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
283
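/*
 * bge_readmem_ind()/bge_writemem_ind() access the NIC's internal memory
 * indirectly through PCI configuration space: the target offset is written
 * to the memory window base address register and the data is then read or
 * written through the memory window data register.  This works even when
 * the device's memory-mapped BAR is not usable.
 */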
284static u_int32_t
285bge_readmem_ind(sc, off)
286 struct bge_softc *sc;
287 int off;
288{
289 device_t dev;
290
291 dev = sc->bge_dev;
292
293 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
294 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
295}
296
297static void
298bge_writemem_ind(sc, off, val)
299 struct bge_softc *sc;
300 int off, val;
301{
302 device_t dev;
303
304 dev = sc->bge_dev;
305
306 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
307 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
308
309 return;
310}
311
312#ifdef notdef
313static u_int32_t
314bge_readreg_ind(sc, off)
315 struct bge_softc *sc;
316 int off;
317{
318 device_t dev;
319
320 dev = sc->bge_dev;
321
322 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
323 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
324}
325#endif
326
327static void
328bge_writereg_ind(sc, off, val)
329 struct bge_softc *sc;
330 int off, val;
331{
332 device_t dev;
333
334 dev = sc->bge_dev;
335
336 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
337 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
338
339 return;
340}
341
342/*
343 * Map a single buffer address.
344 */
345
346static void
347bge_dma_map_addr(arg, segs, nseg, error)
348 void *arg;
349 bus_dma_segment_t *segs;
350 int nseg;
351 int error;
352{
353 struct bge_dmamap_arg *ctx;
354
355 if (error)
356 return;
357
358 ctx = arg;
359
360 if (nseg > ctx->bge_maxsegs) {
361 ctx->bge_maxsegs = 0;
362 return;
363 }
364
365 ctx->bge_busaddr = segs->ds_addr;
366
367 return;
368}
369
370/*
371 * Map an mbuf chain into a TX ring.
372 */
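/*
 * This is a busdma callback: it receives the DMA segment list for a loaded
 * buffer chain and converts each segment into one TX buffer descriptor,
 * wrapping the ring index with BGE_INC() and marking the final descriptor
 * of the frame with BGE_TXBDFLAG_END.  The caller passes the starting ring
 * index and the descriptor flags in via the bge_dmamap_arg context.
 */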
373
374static void
375bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
376 void *arg;
377 bus_dma_segment_t *segs;
378 int nseg;
379 bus_size_t mapsize;
380 int error;
381{
382 struct bge_dmamap_arg *ctx;
383 struct bge_tx_bd *d = NULL;
384 int i = 0, idx;
385
386 if (error)
387 return;
388
389 ctx = arg;
390
391	/* Signal error to caller if there are too many segments */
392 if (nseg > ctx->bge_maxsegs) {
393 ctx->bge_maxsegs = 0;
394 return;
395 }
396
397 idx = ctx->bge_idx;
398 while(1) {
399 d = &ctx->bge_ring[idx];
400 d->bge_addr.bge_addr_lo =
401 htole32(BGE_ADDR_LO(segs[i].ds_addr));
402 d->bge_addr.bge_addr_hi =
403 htole32(BGE_ADDR_HI(segs[i].ds_addr));
404 d->bge_len = htole16(segs[i].ds_len);
405 d->bge_flags = htole16(ctx->bge_flags);
406 i++;
407 if (i == nseg)
408 break;
409 BGE_INC(idx, BGE_TX_RING_CNT);
410 }
411
412 d->bge_flags |= htole16(BGE_TXBDFLAG_END);
413 ctx->bge_maxsegs = nseg;
414 ctx->bge_idx = idx;
415
416 return;
417}
418
419
420#ifdef notdef
421static u_int8_t
422bge_vpd_readbyte(sc, addr)
423 struct bge_softc *sc;
424 int addr;
425{
426 int i;
427 device_t dev;
428 u_int32_t val;
429
430 dev = sc->bge_dev;
431 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
432 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
433 DELAY(10);
434 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
435 break;
436 }
437
438	if (i == BGE_TIMEOUT * 10) {
439 printf("bge%d: VPD read timed out\n", sc->bge_unit);
440 return(0);
441 }
442
443 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
444
445 return((val >> ((addr % 4) * 8)) & 0xFF);
446}
447
448static void
449bge_vpd_read_res(sc, res, addr)
450 struct bge_softc *sc;
451 struct vpd_res *res;
452 int addr;
453{
454 int i;
455 u_int8_t *ptr;
456
457 ptr = (u_int8_t *)res;
458 for (i = 0; i < sizeof(struct vpd_res); i++)
459 ptr[i] = bge_vpd_readbyte(sc, i + addr);
460
461 return;
462}
463
464static void
465bge_vpd_read(sc)
466 struct bge_softc *sc;
467{
468 int pos = 0, i;
469 struct vpd_res res;
470
471 if (sc->bge_vpd_prodname != NULL)
472 free(sc->bge_vpd_prodname, M_DEVBUF);
473 if (sc->bge_vpd_readonly != NULL)
474 free(sc->bge_vpd_readonly, M_DEVBUF);
475 sc->bge_vpd_prodname = NULL;
476 sc->bge_vpd_readonly = NULL;
477
478 bge_vpd_read_res(sc, &res, pos);
479
480 if (res.vr_id != VPD_RES_ID) {
481 printf("bge%d: bad VPD resource id: expected %x got %x\n",
482 sc->bge_unit, VPD_RES_ID, res.vr_id);
483 return;
484 }
485
486 pos += sizeof(res);
487 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
488 for (i = 0; i < res.vr_len; i++)
489 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
490 sc->bge_vpd_prodname[i] = '\0';
491 pos += i;
492
493 bge_vpd_read_res(sc, &res, pos);
494
495 if (res.vr_id != VPD_RES_READ) {
496 printf("bge%d: bad VPD resource id: expected %x got %x\n",
497 sc->bge_unit, VPD_RES_READ, res.vr_id);
498 return;
499 }
500
501 pos += sizeof(res);
502 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
503	for (i = 0; i < res.vr_len; i++)
504 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
505
506 return;
507}
508#endif
509
510/*
511 * Read a byte of data stored in the EEPROM at address 'addr.' The
512 * BCM570x supports both the traditional bitbang interface and an
513 * auto access interface for reading the EEPROM. We use the auto
514 * access method.
515 */
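/*
 * With auto access, the read command and byte address are written to the
 * BGE_EE_ADDR register and the hardware latches a full 32-bit word into
 * BGE_EE_DATA once BGE_EEADDR_DONE is set; the requested byte is then
 * extracted by shifting the word right by (addr % 4) * 8 bits.
 */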
516static u_int8_t
517bge_eeprom_getbyte(sc, addr, dest)
518 struct bge_softc *sc;
519 int addr;
520 u_int8_t *dest;
521{
522 int i;
523 u_int32_t byte = 0;
524
525 /*
526 * Enable use of auto EEPROM access so we can avoid
527 * having to use the bitbang method.
528 */
529 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
530
531 /* Reset the EEPROM, load the clock period. */
532 CSR_WRITE_4(sc, BGE_EE_ADDR,
533 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
534 DELAY(20);
535
536 /* Issue the read EEPROM command. */
537 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
538
539 /* Wait for completion */
540 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
541 DELAY(10);
542 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
543 break;
544 }
545
546	if (i == BGE_TIMEOUT * 10) {
547 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
548 return(0);
549 }
550
551 /* Get result. */
552 byte = CSR_READ_4(sc, BGE_EE_DATA);
553
554 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
555
556 return(0);
557}
558
559/*
560 * Read a sequence of bytes from the EEPROM.
561 */
562static int
563bge_read_eeprom(sc, dest, off, cnt)
564 struct bge_softc *sc;
565 caddr_t dest;
566 int off;
567 int cnt;
568{
569 int err = 0, i;
570 u_int8_t byte = 0;
571
572 for (i = 0; i < cnt; i++) {
573 err = bge_eeprom_getbyte(sc, off + i, &byte);
574 if (err)
575 break;
576 *(dest + i) = byte;
577 }
578
579 return(err ? 1 : 0);
580}
581
582static int
583bge_miibus_readreg(dev, phy, reg)
584 device_t dev;
585 int phy, reg;
586{
587 struct bge_softc *sc;
588 u_int32_t val, autopoll;
589 int i;
590
591 sc = device_get_softc(dev);
592
593 /*
594 * Broadcom's own driver always assumes the internal
595 * PHY is at GMII address 1. On some chips, the PHY responds
596 * to accesses at all addresses, which could cause us to
597 * bogusly attach the PHY 32 times at probe time. Always
598 * restricting the lookup to address 1 is simpler than
599 * trying to figure out which chip revisions should be
600 * special-cased.
601 */
602 if (phy != 1)
603 return(0);
604
605 /* Reading with autopolling on may trigger PCI errors */
606 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
607 if (autopoll & BGE_MIMODE_AUTOPOLL) {
608 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
609 DELAY(40);
610 }
611
612 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
613 BGE_MIPHY(phy)|BGE_MIREG(reg));
614
615 for (i = 0; i < BGE_TIMEOUT; i++) {
616 val = CSR_READ_4(sc, BGE_MI_COMM);
617 if (!(val & BGE_MICOMM_BUSY))
618 break;
619 }
620
621 if (i == BGE_TIMEOUT) {
622 printf("bge%d: PHY read timed out\n", sc->bge_unit);
623 val = 0;
624 goto done;
625 }
626
627 val = CSR_READ_4(sc, BGE_MI_COMM);
628
629done:
630 if (autopoll & BGE_MIMODE_AUTOPOLL) {
631 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
632 DELAY(40);
633 }
634
635 if (val & BGE_MICOMM_READFAIL)
636 return(0);
637
638 return(val & 0xFFFF);
639}
640
641static int
642bge_miibus_writereg(dev, phy, reg, val)
643 device_t dev;
644 int phy, reg, val;
645{
646 struct bge_softc *sc;
647 u_int32_t autopoll;
648 int i;
649
650 sc = device_get_softc(dev);
651
652	/* Writing with autopolling on may trigger PCI errors */
653 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
654 if (autopoll & BGE_MIMODE_AUTOPOLL) {
655 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
656 DELAY(40);
657 }
658
659 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
660 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
661
662 for (i = 0; i < BGE_TIMEOUT; i++) {
663 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
664 break;
665 }
666
667 if (autopoll & BGE_MIMODE_AUTOPOLL) {
668 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
669 DELAY(40);
670 }
671
672 if (i == BGE_TIMEOUT) {
673		printf("bge%d: PHY write timed out\n", sc->bge_unit);
674 return(0);
675 }
676
677 return(0);
678}
679
680static void
681bge_miibus_statchg(dev)
682 device_t dev;
683{
684 struct bge_softc *sc;
685 struct mii_data *mii;
686
687 sc = device_get_softc(dev);
688 mii = device_get_softc(sc->bge_miibus);
689
690 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
691 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
692 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
693 } else {
694 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
695 }
696
697 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
698 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
699 } else {
700 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
701 }
702
703 return;
704}
705
706/*
707 * Handle events that have triggered interrupts.
708 */
709static void
710bge_handle_events(sc)
711 struct bge_softc *sc;
712{
713
714 return;
715}
716
717/*
718 * Memory management for jumbo frames.
719 */
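/*
 * The jumbo buffer pool is one large bus_dmamem_alloc()'ed block of
 * BGE_JMEM bytes, carved into BGE_JSLOTS buffers of BGE_JLEN bytes each.
 * Free and in-use buffers are tracked on two SLISTs of bge_jpool_entry
 * structures; bge_jfree() recovers the slot index from the buffer address
 * so an external mbuf can hand its storage back to the pool.
 */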
720
721static int
722bge_alloc_jumbo_mem(sc)
723 struct bge_softc *sc;
724{
725 caddr_t ptr;
726 register int i, error;
727 struct bge_jpool_entry *entry;
728
729 /* Create tag for jumbo buffer block */
730
731 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
732 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
733 NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
734 &sc->bge_cdata.bge_jumbo_tag);
735
736 if (error) {
737 printf("bge%d: could not allocate jumbo dma tag\n",
738 sc->bge_unit);
739 return (ENOMEM);
740 }
741
742 /* Allocate DMA'able memory for jumbo buffer block */
743
744 error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
745 (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
746 &sc->bge_cdata.bge_jumbo_map);
747
748 if (error)
749 return (ENOMEM);
750
751 SLIST_INIT(&sc->bge_jfree_listhead);
752 SLIST_INIT(&sc->bge_jinuse_listhead);
753
754 /*
755 * Now divide it up into 9K pieces and save the addresses
756 * in an array.
757 */
758 ptr = sc->bge_ldata.bge_jumbo_buf;
759 for (i = 0; i < BGE_JSLOTS; i++) {
760 sc->bge_cdata.bge_jslots[i] = ptr;
761 ptr += BGE_JLEN;
762 entry = malloc(sizeof(struct bge_jpool_entry),
763 M_DEVBUF, M_NOWAIT);
764 if (entry == NULL) {
765 bge_free_jumbo_mem(sc);
766 sc->bge_ldata.bge_jumbo_buf = NULL;
767 printf("bge%d: no memory for jumbo "
768 "buffer queue!\n", sc->bge_unit);
769 return(ENOBUFS);
770 }
771 entry->slot = i;
772 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
773 entry, jpool_entries);
774 }
775
776 return(0);
777}
778
779static void
780bge_free_jumbo_mem(sc)
781 struct bge_softc *sc;
782{
783 int i;
784 struct bge_jpool_entry *entry;
785
786 for (i = 0; i < BGE_JSLOTS; i++) {
787 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
788 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
789 free(entry, M_DEVBUF);
790 }
791
792 /* Destroy jumbo buffer block */
793
794	if (sc->bge_ldata.bge_jumbo_buf)
795 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
796 sc->bge_ldata.bge_jumbo_buf,
797 sc->bge_cdata.bge_jumbo_map);
798
799	if (sc->bge_cdata.bge_jumbo_map)
800 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
801 sc->bge_cdata.bge_jumbo_map);
802
803 if (sc->bge_cdata.bge_jumbo_tag)
804 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
805
806 return;
807}
808
809/*
810 * Allocate a jumbo buffer.
811 */
812static void *
813bge_jalloc(sc)
814 struct bge_softc *sc;
815{
816 struct bge_jpool_entry *entry;
817
818 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
819
820 if (entry == NULL) {
821 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
822 return(NULL);
823 }
824
825 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
826 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
827 return(sc->bge_cdata.bge_jslots[entry->slot]);
828}
829
830/*
831 * Release a jumbo buffer.
832 */
833static void
834bge_jfree(buf, args)
835 void *buf;
836 void *args;
837{
838 struct bge_jpool_entry *entry;
839 struct bge_softc *sc;
840 int i;
841
842 /* Extract the softc struct pointer. */
843 sc = (struct bge_softc *)args;
844
845 if (sc == NULL)
846 panic("bge_jfree: can't find softc pointer!");
847
848 /* calculate the slot this buffer belongs to */
849
850 i = ((vm_offset_t)buf
851 - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
852
853 if ((i < 0) || (i >= BGE_JSLOTS))
854 panic("bge_jfree: asked to free buffer that we don't manage!");
855
856 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
857 if (entry == NULL)
858 panic("bge_jfree: buffer not in use!");
859 entry->slot = i;
860 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
861 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
862
863 return;
864}
865
866
867/*
868 * Initialize a standard receive ring descriptor.
869 */
870static int
871bge_newbuf_std(sc, i, m)
872 struct bge_softc *sc;
873 int i;
874 struct mbuf *m;
875{
876 struct mbuf *m_new = NULL;
877 struct bge_rx_bd *r;
878 struct bge_dmamap_arg ctx;
879 int error;
880
881 if (m == NULL) {
882 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
883 if (m_new == NULL) {
884 return(ENOBUFS);
885 }
886
887 MCLGET(m_new, M_DONTWAIT);
888 if (!(m_new->m_flags & M_EXT)) {
889 m_freem(m_new);
890 return(ENOBUFS);
891 }
892 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
893 } else {
894 m_new = m;
895 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
896 m_new->m_data = m_new->m_ext.ext_buf;
897 }
898
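	/*
	 * Shift the payload by ETHER_ALIGN (2 bytes) so the IP header ends
	 * up 32-bit aligned.  This is skipped on chips flagged with the RX
	 * alignment bug, which cannot DMA reliably into unaligned buffers.
	 */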
899 if (!sc->bge_rx_alignment_bug)
900 m_adj(m_new, ETHER_ALIGN);
901 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
902 r = &sc->bge_ldata.bge_rx_std_ring[i];
903 ctx.bge_maxsegs = 1;
904 ctx.sc = sc;
905 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
906 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
907 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
908 if (error || ctx.bge_maxsegs == 0) {
909 if (m == NULL)
910 m_freem(m_new);
911 return(ENOMEM);
912 }
913 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
914 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
915 r->bge_flags = htole16(BGE_RXBDFLAG_END);
916 r->bge_len = htole16(m_new->m_len);
917 r->bge_idx = htole16(i);
918
919 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
920 sc->bge_cdata.bge_rx_std_dmamap[i],
921 BUS_DMASYNC_PREREAD);
922
923 return(0);
924}
925
926/*
927 * Initialize a jumbo receive ring descriptor. This allocates
928 * a jumbo buffer from the pool managed internally by the driver.
929 */
930static int
931bge_newbuf_jumbo(sc, i, m)
932 struct bge_softc *sc;
933 int i;
934 struct mbuf *m;
935{
936 struct mbuf *m_new = NULL;
937 struct bge_rx_bd *r;
938 struct bge_dmamap_arg ctx;
939 int error;
940
941 if (m == NULL) {
942 caddr_t *buf = NULL;
943
944 /* Allocate the mbuf. */
945 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
946 if (m_new == NULL) {
947 return(ENOBUFS);
948 }
949
950 /* Allocate the jumbo buffer */
951 buf = bge_jalloc(sc);
952 if (buf == NULL) {
953 m_freem(m_new);
954 printf("bge%d: jumbo allocation failed "
955 "-- packet dropped!\n", sc->bge_unit);
956 return(ENOBUFS);
957 }
958
959 /* Attach the buffer to the mbuf. */
960 m_new->m_data = (void *) buf;
961 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
962 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
963 (struct bge_softc *)sc, 0, EXT_NET_DRV);
964 } else {
965 m_new = m;
966 m_new->m_data = m_new->m_ext.ext_buf;
967 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
968 }
969
970 if (!sc->bge_rx_alignment_bug)
971 m_adj(m_new, ETHER_ALIGN);
972 /* Set up the descriptor. */
973 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
974 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
975 ctx.bge_maxsegs = 1;
976 ctx.sc = sc;
977 error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
978 sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
979 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
980 if (error || ctx.bge_maxsegs == 0) {
981 if (m == NULL)
982 m_freem(m_new);
983 return(ENOMEM);
984 }
985 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
986 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
987 r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
988 r->bge_len = htole16(m_new->m_len);
989 r->bge_idx = htole16(i);
990
991	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
992 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
993 BUS_DMASYNC_PREREAD);
994
995 return(0);
996}
997
998/*
999 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1000 * that's 1MB of memory, which is a lot. For now, we fill only the first
1001 * 256 ring entries and hope that our CPU is fast enough to keep up with
1002 * the NIC.
1003 */
1004static int
1005bge_init_rx_ring_std(sc)
1006 struct bge_softc *sc;
1007{
1008 int i;
1009
1010 for (i = 0; i < BGE_SSLOTS; i++) {
1011 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1012 return(ENOBUFS);
1013	}
1014
1015 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1016 sc->bge_cdata.bge_rx_std_ring_map,
1017 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1018
1019 sc->bge_std = i - 1;
1020 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1021
1022 return(0);
1023}
1024
1025static void
1026bge_free_rx_ring_std(sc)
1027 struct bge_softc *sc;
1028{
1029 int i;
1030
1031 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1032 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1033 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1034 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1035 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1036 sc->bge_cdata.bge_rx_std_dmamap[i]);
1037 }
1038 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1039 sizeof(struct bge_rx_bd));
1040 }
1041
1042 return;
1043}
1044
1045static int
1046bge_init_rx_ring_jumbo(sc)
1047 struct bge_softc *sc;
1048{
1049 int i;
1050 struct bge_rcb *rcb;
1051
1052 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1053 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1054 return(ENOBUFS);
1055	}
1056
1057 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1058 sc->bge_cdata.bge_rx_jumbo_ring_map,
1059 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1060
1061 sc->bge_jumbo = i - 1;
1062
1063 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1064 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1065 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1066
1067 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1068
1069 return(0);
1070}
1071
1072static void
1073bge_free_rx_ring_jumbo(sc)
1074 struct bge_softc *sc;
1075{
1076 int i;
1077
1078 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1079 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1080 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1081 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1082 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1083 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1084 }
1085 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1086 sizeof(struct bge_rx_bd));
1087 }
1088
1089 return;
1090}
1091
1092static void
1093bge_free_tx_ring(sc)
1094 struct bge_softc *sc;
1095{
1096 int i;
1097
1098 if (sc->bge_ldata.bge_tx_ring == NULL)
1099 return;
1100
1101 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1102 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1103 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1104 sc->bge_cdata.bge_tx_chain[i] = NULL;
1105 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1106 sc->bge_cdata.bge_tx_dmamap[i]);
1107 }
1108 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1109 sizeof(struct bge_tx_bd));
1110 }
1111
1112 return;
1113}
1114
1115static int
1116bge_init_tx_ring(sc)
1117 struct bge_softc *sc;
1118{
1119 sc->bge_txcnt = 0;
1120 sc->bge_tx_saved_considx = 0;
1121
1122 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1123 /* 5700 b2 errata */
1124 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1125 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1126
1127 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1128 /* 5700 b2 errata */
1129 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1130 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1131
1132 return(0);
1133}
1134
1135static void
1136bge_setmulti(sc)
1137 struct bge_softc *sc;
1138{
1139 struct ifnet *ifp;
1140 struct ifmultiaddr *ifma;
1141 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1142 int h, i;
1143
1144 BGE_LOCK_ASSERT(sc);
1145
1146 ifp = &sc->arpcom.ac_if;
1147
1148 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1149 for (i = 0; i < 4; i++)
1150 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1151 return;
1152 }
1153
1154 /* First, zot all the existing filters. */
1155 for (i = 0; i < 4; i++)
1156 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1157
1158 /* Now program new ones. */
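	/*
	 * The chip uses a 7-bit hash of the multicast address (the low bits
	 * of the little-endian CRC32) to select one of 128 filter bits:
	 * bits 6:5 pick one of the four 32-bit BGE_MAR registers and
	 * bits 4:0 pick the bit within that register.
	 */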
1159 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1160 if (ifma->ifma_addr->sa_family != AF_LINK)
1161 continue;
1162 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1163 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1164 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1165 }
1166
1167 for (i = 0; i < 4; i++)
1168 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1169
1170 return;
1171}
1172
1173/*
1174 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1175 * self-test results.
1176 */
1177static int
1178bge_chipinit(sc)
1179 struct bge_softc *sc;
1180{
1181 int i;
1182 u_int32_t dma_rw_ctl;
1183
1184 /* Set endianness before we access any non-PCI registers. */
1185#if BYTE_ORDER == BIG_ENDIAN
1186 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1187 BGE_BIGENDIAN_INIT, 4);
1188#else
1189 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1190 BGE_LITTLEENDIAN_INIT, 4);
1191#endif
1192
1193 /*
1194 * Check the 'ROM failed' bit on the RX CPU to see if
1195 * self-tests passed.
1196 */
1197 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1198 printf("bge%d: RX CPU self-diagnostics failed!\n",
1199 sc->bge_unit);
1200 return(ENODEV);
1201 }
1202
1203 /* Clear the MAC control register */
1204 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1205
1206 /*
1207 * Clear the MAC statistics block in the NIC's
1208 * internal memory.
1209 */
1210 for (i = BGE_STATS_BLOCK;
1211 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1212 BGE_MEMWIN_WRITE(sc, i, 0);
1213
1214 for (i = BGE_STATUS_BLOCK;
1215 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1216 BGE_MEMWIN_WRITE(sc, i, 0);
1217
1218 /* Set up the PCI DMA control register. */
1219 if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1220 BGE_PCISTATE_PCI_BUSMODE) {
1221 /* Conventional PCI bus */
1222 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1223 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1224 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1225 (0x0F);
1226 } else {
1227 /* PCI-X bus */
1228 /*
1229 * The 5704 uses a different encoding of read/write
1230 * watermarks.
1231 */
1232 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1233 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1234 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1235 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1236 else
1237 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1238 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1239 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1240 (0x0F);
1241
1242 /*
1243 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1244 * for hardware bugs.
1245 */
1246 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1247 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1248 u_int32_t tmp;
1249
1250 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1251 if (tmp == 0x6 || tmp == 0x7)
1252 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1253 }
1254 }
1255
1256 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1257 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1258 sc->bge_asicrev == BGE_ASICREV_BCM5705)
1259 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1260 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1261
1262 /*
1263 * Set up general mode register.
1264 */
1265 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1266 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1267 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1268 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1269
1270 /*
1271 * Disable memory write invalidate. Apparently it is not supported
1272 * properly by these devices.
1273 */
1274 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1275
1276#ifdef __brokenalpha__
1277 /*
1278	 * Must ensure that we do not cross an 8K (bytes) boundary
1279 * for DMA reads. Our highest limit is 1K bytes. This is a
1280 * restriction on some ALPHA platforms with early revision
1281 * 21174 PCI chipsets, such as the AlphaPC 164lx
1282 */
1283 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1284 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1285#endif
1286
1287	/* Set the timer prescaler (always 66MHz) */
1288 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1289
1290 return(0);
1291}
1292
1293static int
1294bge_blockinit(sc)
1295 struct bge_softc *sc;
1296{
1297 struct bge_rcb *rcb;
1298 volatile struct bge_rcb *vrcb;
1299 int i;
1300
1301 /*
1302 * Initialize the memory window pointer register so that
1303 * we can access the first 32K of internal NIC RAM. This will
1304 * allow us to set up the TX send ring RCBs and the RX return
1305 * ring RCBs, plus other things which live in NIC memory.
1306 */
1307 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1308
1309 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1310
1311 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1312 /* Configure mbuf memory pool */
1313 if (sc->bge_extram) {
1314 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1315 BGE_EXT_SSRAM);
1316 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1317 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1318 else
1319 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1320 } else {
1321 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1322 BGE_BUFFPOOL_1);
1323 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1324 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1325 else
1326 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1327 }
1328
1329 /* Configure DMA resource pool */
1330 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1331 BGE_DMA_DESCRIPTORS);
1332 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1333 }
1334
1335 /* Configure mbuf pool watermarks */
1336 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
1337 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1338 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1339 } else {
1340 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1341 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1342 }
1343 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1344
1345 /* Configure DMA resource watermarks */
1346 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1347 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1348
1349 /* Enable buffer manager */
1350 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1351 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1352 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1353
1354 /* Poll for buffer manager start indication */
1355 for (i = 0; i < BGE_TIMEOUT; i++) {
1356 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1357 break;
1358 DELAY(10);
1359 }
1360
1361 if (i == BGE_TIMEOUT) {
1362 printf("bge%d: buffer manager failed to start\n",
1363 sc->bge_unit);
1364 return(ENXIO);
1365 }
1366 }
1367
1368 /* Enable flow-through queues */
1369 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1370 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1371
1372 /* Wait until queue initialization is complete */
1373 for (i = 0; i < BGE_TIMEOUT; i++) {
1374 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1375 break;
1376 DELAY(10);
1377 }
1378
1379 if (i == BGE_TIMEOUT) {
1380 printf("bge%d: flow-through queue init failed\n",
1381 sc->bge_unit);
1382 return(ENXIO);
1383 }
1384
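	/*
	 * Each ring control block (RCB) tells the chip where a ring lives:
	 * a 64-bit host address, a maxlen/flags word holding the frame or
	 * ring size plus control flags, and an address in NIC-local memory
	 * where applicable.
	 */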
1385 /* Initialize the standard RX ring control block */
1386 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1387 rcb->bge_hostaddr.bge_addr_lo =
1388 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1389 rcb->bge_hostaddr.bge_addr_hi =
1390 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1391 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1392 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1393 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1394 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1395 else
1396 rcb->bge_maxlen_flags =
1397 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1398 if (sc->bge_extram)
1399 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1400 else
1401 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1402 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1403 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1404
1405 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1406 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1407
1408 /*
1409 * Initialize the jumbo RX ring control block
1410 * We set the 'ring disabled' bit in the flags
1411 * field until we're actually ready to start
1412 * using this ring (i.e. once we set the MTU
1413 * high enough to require it).
1414 */
1415 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1416 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1417
1418 rcb->bge_hostaddr.bge_addr_lo =
1419 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1420 rcb->bge_hostaddr.bge_addr_hi =
1421 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1422 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1423 sc->bge_cdata.bge_rx_jumbo_ring_map,
1424 BUS_DMASYNC_PREREAD);
1425 rcb->bge_maxlen_flags =
1426 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1427 BGE_RCB_FLAG_RING_DISABLED);
1428 if (sc->bge_extram)
1429 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1430 else
1431 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1432 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1433 rcb->bge_hostaddr.bge_addr_hi);
1434 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1435 rcb->bge_hostaddr.bge_addr_lo);
1436
1437 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1438 rcb->bge_maxlen_flags);
1439 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1440
1441 /* Set up dummy disabled mini ring RCB */
1442 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1443 rcb->bge_maxlen_flags =
1444 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1445 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1446 rcb->bge_maxlen_flags);
1447 }
1448
1449 /*
1450	 * Set the BD ring replenish thresholds. The recommended
1451 * values are 1/8th the number of descriptors allocated to
1452 * each ring.
1453 */
1454 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1455 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1456
1457 /*
1458 * Disable all unused send rings by setting the 'ring disabled'
1459 * bit in the flags field of all the TX send ring control blocks.
1460 * These are located in NIC memory.
1461 */
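	/*
	 * The send ring RCBs are not written through CSR_WRITE_4(); they are
	 * touched directly through the memory window mapped at
	 * BGE_MEMWIN_START in the register BAR (sc->bge_vhandle).
	 */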
1462 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1463 BGE_SEND_RING_RCB);
1464 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1465 vrcb->bge_maxlen_flags =
1466 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1467 vrcb->bge_nicaddr = 0;
1468 vrcb++;
1469 }
1470
1471 /* Configure TX RCB 0 (we use only the first ring) */
1472 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1473 BGE_SEND_RING_RCB);
1474 vrcb->bge_hostaddr.bge_addr_lo =
1475 htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1476 vrcb->bge_hostaddr.bge_addr_hi =
1477 htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1478 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1479 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1480 vrcb->bge_maxlen_flags =
1481 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1482
1483 /* Disable all unused RX return rings */
1484 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1485 BGE_RX_RETURN_RING_RCB);
1486 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1487 vrcb->bge_hostaddr.bge_addr_hi = 0;
1488 vrcb->bge_hostaddr.bge_addr_lo = 0;
1489 vrcb->bge_maxlen_flags =
1490 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1491 BGE_RCB_FLAG_RING_DISABLED);
1492 vrcb->bge_nicaddr = 0;
1493 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1494 (i * (sizeof(u_int64_t))), 0);
1495 vrcb++;
1496 }
1497
1498 /* Initialize RX ring indexes */
1499 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1500 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1501 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1502
1503 /*
1504 * Set up RX return ring 0
1505 * Note that the NIC address for RX return rings is 0x00000000.
1506 * The return rings live entirely within the host, so the
1507 * nicaddr field in the RCB isn't used.
1508 */
1509 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1510 BGE_RX_RETURN_RING_RCB);
1511 vrcb->bge_hostaddr.bge_addr_lo =
1512 BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1513 vrcb->bge_hostaddr.bge_addr_hi =
1514 BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1515 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1516 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1517 vrcb->bge_nicaddr = 0x00000000;
1518 vrcb->bge_maxlen_flags =
1519 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1520
1521 /* Set random backoff seed for TX */
1522 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1523 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1524 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1525 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1526 BGE_TX_BACKOFF_SEED_MASK);
1527
1528 /* Set inter-packet gap */
1529 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1530
1531 /*
1532 * Specify which ring to use for packets that don't match
1533 * any RX rules.
1534 */
1535 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1536
1537 /*
1538 * Configure number of RX lists. One interrupt distribution
1539 * list, sixteen active lists, one bad frames class.
1540 */
1541 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1542
1543	/* Initialize RX list placement stats mask. */
1544 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1545 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1546
1547 /* Disable host coalescing until we get it set up */
1548 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1549
1550 /* Poll to make sure it's shut down. */
1551 for (i = 0; i < BGE_TIMEOUT; i++) {
1552 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1553 break;
1554 DELAY(10);
1555 }
1556
1557 if (i == BGE_TIMEOUT) {
1558 printf("bge%d: host coalescing engine failed to idle\n",
1559 sc->bge_unit);
1560 return(ENXIO);
1561 }
1562
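	/*
	 * Host coalescing batches interrupts: the chip can delay status
	 * block updates and interrupts until either the coalescing tick
	 * timer expires or the max_coal_bds descriptor count is reached,
	 * whichever comes first.
	 */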
1563 /* Set up host coalescing defaults */
1564 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1565 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1566 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1567 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1568 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1569 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1570 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1571 }
1572 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1573 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1574
1575 /* Set up address of statistics block */
1576 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1577 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1578 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1579 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1580 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1581 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1582 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1583 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1584 }
1585
1586 /* Set up address of status block */
1587 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1588 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1589 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1590 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1591 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1592 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1593 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1594 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1595
1596 /* Turn on host coalescing state machine */
1597 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1598
1599 /* Turn on RX BD completion state machine and enable attentions */
1600 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1601 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1602
1603 /* Turn on RX list placement state machine */
1604 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1605
1606 /* Turn on RX list selector state machine. */
1607 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1608 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1609
1610 /* Turn on DMA, clear stats */
1611 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1612 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1613 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1614 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1615 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1616
1617 /* Set misc. local control, enable interrupts on attentions */
1618 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1619
1620#ifdef notdef
1621 /* Assert GPIO pins for PHY reset */
1622 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1623 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1624 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1625 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1626#endif
1627
1628 /* Turn on DMA completion state machine */
1629 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1630 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1631
1632 /* Turn on write DMA state machine */
1633 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1634 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1635
1636 /* Turn on read DMA state machine */
1637 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1638 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1639
1640 /* Turn on RX data completion state machine */
1641 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1642
1643 /* Turn on RX BD initiator state machine */
1644 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1645
1646 /* Turn on RX data and RX BD initiator state machine */
1647 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1648
1649 /* Turn on Mbuf cluster free state machine */
1650 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1651 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1652
1653 /* Turn on send BD completion state machine */
1654 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1655
1656 /* Turn on send data completion state machine */
1657 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1658
1659 /* Turn on send data initiator state machine */
1660 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1661
1662 /* Turn on send BD initiator state machine */
1663 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1664
1665 /* Turn on send BD selector state machine */
1666 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1667
1668 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1669 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1670 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1671
1672 /* ack/clear link change events */
1673 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1674 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1675 BGE_MACSTAT_LINK_CHANGED);
1676 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1677
1678 /* Enable PHY auto polling (for MII/GMII only) */
1679 if (sc->bge_tbi) {
1680 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1681 } else {
1682 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1683 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1684 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1685 BGE_EVTENB_MI_INTERRUPT);
1686 }
1687
1688 /* Enable link state change attentions. */
1689 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1690
1691 return(0);
1692}
1693
1694/*
1695 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1696 * against our list and return its name if we find a match. Note
1697 * that since the Broadcom controller contains VPD support, we
1698 * can get the device name string from the controller itself instead
1699 * of the compiled-in string. This is a little slow, but it guarantees
1700 * we'll always announce the right product name.
1701 */
1702static int
1703bge_probe(dev)
1704 device_t dev;
1705{
1706 struct bge_type *t;
1707 struct bge_softc *sc;
1708 char *descbuf;
1709
1710 t = bge_devs;
1711
1712 sc = device_get_softc(dev);
1713 bzero(sc, sizeof(struct bge_softc));
1714 sc->bge_unit = device_get_unit(dev);
1715 sc->bge_dev = dev;
1716
1717 while(t->bge_name != NULL) {
1718 if ((pci_get_vendor(dev) == t->bge_vid) &&
1719 (pci_get_device(dev) == t->bge_did)) {
1720#ifdef notdef
1721 bge_vpd_read(sc);
1722 device_set_desc(dev, sc->bge_vpd_prodname);
1723#endif
1724 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1725 if (descbuf == NULL)
1726 return(ENOMEM);
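			/*
			 * The ASIC revision is kept in the upper 16 bits of
			 * the PCI miscellaneous host control word, so it can
			 * be reported at probe time from config space alone.
			 */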
1727 snprintf(descbuf, BGE_DEVDESC_MAX,
1728 "%s, ASIC rev. %#04x", t->bge_name,
1729 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1730 device_set_desc_copy(dev, descbuf);
1731 if (pci_get_subvendor(dev) == DELL_VENDORID)
1732 sc->bge_no_3_led = 1;
1733 free(descbuf, M_TEMP);
1734 return(0);
1735 }
1736 t++;
1737 }
1738
1739 return(ENXIO);
1740}
1741
1742static void
1743bge_dma_free(sc)
1744 struct bge_softc *sc;
1745{
1746 int i;
1747
1748
1749 /* Destroy DMA maps for RX buffers */
1750
1751 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1752 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1753 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1754 sc->bge_cdata.bge_rx_std_dmamap[i]);
1755 }
1756
1757 /* Destroy DMA maps for jumbo RX buffers */
1758
1759 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1760 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1761 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1762 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1763 }
1764
1765 /* Destroy DMA maps for TX buffers */
1766
1767 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1768 if (sc->bge_cdata.bge_tx_dmamap[i])
1769 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1770 sc->bge_cdata.bge_tx_dmamap[i]);
1771 }
1772
1773 if (sc->bge_cdata.bge_mtag)
1774 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1775
1776
1777 /* Destroy standard RX ring */
1778
1779 if (sc->bge_ldata.bge_rx_std_ring)
1780 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1781 sc->bge_ldata.bge_rx_std_ring,
1782 sc->bge_cdata.bge_rx_std_ring_map);
1783
1784 if (sc->bge_cdata.bge_rx_std_ring_map) {
1785 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1786 sc->bge_cdata.bge_rx_std_ring_map);
1787 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1788 sc->bge_cdata.bge_rx_std_ring_map);
1789 }
1790
1791 if (sc->bge_cdata.bge_rx_std_ring_tag)
1792 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1793
1794 /* Destroy jumbo RX ring */
1795
1796 if (sc->bge_ldata.bge_rx_jumbo_ring)
1797 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1798 sc->bge_ldata.bge_rx_jumbo_ring,
1799 sc->bge_cdata.bge_rx_jumbo_ring_map);
1800
1801 if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
1802 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1803 sc->bge_cdata.bge_rx_jumbo_ring_map);
1804 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1805 sc->bge_cdata.bge_rx_jumbo_ring_map);
1806 }
1807
1808 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1809 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1810
1811 /* Destroy RX return ring */
1812
1813 if (sc->bge_ldata.bge_rx_return_ring)
1814 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1815 sc->bge_ldata.bge_rx_return_ring,
1816 sc->bge_cdata.bge_rx_return_ring_map);
1817
1818 if (sc->bge_cdata.bge_rx_return_ring_map) {
1819 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1820 sc->bge_cdata.bge_rx_return_ring_map);
1821 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
1822 sc->bge_cdata.bge_rx_return_ring_map);
1823 }
1824
1825 if (sc->bge_cdata.bge_rx_return_ring_tag)
1826 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1827
1828 /* Destroy TX ring */
1829
1830 if (sc->bge_ldata.bge_tx_ring)
1831 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1832 sc->bge_ldata.bge_tx_ring,
1833 sc->bge_cdata.bge_tx_ring_map);
1834
1835 if (sc->bge_cdata.bge_tx_ring_map) {
1836 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1837 sc->bge_cdata.bge_tx_ring_map);
1838 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
1839 sc->bge_cdata.bge_tx_ring_map);
1840 }
1841
1842 if (sc->bge_cdata.bge_tx_ring_tag)
1843 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1844
1845 /* Destroy status block */
1846
1847 if (sc->bge_ldata.bge_status_block)
1848 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1849 sc->bge_ldata.bge_status_block,
1850 sc->bge_cdata.bge_status_map);
1851
1852 if (sc->bge_cdata.bge_status_map) {
1853 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1854 sc->bge_cdata.bge_status_map);
1855 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1856 sc->bge_cdata.bge_status_map);
1857 }
1858
1859 if (sc->bge_cdata.bge_status_tag)
1860 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1861
1862 /* Destroy statistics block */
1863
1864 if (sc->bge_ldata.bge_stats)
1865 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1866 sc->bge_ldata.bge_stats,
1867 sc->bge_cdata.bge_stats_map);
1868
1869 if (sc->bge_cdata.bge_stats_map) {
1870 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1871 sc->bge_cdata.bge_stats_map);
1872 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1873 sc->bge_cdata.bge_stats_map);
1874 }
1875
1876 if (sc->bge_cdata.bge_stats_tag)
1877 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1878
1879 /* Destroy the parent tag */
1880
1881 if (sc->bge_cdata.bge_parent_tag)
1882 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1883
1884 return;
1885}
1886
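/*
 * Allocate all of the bus DMA resources used by the driver: a parent
 * tag for the device, an mbuf tag plus per-slot maps for the standard
 * RX and TX rings (and, on chips other than the 5705, a jumbo mbuf tag
 * and per-slot maps), and a tag/map/memory triple for each host ring,
 * the status block and the statistics block. Each ring is zeroed and
 * loaded so that its bus address can later be programmed into the chip.
 */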
1887static int
1888bge_dma_alloc(dev)
1889 device_t dev;
1890{
1891 struct bge_softc *sc;
1892 int nseg, i, error;
1893 struct bge_dmamap_arg ctx;
1894
1895 sc = device_get_softc(dev);
1896
1897 /*
1898 * Allocate the parent bus DMA tag appropriate for PCI.
1899 */
1900#define BGE_NSEG_NEW 32
1901 error = bus_dma_tag_create(NULL, /* parent */
1902 PAGE_SIZE, 0, /* alignment, boundary */
1903 BUS_SPACE_MAXADDR, /* lowaddr */
1904 BUS_SPACE_MAXADDR_32BIT,/* highaddr */
1905 NULL, NULL, /* filter, filterarg */
1906 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1907 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1908 BUS_DMA_ALLOCNOW, /* flags */
1909 NULL, NULL, /* lockfunc, lockarg */
1910 &sc->bge_cdata.bge_parent_tag);
1911
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

1912 /*
1913 * Create tag for RX mbufs.
1914 */
1915 nseg = 32;
1916 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
1917 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1918 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
1919 &sc->bge_cdata.bge_mtag);
1920
1921 if (error) {
1922 device_printf(dev, "could not allocate dma tag\n");
1923 return (ENOMEM);
1924 }
1925
1926 /* Create DMA maps for RX buffers */
1927
1928 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1929 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1930 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1931 if (error) {
1932 device_printf(dev, "can't create DMA map for RX\n");
1933 return(ENOMEM);
1934 }
1935 }
1936
1937 /* Create DMA maps for TX buffers */
1938
1939 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1940 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1941 &sc->bge_cdata.bge_tx_dmamap[i]);
1942 if (error) {
1943			device_printf(dev, "can't create DMA map for TX\n");
1944 return(ENOMEM);
1945 }
1946 }
1947
1948 /* Create tag for standard RX ring */
1949
1950 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1951 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1952 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1953 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1954
1955 if (error) {
1956 device_printf(dev, "could not allocate dma tag\n");
1957 return (ENOMEM);
1958 }
1959
1960 /* Allocate DMA'able memory for standard RX ring */
1961
1962 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1963 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1964 &sc->bge_cdata.bge_rx_std_ring_map);
1965 if (error)
1966 return (ENOMEM);
1967
1968 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1969
1970 /* Load the address of the standard RX ring */
1971
1972 ctx.bge_maxsegs = 1;
1973 ctx.sc = sc;
1974
1975 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1976 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1977 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1978
1979 if (error)
1980 return (ENOMEM);
1981
1982 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1983
1984 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1985
1986 /*
1987 * Create tag for jumbo mbufs.
1988 * This is really a bit of a kludge. We allocate a special
1989 * jumbo buffer pool which (thanks to the way our DMA
1990 * memory allocation works) will consist of contiguous
1991 * pages. This means that even though a jumbo buffer might
1992 * be larger than a page size, we don't really need to
1993 * map it into more than one DMA segment. However, the
1994 * default mbuf tag will result in multi-segment mappings,
1995 * so we have to create a special jumbo mbuf tag that
1996 * lets us get away with mapping the jumbo buffers as
1997 * a single segment. I think eventually the driver should
1998 * be changed so that it uses ordinary mbufs and cluster
1999 * buffers, i.e. jumbo frames can span multiple DMA
2000 * descriptors. But that's a project for another day.
2001 */
2002
2003 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2004 ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2005 NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
2006 &sc->bge_cdata.bge_mtag_jumbo);
2007
2008 if (error) {
2009 device_printf(dev, "could not allocate dma tag\n");
2010 return (ENOMEM);
2011 }
2012
2013 /* Create tag for jumbo RX ring */
2014
2015 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2016 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2017 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2018 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2019
2020 if (error) {
2021 device_printf(dev, "could not allocate dma tag\n");
2022 return (ENOMEM);
2023 }
2024
2025 /* Allocate DMA'able memory for jumbo RX ring */
2026
2027 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2028 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
2029 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2030 if (error)
2031 return (ENOMEM);
2032
2033 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
2034 BGE_JUMBO_RX_RING_SZ);
2035
2036 /* Load the address of the jumbo RX ring */
2037
2038 ctx.bge_maxsegs = 1;
2039 ctx.sc = sc;
2040
2041 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2042 sc->bge_cdata.bge_rx_jumbo_ring_map,
2043 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2044 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2045
2046 if (error)
2047 return (ENOMEM);
2048
2049 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2050
2051 /* Create DMA maps for jumbo RX buffers */
2052
2053 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2054 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2055 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2056 if (error) {
2057 device_printf(dev,
2058				    "can't create DMA map for jumbo RX\n");
2059 return(ENOMEM);
2060 }
2061 }
2062
2063 }
2064
2065 /* Create tag for RX return ring */
2066
2067 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2068 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2069 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2070 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2071
2072 if (error) {
2073 device_printf(dev, "could not allocate dma tag\n");
2074 return (ENOMEM);
2075 }
2076
2077 /* Allocate DMA'able memory for RX return ring */
2078
2079 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2080 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2081 &sc->bge_cdata.bge_rx_return_ring_map);
2082 if (error)
2083 return (ENOMEM);
2084
2085 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2086 BGE_RX_RTN_RING_SZ(sc));
2087
2088 /* Load the address of the RX return ring */
2089
2090 ctx.bge_maxsegs = 1;
2091 ctx.sc = sc;
2092
2093 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2094 sc->bge_cdata.bge_rx_return_ring_map,
2095 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2096 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2097
2098 if (error)
2099 return (ENOMEM);
2100
2101 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2102
2103 /* Create tag for TX ring */
2104
2105 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2106 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2107 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2108 &sc->bge_cdata.bge_tx_ring_tag);
2109
2110 if (error) {
2111 device_printf(dev, "could not allocate dma tag\n");
2112 return (ENOMEM);
2113 }
2114
2115 /* Allocate DMA'able memory for TX ring */
2116
2117 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2118 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2119 &sc->bge_cdata.bge_tx_ring_map);
2120 if (error)
2121 return (ENOMEM);
2122
2123 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2124
2125 /* Load the address of the TX ring */
2126
2127 ctx.bge_maxsegs = 1;
2128 ctx.sc = sc;
2129
2130 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2131 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2132 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2133
2134 if (error)
2135 return (ENOMEM);
2136
2137 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2138
2139 /* Create tag for status block */
2140
2141 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2142 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2143 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2144 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2145
2146 if (error) {
2147 device_printf(dev, "could not allocate dma tag\n");
2148 return (ENOMEM);
2149 }
2150
2151 /* Allocate DMA'able memory for status block */
2152
2153 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2154 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2155 &sc->bge_cdata.bge_status_map);
2156 if (error)
2157 return (ENOMEM);
2158
2159 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2160
2161 /* Load the address of the status block */
2162
2163 ctx.sc = sc;
2164 ctx.bge_maxsegs = 1;
2165
2166 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2167 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2168 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2169
2170 if (error)
2171 return (ENOMEM);
2172
2173 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2174
2175 /* Create tag for statistics block */
2176
2177 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2178 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2179 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2180 &sc->bge_cdata.bge_stats_tag);
2181
2182 if (error) {
2183 device_printf(dev, "could not allocate dma tag\n");
2184 return (ENOMEM);
2185 }
2186
2187 /* Allocate DMA'able memory for statistics block */
2188
2189 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2190 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2191 &sc->bge_cdata.bge_stats_map);
2192 if (error)
2193 return (ENOMEM);
2194
2195 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2196
2197	/* Load the address of the statistics block */
2198
2199 ctx.sc = sc;
2200 ctx.bge_maxsegs = 1;
2201
2202 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2203 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2204 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2205
2206 if (error)
2207 return (ENOMEM);
2208
2209 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2210
2211 return(0);
2212}
2213
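/*
 * Probe/attach glue: map the PCI registers, allocate the interrupt,
 * reset and initialize the chip, read the station address from NIC
 * memory or the EEPROM, set up the DMA resources and (on non-5705
 * chips) the jumbo buffer pool, fill in the ifnet structure, detect
 * TBI (fiber) vs. copper media, and finally hook up the interrupt
 * handler.
 */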
2214static int
2215bge_attach(dev)
2216 device_t dev;
2217{
2218 struct ifnet *ifp;
2219 struct bge_softc *sc;
2220 u_int32_t hwcfg = 0;
2221 u_int32_t mac_addr = 0;
2222 int unit, error = 0, rid;
2223
2224 sc = device_get_softc(dev);
2225 unit = device_get_unit(dev);
2226 sc->bge_dev = dev;
2227 sc->bge_unit = unit;
2228
2229 /*
2230 * Map control/status registers.
2231 */
2232 pci_enable_busmaster(dev);
2233
2234 rid = BGE_PCI_BAR0;
2235 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2236 RF_ACTIVE|PCI_RF_DENSE);
2237
2238 if (sc->bge_res == NULL) {
2239 printf ("bge%d: couldn't map memory\n", unit);
2240 error = ENXIO;
2241 goto fail;
2242 }
2243
2244 sc->bge_btag = rman_get_bustag(sc->bge_res);
2245 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2246 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2247
2248 /* Allocate interrupt */
2249 rid = 0;
2250
2251 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2252 RF_SHAREABLE | RF_ACTIVE);
2253
2254 if (sc->bge_irq == NULL) {
2255 printf("bge%d: couldn't map interrupt\n", unit);
2256 error = ENXIO;
2257 goto fail;
2258 }
2259
2260 sc->bge_unit = unit;
2261
2262 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2263
2264 /* Try to reset the chip. */
2265 bge_reset(sc);
2266
2267 if (bge_chipinit(sc)) {
2268 printf("bge%d: chip initialization failed\n", sc->bge_unit);
2269 bge_release_resources(sc);
2270 error = ENXIO;
2271 goto fail;
2272 }
2273
2274 /*
2275 * Get station address from the EEPROM.
2276 */
2277 mac_addr = bge_readmem_ind(sc, 0x0c14);
2278 if ((mac_addr >> 16) == 0x484b) {
2279 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2280 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2281 mac_addr = bge_readmem_ind(sc, 0x0c18);
2282 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2283 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2284 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2285 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2286 } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2287 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2288 printf("bge%d: failed to read station address\n", unit);
2289 bge_release_resources(sc);
2290 error = ENXIO;
2291 goto fail;
2292 }
2293
2294 /* Save ASIC rev. */
2295
2296 sc->bge_chipid =
2297 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2298 BGE_PCIMISCCTL_ASICREV;
2299 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2300 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2301
2302 /* 5705 limits RX return ring to 512 entries. */
2303 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2304 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2305 else
2306 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2307
2308 if (bge_dma_alloc(dev)) {
2309 printf ("bge%d: failed to allocate DMA resources\n",
2310 sc->bge_unit);
2311 bge_release_resources(sc);
2312 error = ENXIO;
2313 goto fail;
2314 }
2315
2316 /*
2317 * Try to allocate memory for jumbo buffers.
2318 * The 5705 does not appear to support jumbo frames.
2319 */
2320 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2321 if (bge_alloc_jumbo_mem(sc)) {
2322 printf("bge%d: jumbo buffer allocation "
2323 "failed\n", sc->bge_unit);
2324 bge_release_resources(sc);
2325 error = ENXIO;
2326 goto fail;
2327 }
2328 }
2329
2330 /* Set default tuneable values. */
2331 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2332 sc->bge_rx_coal_ticks = 150;
2333 sc->bge_tx_coal_ticks = 150;
2334 sc->bge_rx_max_coal_bds = 64;
2335 sc->bge_tx_max_coal_bds = 128;
2336
2337 /* Set up ifnet structure */
2338 ifp = &sc->arpcom.ac_if;
2339 ifp->if_softc = sc;
2340 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2341 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2342 ifp->if_ioctl = bge_ioctl;
2343 ifp->if_start = bge_start;
2344 ifp->if_watchdog = bge_watchdog;
2345 ifp->if_init = bge_init;
2346 ifp->if_mtu = ETHERMTU;
2347 ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
2348 ifp->if_hwassist = BGE_CSUM_FEATURES;
2349 /* NB: the code for RX csum offload is disabled for now */
2350 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2351 IFCAP_VLAN_MTU;
2352 ifp->if_capenable = ifp->if_capabilities;
2353
2354 /*
2355 * Figure out what sort of media we have by checking the
2356 * hardware config word in the first 32k of NIC internal memory,
2357 * or fall back to examining the EEPROM if necessary.
2358 * Note: on some BCM5700 cards, this value appears to be unset.
2359 * If that's the case, we have to rely on identifying the NIC
2360 * by its PCI subsystem ID, as we do below for the SysKonnect
2361 * SK-9D41.
2362 */
2363 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2364 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2365 else {
2366 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2367 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2368 hwcfg = ntohl(hwcfg);
2369 }
2370
2371 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2372 sc->bge_tbi = 1;
2373
2374 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2375 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2376 sc->bge_tbi = 1;
2377
2378 if (sc->bge_tbi) {
2379 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2380 bge_ifmedia_upd, bge_ifmedia_sts);
2381 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2382 ifmedia_add(&sc->bge_ifmedia,
2383 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2384 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2385 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2386 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2387 } else {
2388 /*
2389 * Do transceiver setup.
2390 */
2391 if (mii_phy_probe(dev, &sc->bge_miibus,
2392 bge_ifmedia_upd, bge_ifmedia_sts)) {
2393 printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2394 bge_release_resources(sc);
2395 bge_free_jumbo_mem(sc);
2396 error = ENXIO;
2397 goto fail;
2398 }
2399 }
2400
2401 /*
2402 * When using the BCM5701 in PCI-X mode, data corruption has
2403 * been observed in the first few bytes of some received packets.
2404 * Aligning the packet buffer in memory eliminates the corruption.
2405 * Unfortunately, this misaligns the packet payloads. On platforms
2406 * which do not support unaligned accesses, we will realign the
2407 * payloads by copying the received packets.
2408 */
2409 switch (sc->bge_chipid) {
2410 case BGE_CHIPID_BCM5701_A0:
2411 case BGE_CHIPID_BCM5701_B0:
2412 case BGE_CHIPID_BCM5701_B2:
2413 case BGE_CHIPID_BCM5701_B5:
2414 /* If in PCI-X mode, work around the alignment bug. */
2415 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2416 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2417 BGE_PCISTATE_PCI_BUSSPEED)
2418 sc->bge_rx_alignment_bug = 1;
2419 break;
2420 }
2421
2422 /*
2423 * Call MI attach routine.
2424 */
2425 ether_ifattach(ifp, sc->arpcom.ac_enaddr);
2426 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2427
2428 /*
2429 * Hookup IRQ last.
2430 */
2431 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2432 bge_intr, sc, &sc->bge_intrhand);
2433
2434 if (error) {
2435 bge_release_resources(sc);
2436 printf("bge%d: couldn't set up irq\n", unit);
2437 }
2438
2439fail:
2440 return(error);
2441}
2442
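/*
 * Detach the device: stop and reset the hardware, detach the network
 * interface and PHY (or the ifmedia instance on TBI cards), then
 * release all bus and DMA resources, including the jumbo buffer pool
 * on chips that have one.
 */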
2443static int
2444bge_detach(dev)
2445 device_t dev;
2446{
2447 struct bge_softc *sc;
2448 struct ifnet *ifp;
2449
2450 sc = device_get_softc(dev);
2451 ifp = &sc->arpcom.ac_if;
2452
2453 BGE_LOCK(sc);
2454 bge_stop(sc);
2455 bge_reset(sc);
2456 BGE_UNLOCK(sc);
2457
2458 ether_ifdetach(ifp);
2459
2460 if (sc->bge_tbi) {
2461 ifmedia_removeall(&sc->bge_ifmedia);
2462 } else {
2463 bus_generic_detach(dev);
2464 device_delete_child(dev, sc->bge_miibus);
2465 }
2466
2467 bge_release_resources(sc);
2468 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2469 bge_free_jumbo_mem(sc);
2470
2471 return(0);
2472}
2473
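/*
 * Release everything allocated during attach: VPD strings, the
 * interrupt handler and IRQ, the register mapping, the DMA resources
 * and, if it was set up, the softc mutex.
 */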
2474static void
2475bge_release_resources(sc)
2476 struct bge_softc *sc;
2477{
2478 device_t dev;
2479
2480 dev = sc->bge_dev;
2481
2482 if (sc->bge_vpd_prodname != NULL)
2483 free(sc->bge_vpd_prodname, M_DEVBUF);
2484
2485 if (sc->bge_vpd_readonly != NULL)
2486 free(sc->bge_vpd_readonly, M_DEVBUF);
2487
2488 if (sc->bge_intrhand != NULL)
2489 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2490
2491 if (sc->bge_irq != NULL)
2492 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2493
2494 if (sc->bge_res != NULL)
2495 bus_release_resource(dev, SYS_RES_MEMORY,
2496 BGE_PCI_BAR0, sc->bge_res);
2497
2498 bge_dma_free(sc);
2499
2500 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2501 BGE_LOCK_DESTROY(sc);
2502
2503 return;
2504}
2505
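/*
 * Issue a global reset of the controller. Important PCI configuration
 * registers are saved and restored around the reset, after which the
 * routine waits for the firmware handshake (the one's complement of
 * the magic number in general communications memory) and for the
 * PCISTATE register to return to its pre-reset value before touching
 * the MAC again.
 */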
2506static void
2507bge_reset(sc)
2508 struct bge_softc *sc;
2509{
2510 device_t dev;
2511 u_int32_t cachesize, command, pcistate;
2512 int i, val = 0;
2513
2514 dev = sc->bge_dev;
2515
2516 /* Save some important PCI state. */
2517 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2518 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2519 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2520
2521 pci_write_config(dev, BGE_PCI_MISC_CTL,
2522 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2523 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2524
2525 /* Issue global reset */
2526 bge_writereg_ind(sc, BGE_MISC_CFG,
2527 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
2528
2529 DELAY(1000);
2530
2531 /* Reset some of the PCI state that got zapped by reset */
2532 pci_write_config(dev, BGE_PCI_MISC_CTL,
2533 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2534 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2535 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2536 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2537 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2538
2539 /* Enable memory arbiter. */
2540 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2541 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2542
2543 /*
2544 * Prevent PXE restart: write a magic number to the
2545 * general communications memory at 0xB50.
2546 */
2547 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2548 /*
2549 * Poll the value location we just wrote until
2550 * we see the 1's complement of the magic number.
2551 * This indicates that the firmware initialization
2552 * is complete.
2553 */
2554 for (i = 0; i < BGE_TIMEOUT; i++) {
2555 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2556 if (val == ~BGE_MAGIC_NUMBER)
2557 break;
2558 DELAY(10);
2559 }
2560
2561 if (i == BGE_TIMEOUT) {
2562 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2563 return;
2564 }
2565
2566 /*
2567 * XXX Wait for the value of the PCISTATE register to
2568 * return to its original pre-reset state. This is a
2569 * fairly good indicator of reset completion. If we don't
2570 * wait for the reset to fully complete, trying to read
2571 * from the device's non-PCI registers may yield garbage
2572 * results.
2573 */
2574 for (i = 0; i < BGE_TIMEOUT; i++) {
2575 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2576 break;
2577 DELAY(10);
2578 }
2579
2580 /* Fix up byte swapping */
2581 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2582 BGE_MODECTL_BYTESWAP_DATA);
2583
2584 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2585
2586 /*
2587 * The 5704 in TBI mode apparently needs some special
2588	 * adjustment to ensure the SERDES drive level is set
2589 * to 1.2V.
2590 */
2591 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2592 uint32_t serdescfg;
2593 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2594 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2595 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2596 }
2597
2598 DELAY(10000);
2599
2600 return;
2601}
2602
2603/*
2604 * Frame reception handling. This is called if there's a frame
2605 * on the receive return list.
2606 *
2607 * Note: we have to be able to handle two possibilities here:
2608 * 1) the frame is from the jumbo receive ring
2609 * 2) the frame is from the standard receive ring
2610 */
2611
2612static void
2613bge_rxeof(sc)
2614 struct bge_softc *sc;
2615{
2616 struct ifnet *ifp;
2617 int stdcnt = 0, jumbocnt = 0;
2618
2619 BGE_LOCK_ASSERT(sc);
2620
2621 ifp = &sc->arpcom.ac_if;
2622
2623 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2624 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2625 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2626 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2627 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2628 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2629 sc->bge_cdata.bge_rx_jumbo_ring_map,
2630 BUS_DMASYNC_POSTREAD);
2631 }
2632
2633 while(sc->bge_rx_saved_considx !=
2634 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2635 struct bge_rx_bd *cur_rx;
2636 u_int32_t rxidx;
2637 struct ether_header *eh;
2638 struct mbuf *m = NULL;
2639 u_int16_t vlan_tag = 0;
2640 int have_tag = 0;
2641
2642 cur_rx =
2643 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2644
2645 rxidx = cur_rx->bge_idx;
2646 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2647
2648 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2649 have_tag = 1;
2650 vlan_tag = cur_rx->bge_vlan_tag;
2651 }
2652
2653 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2654 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2655 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2656 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2657 BUS_DMASYNC_POSTREAD);
2658 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2659 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2660 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2661 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2662 jumbocnt++;
2663 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2664 ifp->if_ierrors++;
2665 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2666 continue;
2667 }
2668 if (bge_newbuf_jumbo(sc,
2669 sc->bge_jumbo, NULL) == ENOBUFS) {
2670 ifp->if_ierrors++;
2671 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2672 continue;
2673 }
2674 } else {
2675 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2676 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2677 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2678 BUS_DMASYNC_POSTREAD);
2679 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2680 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2681 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2682 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2683 stdcnt++;
2684 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2685 ifp->if_ierrors++;
2686 bge_newbuf_std(sc, sc->bge_std, m);
2687 continue;
2688 }
2689 if (bge_newbuf_std(sc, sc->bge_std,
2690 NULL) == ENOBUFS) {
2691 ifp->if_ierrors++;
2692 bge_newbuf_std(sc, sc->bge_std, m);
2693 continue;
2694 }
2695 }
2696
2697 ifp->if_ipackets++;
2698#ifndef __i386__
2699 /*
2700 * The i386 allows unaligned accesses, but for other
2701 * platforms we must make sure the payload is aligned.
2702 */
2703 if (sc->bge_rx_alignment_bug) {
2704 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2705 cur_rx->bge_len);
2706 m->m_data += ETHER_ALIGN;
2707 }
2708#endif
2709 eh = mtod(m, struct ether_header *);
2710 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2711 m->m_pkthdr.rcvif = ifp;
2712
2713#if 0 /* currently broken for some packets, possibly related to TCP options */
2714 if (ifp->if_capenable & IFCAP_RXCSUM) {
2715 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2716 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2717 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2718 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2719 m->m_pkthdr.csum_data =
2720 cur_rx->bge_tcp_udp_csum;
2721 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2722 }
2723 }
2724#endif
2725
2726 /*
2727 * If we received a packet with a vlan tag,
2728 * attach that information to the packet.
2729 */
2730 if (have_tag)
2731 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2732
2733 BGE_UNLOCK(sc);
2734 (*ifp->if_input)(ifp, m);
2735 BGE_LOCK(sc);
2736 }
2737
2738 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2739 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2740 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2741 sc->bge_cdata.bge_rx_std_ring_map,
2742 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2743 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2744 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2745 sc->bge_cdata.bge_rx_jumbo_ring_map,
2746 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2747 }
2748
2749 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2750 if (stdcnt)
2751 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2752 if (jumbocnt)
2753 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2754
2755 return;
2756}
2757
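/*
 * Transmit completion handling: walk the TX ring from the saved
 * consumer index up to the index reported in the status block, freeing
 * mbufs and unloading DMA maps for completed frames, and clear
 * IFF_OACTIVE so the transmit path can be restarted.
 */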
2758static void
2759bge_txeof(sc)
2760 struct bge_softc *sc;
2761{
2762 struct bge_tx_bd *cur_tx = NULL;
2763 struct ifnet *ifp;
2764
2765 BGE_LOCK_ASSERT(sc);
2766
2767 ifp = &sc->arpcom.ac_if;
2768
2769 /*
2770 * Go through our tx ring and free mbufs for those
2771 * frames that have been sent.
2772 */
2773 while (sc->bge_tx_saved_considx !=
2774 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2775 u_int32_t idx = 0;
2776
2777 idx = sc->bge_tx_saved_considx;
2778 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2779 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2780 ifp->if_opackets++;
2781 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2782 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2783 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2784 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2785 sc->bge_cdata.bge_tx_dmamap[idx]);
2786 }
2787 sc->bge_txcnt--;
2788 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2789 ifp->if_timer = 0;
2790 }
2791
2792 if (cur_tx != NULL)
2793 ifp->if_flags &= ~IFF_OACTIVE;
2794
2795 return;
2796}
2797
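/*
 * Interrupt handler: sync the status block, read and clear its status
 * word, acknowledge the interrupt via the IRQ0 mailbox, process link
 * state changes (with chip-specific workarounds), service the RX
 * return and TX rings, and then re-enable interrupts.
 */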
2798static void
2799bge_intr(xsc)
2800 void *xsc;
2801{
2802 struct bge_softc *sc;
2803 struct ifnet *ifp;
2804 u_int32_t statusword;
2805 u_int32_t status, mimode;
2806
2807 sc = xsc;
2808 ifp = &sc->arpcom.ac_if;
2809
2810 BGE_LOCK(sc);
2811
2812 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2813 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2814
2815 statusword =
2816 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2817
2818#ifdef notdef
2819 /* Avoid this for now -- checking this register is expensive. */
2820 /* Make sure this is really our interrupt. */
2821 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2822 return;
2823#endif
2824	/* Ack interrupt and stop others from occurring. */
2825 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2826
2827 /*
2828 * Process link state changes.
2829 * Grrr. The link status word in the status block does
2830 * not work correctly on the BCM5700 rev AX and BX chips,
2831 * according to all available information. Hence, we have
2832 * to enable MII interrupts in order to properly obtain
2833 * async link changes. Unfortunately, this also means that
2834 * we have to read the MAC status register to detect link
2835 * changes, thereby adding an additional register access to
2836 * the interrupt handler.
2837 */
2838
2839 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2840
2841 status = CSR_READ_4(sc, BGE_MAC_STS);
2842 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2843 sc->bge_link = 0;
2844 callout_stop(&sc->bge_stat_ch);
2845 bge_tick_locked(sc);
2846 /* Clear the interrupt */
2847 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2848 BGE_EVTENB_MI_INTERRUPT);
2849 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2850 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2851 BRGPHY_INTRS);
2852 }
2853 } else {
2854 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2855 /*
2856 * Sometimes PCS encoding errors are detected in
2857 * TBI mode (on fiber NICs), and for some reason
2858 * the chip will signal them as link changes.
2859 * If we get a link change event, but the 'PCS
2860 * encoding error' bit in the MAC status register
2861 * is set, don't bother doing a link check.
2862 * This avoids spurious "gigabit link up" messages
2863 * that sometimes appear on fiber NICs during
2864 * periods of heavy traffic. (There should be no
2865 * effect on copper NICs.)
2866 *
2867 * If we do have a copper NIC (bge_tbi == 0) then
2868 * check that the AUTOPOLL bit is set before
2869 * processing the event as a real link change.
2870 * Turning AUTOPOLL on and off in the MII read/write
2871 * functions will often trigger a link status
2872 * interrupt for no reason.
2873 */
2874 status = CSR_READ_4(sc, BGE_MAC_STS);
2875 mimode = CSR_READ_4(sc, BGE_MI_MODE);
2876 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2877 BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
2878 (mimode & BGE_MIMODE_AUTOPOLL))) {
2879 sc->bge_link = 0;
2880 callout_stop(&sc->bge_stat_ch);
2881 bge_tick_locked(sc);
2882 }
2883 /* Clear the interrupt */
2884 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2885 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2886 BGE_MACSTAT_LINK_CHANGED);
2887
2888 /* Force flush the status block cached by PCI bridge */
2889 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2890 }
2891 }
2892
2893 if (ifp->if_flags & IFF_RUNNING) {
2894 /* Check RX return ring producer/consumer */
2895 bge_rxeof(sc);
2896
2897 /* Check TX ring producer/consumer */
2898 bge_txeof(sc);
2899 }
2900
2901 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2902 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2903
2904 bge_handle_events(sc);
2905
2906 /* Re-enable interrupts. */
2907 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2908
2909 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2910 bge_start_locked(ifp);
2911
2912 BGE_UNLOCK(sc);
2913
2914 return;
2915}
2916
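/*
 * Periodic timer, called with the driver lock held: update the
 * statistics counters, re-arm the callout and, if the link is not yet
 * up, poll the TBI status register or the MII PHY for link changes.
 */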
2917static void
2918bge_tick_locked(sc)
2919 struct bge_softc *sc;
2920{
2921 struct mii_data *mii = NULL;
2922 struct ifmedia *ifm = NULL;
2923 struct ifnet *ifp;
2924
2925 ifp = &sc->arpcom.ac_if;
2926
2927 BGE_LOCK_ASSERT(sc);
2928
2929 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2930 bge_stats_update_regs(sc);
2931 else
2932 bge_stats_update(sc);
2933 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2934 if (sc->bge_link)
2935 return;
2936
2937 if (sc->bge_tbi) {
2938 ifm = &sc->bge_ifmedia;
2939 if (CSR_READ_4(sc, BGE_MAC_STS) &
2940 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2941 sc->bge_link++;
2942 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
2943 BGE_CLRBIT(sc, BGE_MAC_MODE,
2944 BGE_MACMODE_TBI_SEND_CFGS);
2945 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2946 printf("bge%d: gigabit link up\n", sc->bge_unit);
2947 if (ifp->if_snd.ifq_head != NULL)
2948 bge_start_locked(ifp);
2949 }
2950 return;
2951 }
2952
2953 mii = device_get_softc(sc->bge_miibus);
2954 mii_tick(mii);
2955
2956 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2957 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2958 sc->bge_link++;
2959 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2960 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2961 printf("bge%d: gigabit link up\n",
2962 sc->bge_unit);
2963 if (ifp->if_snd.ifq_head != NULL)
2964 bge_start_locked(ifp);
2965 }
2966
2967 return;
2968}
2969
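/* Unlocked wrapper: acquire the driver lock and run bge_tick_locked(). */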
2970static void
2971bge_tick(xsc)
2972 void *xsc;
2973{
2974 struct bge_softc *sc;
2975
2976 sc = xsc;
2977
2978 BGE_LOCK(sc);
2979 bge_tick_locked(sc);
2980 BGE_UNLOCK(sc);
2981}
2982
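/*
 * Update interface statistics by reading the MAC statistics registers
 * directly. This path is used for the BCM5705, for which the driver
 * does not use the NIC-memory statistics block.
 */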
2983static void
2984bge_stats_update_regs(sc)
2985 struct bge_softc *sc;
2986{
2987 struct ifnet *ifp;
2988 struct bge_mac_stats_regs stats;
2989 u_int32_t *s;
2990 int i;
2991
2992 ifp = &sc->arpcom.ac_if;
2993
2994 s = (u_int32_t *)&stats;
2995 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2996 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2997 s++;
2998 }
2999
3000 ifp->if_collisions +=
3001 (stats.dot3StatsSingleCollisionFrames +
3002 stats.dot3StatsMultipleCollisionFrames +
3003 stats.dot3StatsExcessiveCollisions +
3004 stats.dot3StatsLateCollisions) -
3005 ifp->if_collisions;
3006
3007 return;
3008}
3009
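/*
 * Update interface statistics from the statistics block maintained by
 * the chip in NIC memory, accessed through the register window.
 */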
3010static void
3011bge_stats_update(sc)
3012 struct bge_softc *sc;
3013{
3014 struct ifnet *ifp;
3015 struct bge_stats *stats;
3016
3017 ifp = &sc->arpcom.ac_if;
3018
3019 stats = (struct bge_stats *)(sc->bge_vhandle +
3020 BGE_MEMWIN_START + BGE_STATS_BLOCK);
3021
3022 ifp->if_collisions +=
3023 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
3024 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
3025 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
3026 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
3027 ifp->if_collisions;
3028
3029#ifdef notdef
3030 ifp->if_collisions +=
3031 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3032 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3033 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3034 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3035 ifp->if_collisions;
3036#endif
3037
3038 return;
3039}
3040
3041/*
3042 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3043 * pointers to descriptors.
3044 */
3045static int
3046bge_encap(sc, m_head, txidx)
3047 struct bge_softc *sc;
3048 struct mbuf *m_head;
3049 u_int32_t *txidx;
3050{
3051 struct bge_tx_bd *f = NULL;
3052 u_int16_t csum_flags = 0;
3053 struct m_tag *mtag;
3054 struct bge_dmamap_arg ctx;
3055 bus_dmamap_t map;
3056 int error;
3057
3058
3059 if (m_head->m_pkthdr.csum_flags) {
3060 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3061 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3062 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3063 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3064 if (m_head->m_flags & M_LASTFRAG)
3065 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3066 else if (m_head->m_flags & M_FRAG)
3067 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3068 }
3069
3070 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
3071
3072 ctx.sc = sc;
3073 ctx.bge_idx = *txidx;
3074 ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3075 ctx.bge_flags = csum_flags;
3076 /*
3077 * Sanity check: avoid coming within 16 descriptors
3078 * of the end of the ring.
3079 */
3080 ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3081
3082 map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3083 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3084 m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3085
3086 if (error || ctx.bge_maxsegs == 0 /*||
3087 ctx.bge_idx == sc->bge_tx_saved_considx*/)
3088 return (ENOBUFS);
3089
3090 /*
3091	 * Ensure that the map for this transmission
3092 * is placed at the array index of the last descriptor
3093 * in this chain.
3094 */
3095 sc->bge_cdata.bge_tx_dmamap[*txidx] =
3096 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3097 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3098 sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3099 sc->bge_txcnt += ctx.bge_maxsegs;
3100 f = &sc->bge_ldata.bge_tx_ring[*txidx];
3101 if (mtag != NULL) {
3102 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3103 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3104 } else {
3105 f->bge_vlan_tag = 0;
3106 }
3107
3108 BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3109 *txidx = ctx.bge_idx;
3110
3111 return(0);
3112}
3113
3114/*
3115 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3116 * to the mbuf data regions directly in the transmit descriptors.
3117 */
3118static void
3119bge_start_locked(ifp)
3120 struct ifnet *ifp;
3121{
3122 struct bge_softc *sc;
3123 struct mbuf *m_head = NULL;
3124 u_int32_t prodidx = 0;
3125
3126 sc = ifp->if_softc;
3127
3128 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3129 return;
3130
3131 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3132
3133 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3134 IF_DEQUEUE(&ifp->if_snd, m_head);
3135 if (m_head == NULL)
3136 break;
3137
3138 /*
3139 * XXX
3140 * The code inside the if() block is never reached since we
3141 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3142 * requests to checksum TCP/UDP in a fragmented packet.
3143 *
3144 * XXX
3145 * safety overkill. If this is a fragmented packet chain
3146 * with delayed TCP/UDP checksums, then only encapsulate
3147 * it if we have enough descriptors to handle the entire
3148 * chain at once.
3149 * (paranoia -- may not actually be needed)
3150 */
3151 if (m_head->m_flags & M_FIRSTFRAG &&
3152 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3153 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3154 m_head->m_pkthdr.csum_data + 16) {
3155 IF_PREPEND(&ifp->if_snd, m_head);
3156 ifp->if_flags |= IFF_OACTIVE;
3157 break;
3158 }
3159 }
3160
3161 /*
3162 * Pack the data into the transmit ring. If we
3163 * don't have room, set the OACTIVE flag and wait
3164 * for the NIC to drain the ring.
3165 */
3166 if (bge_encap(sc, m_head, &prodidx)) {
3167 IF_PREPEND(&ifp->if_snd, m_head);
3168 ifp->if_flags |= IFF_OACTIVE;
3169 break;
3170 }
3171
3172 /*
3173 * If there's a BPF listener, bounce a copy of this frame
3174 * to him.
3175 */
3176 BPF_MTAP(ifp, m_head);
3177 }
3178
3179 /* Transmit */
3180 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3181 /* 5700 b2 errata */
3182 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3183 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3184
3185 /*
3186 * Set a timeout in case the chip goes out to lunch.
3187 */
3188 ifp->if_timer = 5;
3189
3190 return;
3191}
3192
3193/*
3194 * Unlocked wrapper for the transmit routine: acquire the driver lock
3195 * and hand the send queue off to bge_start_locked().
3196 */
3197static void
3198bge_start(ifp)
3199 struct ifnet *ifp;
3200{
3201 struct bge_softc *sc;
3202
3203 sc = ifp->if_softc;
3204 BGE_LOCK(sc);
3205 bge_start_locked(ifp);
3206 BGE_UNLOCK(sc);
3207}
3208
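/*
 * Bring the interface up (driver lock held): reset and reinitialize
 * the chip, program the MTU, MAC address, RX mode and multicast
 * filter, set up the standard, jumbo and TX rings, enable the
 * transmitter, receiver and host interrupts, select the media and
 * start the periodic tick.
 */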
3209static void
3210bge_init_locked(sc)
3211 struct bge_softc *sc;
3212{
3213 struct ifnet *ifp;
3214 u_int16_t *m;
3215
3216 BGE_LOCK_ASSERT(sc);
3217
3218 ifp = &sc->arpcom.ac_if;
3219
3220 if (ifp->if_flags & IFF_RUNNING)
3221 return;
3222
3223 /* Cancel pending I/O and flush buffers. */
3224 bge_stop(sc);
3225 bge_reset(sc);
3226 bge_chipinit(sc);
3227
3228 /*
3229 * Init the various state machines, ring
3230 * control blocks and firmware.
3231 */
3232 if (bge_blockinit(sc)) {
3233 printf("bge%d: initialization failure\n", sc->bge_unit);
3234 return;
3235 }
3236
3237 ifp = &sc->arpcom.ac_if;
3238
3239 /* Specify MTU. */
3240 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3241 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3242
3243 /* Load our MAC address. */
3244 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3245 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3246 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3247
3248 /* Enable or disable promiscuous mode as needed. */
3249 if (ifp->if_flags & IFF_PROMISC) {
3250 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3251 } else {
3252 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3253 }
3254
3255 /* Program multicast filter. */
3256 bge_setmulti(sc);
3257
3258 /* Init RX ring. */
3259 bge_init_rx_ring_std(sc);
3260
3261 /*
3262 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3263	 * memory to ensure that the chip has in fact read the first
3264 * entry of the ring.
3265 */
3266 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3267 u_int32_t v, i;
3268 for (i = 0; i < 10; i++) {
3269 DELAY(20);
3270 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3271 if (v == (MCLBYTES - ETHER_ALIGN))
3272 break;
3273 }
3274 if (i == 10)
3275 printf ("bge%d: 5705 A0 chip failed to load RX ring\n",
3276 sc->bge_unit);
3277 }
3278
3279 /* Init jumbo RX ring. */
3280 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3281 bge_init_rx_ring_jumbo(sc);
3282
3283 /* Init our RX return ring index */
3284 sc->bge_rx_saved_considx = 0;
3285
3286 /* Init TX ring. */
3287 bge_init_tx_ring(sc);
3288
3289 /* Turn on transmitter */
3290 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3291
3292 /* Turn on receiver */
3293 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3294
3295 /* Tell firmware we're alive. */
3296 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3297
3298 /* Enable host interrupts. */
3299 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3300 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3301 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3302
3303 bge_ifmedia_upd(ifp);
3304
3305 ifp->if_flags |= IFF_RUNNING;
3306 ifp->if_flags &= ~IFF_OACTIVE;
3307
3308 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3309
3310 return;
3311}
3312
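/* Unlocked wrapper: acquire the driver lock and run bge_init_locked(). */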
3313static void
3314bge_init(xsc)
3315 void *xsc;
3316{
3317 struct bge_softc *sc = xsc;
3318
3319 BGE_LOCK(sc);
3320 bge_init_locked(sc);
3321 BGE_UNLOCK(sc);
3322
3323 return;
3324}
3325
3326/*
3327 * Set media options.
3328 */
3329static int
3330bge_ifmedia_upd(ifp)
3331 struct ifnet *ifp;
3332{
3333 struct bge_softc *sc;
3334 struct mii_data *mii;
3335 struct ifmedia *ifm;
3336
3337 sc = ifp->if_softc;
3338 ifm = &sc->bge_ifmedia;
3339
3340 /* If this is a 1000baseX NIC, enable the TBI port. */
3341 if (sc->bge_tbi) {
3342 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3343 return(EINVAL);
3344 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3345 case IFM_AUTO:
3346 /*
3347 * The BCM5704 ASIC appears to have a special
3348 * mechanism for programming the autoneg
3349 * advertisement registers in TBI mode.
3350 */
3351 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3352 uint32_t sgdig;
3353 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3354 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3355 sgdig |= BGE_SGDIGCFG_AUTO|
3356 BGE_SGDIGCFG_PAUSE_CAP|
3357 BGE_SGDIGCFG_ASYM_PAUSE;
3358 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3359 sgdig|BGE_SGDIGCFG_SEND);
3360 DELAY(5);
3361 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3362 }
3363 break;
3364 case IFM_1000_SX:
3365 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3366 BGE_CLRBIT(sc, BGE_MAC_MODE,
3367 BGE_MACMODE_HALF_DUPLEX);
3368 } else {
3369 BGE_SETBIT(sc, BGE_MAC_MODE,
3370 BGE_MACMODE_HALF_DUPLEX);
3371 }
3372 break;
3373 default:
3374 return(EINVAL);
3375 }
3376 return(0);
3377 }
3378
3379 mii = device_get_softc(sc->bge_miibus);
3380 sc->bge_link = 0;
3381 if (mii->mii_instance) {
3382 struct mii_softc *miisc;
3383 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3384 miisc = LIST_NEXT(miisc, mii_list))
3385 mii_phy_reset(miisc);
3386 }
3387 mii_mediachg(mii);
3388
3389 return(0);
3390}
3391
3392/*
3393 * Report current media status.
3394 */
3395static void
3396bge_ifmedia_sts(ifp, ifmr)
3397 struct ifnet *ifp;
3398 struct ifmediareq *ifmr;
3399{
3400 struct bge_softc *sc;
3401 struct mii_data *mii;
3402
3403 sc = ifp->if_softc;
3404
3405 if (sc->bge_tbi) {
3406 ifmr->ifm_status = IFM_AVALID;
3407 ifmr->ifm_active = IFM_ETHER;
3408 if (CSR_READ_4(sc, BGE_MAC_STS) &
3409 BGE_MACSTAT_TBI_PCS_SYNCHED)
3410 ifmr->ifm_status |= IFM_ACTIVE;
3411 ifmr->ifm_active |= IFM_1000_SX;
3412 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3413 ifmr->ifm_active |= IFM_HDX;
3414 else
3415 ifmr->ifm_active |= IFM_FDX;
3416 return;
3417 }
3418
3419 mii = device_get_softc(sc->bge_miibus);
3420 mii_pollstat(mii);
3421 ifmr->ifm_active = mii->mii_media_active;
3422 ifmr->ifm_status = mii->mii_media_status;
3423
3424 return;
3425}
3426
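/*
 * Handle socket ioctls: MTU changes (jumbo frames are refused on the
 * 5705), interface flag changes (promiscuous mode is toggled without a
 * full re-init when possible), multicast list updates, media requests
 * and checksum-offload capability changes.
 */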
3427static int
3428bge_ioctl(ifp, command, data)
3429 struct ifnet *ifp;
3430 u_long command;
3431 caddr_t data;
3432{
3433 struct bge_softc *sc = ifp->if_softc;
3434 struct ifreq *ifr = (struct ifreq *) data;
3435 int mask, error = 0;
3436 struct mii_data *mii;
3437
3438 switch(command) {
3439 case SIOCSIFMTU:
3440 /* Disallow jumbo frames on 5705. */
3441 if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3442 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3443 error = EINVAL;
3444 else {
3445 ifp->if_mtu = ifr->ifr_mtu;
3446 ifp->if_flags &= ~IFF_RUNNING;
3447 bge_init(sc);
3448 }
3449 break;
3450 case SIOCSIFFLAGS:
3451 BGE_LOCK(sc);
3452 if (ifp->if_flags & IFF_UP) {
3453 /*
3454 * If only the state of the PROMISC flag changed,
3455 * then just use the 'set promisc mode' command
3456 * instead of reinitializing the entire NIC. Doing
3457 * a full re-init means reloading the firmware and
3458 * waiting for it to start up, which may take a
3459 * second or two.
3460 */
3461 if (ifp->if_flags & IFF_RUNNING &&
3462 ifp->if_flags & IFF_PROMISC &&
3463 !(sc->bge_if_flags & IFF_PROMISC)) {
3464 BGE_SETBIT(sc, BGE_RX_MODE,
3465 BGE_RXMODE_RX_PROMISC);
3466 } else if (ifp->if_flags & IFF_RUNNING &&
3467 !(ifp->if_flags & IFF_PROMISC) &&
3468 sc->bge_if_flags & IFF_PROMISC) {
3469 BGE_CLRBIT(sc, BGE_RX_MODE,
3470 BGE_RXMODE_RX_PROMISC);
3471 } else
3472 bge_init_locked(sc);
3473 } else {
3474 if (ifp->if_flags & IFF_RUNNING) {
3475 bge_stop(sc);
3476 }
3477 }
3478 sc->bge_if_flags = ifp->if_flags;
3479 BGE_UNLOCK(sc);
3480 error = 0;
3481 break;
3482 case SIOCADDMULTI:
3483 case SIOCDELMULTI:
3484 if (ifp->if_flags & IFF_RUNNING) {
3485 BGE_LOCK(sc);
3486 bge_setmulti(sc);
3487 BGE_UNLOCK(sc);
3488 error = 0;
3489 }
3490 break;
3491 case SIOCSIFMEDIA:
3492 case SIOCGIFMEDIA:
3493 if (sc->bge_tbi) {
3494 error = ifmedia_ioctl(ifp, ifr,
3495 &sc->bge_ifmedia, command);
3496 } else {
3497 mii = device_get_softc(sc->bge_miibus);
3498 error = ifmedia_ioctl(ifp, ifr,
3499 &mii->mii_media, command);
3500 }
3501 break;
3502 case SIOCSIFCAP:
3503 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3504 /* NB: the code for RX csum offload is disabled for now */
3505 if (mask & IFCAP_TXCSUM) {
3506 ifp->if_capenable ^= IFCAP_TXCSUM;
3507 if (IFCAP_TXCSUM & ifp->if_capenable)
3508 ifp->if_hwassist = BGE_CSUM_FEATURES;
3509 else
3510 ifp->if_hwassist = 0;
3511 }
3512 error = 0;
3513 break;
3514 default:
3515 error = ether_ioctl(ifp, command, data);
3516 break;
3517 }
3518
3519 return(error);
3520}
3521
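/*
 * Transmit watchdog: if the chip fails to report TX completions before
 * the if_timer armed in bge_start_locked() expires, log the timeout
 * and reinitialize the interface.
 */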
3522static void
3523bge_watchdog(ifp)
3524 struct ifnet *ifp;
3525{
3526 struct bge_softc *sc;
3527
3528 sc = ifp->if_softc;
3529
3530 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3531
3532 ifp->if_flags &= ~IFF_RUNNING;
3533 bge_init(sc);
3534
3535 ifp->if_oerrors++;
3536
3537 return;
3538}
3539
3540/*
3541 * Stop the adapter and free any mbufs allocated to the
3542 * RX and TX lists.
3543 */
3544static void
3545bge_stop(sc)
3546 struct bge_softc *sc;
3547{
3548 struct ifnet *ifp;
3549 struct ifmedia_entry *ifm;
3550 struct mii_data *mii = NULL;
3551 int mtmp, itmp;
3552
3553 BGE_LOCK_ASSERT(sc);
3554
3555 ifp = &sc->arpcom.ac_if;
3556
3557 if (!sc->bge_tbi)
3558 mii = device_get_softc(sc->bge_miibus);
3559
3560 callout_stop(&sc->bge_stat_ch);
3561
3562 /*
3563 * Disable all of the receiver blocks
3564 */
3565 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3566 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3567 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3568 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3569 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3570 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3571 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3572 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3573
3574 /*
3575 * Disable all of the transmit blocks
3576 */
3577 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3578 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3579 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3580 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3581 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3582 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3583 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3584 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3585
3586 /*
3587 * Shut down all of the memory managers and related
3588 * state machines.
3589 */
3590 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3591 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3592 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3593 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3594 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3595 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3596 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
3597 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3598 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3599 }
3600
3601 /* Disable host interrupts. */
3602 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3603 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3604
3605 /*
3606 * Tell firmware we're shutting down.
3607 */
3608 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3609
3610 /* Free the RX lists. */
3611 bge_free_rx_ring_std(sc);
3612
3613 /* Free jumbo RX list. */
3614 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
3615 bge_free_rx_ring_jumbo(sc);
3616
3617 /* Free TX buffers. */
3618 bge_free_tx_ring(sc);
3619
3620 /*
3621 * Isolate/power down the PHY, but leave the media selection
3622 * unchanged so that things will be put back to normal when
3623 * we bring the interface back up.
3624 */
3625 if (!sc->bge_tbi) {
3626 itmp = ifp->if_flags;
3627 ifp->if_flags |= IFF_UP;
3628 ifm = mii->mii_media.ifm_cur;
3629 mtmp = ifm->ifm_media;
3630 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3631 mii_mediachg(mii);
3632 ifm->ifm_media = mtmp;
3633 ifp->if_flags = itmp;
3634 }
3635
3636 sc->bge_link = 0;
3637
3638 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3639
3640 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3641
3642 return;
3643}
3644
3645/*
3646 * Stop all chip I/O so that the kernel's probe routines don't
3647 * get confused by errant DMAs when rebooting.
3648 */
3649static void
3650bge_shutdown(dev)
3651 device_t dev;
3652{
3653 struct bge_softc *sc;
3654
3655 sc = device_get_softc(dev);
3656
3657 BGE_LOCK(sc);
3658 bge_stop(sc);
3659 bge_reset(sc);
3660 BGE_UNLOCK(sc);
3661
3662 return;
3663}