if_bge.c: revision 153234 → revision 153239
(deleted lines are prefixed with "-", added lines with "+")
/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 153234 2005-12-08 13:31:52Z oleg $");
+__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 153239 2005-12-08 16:11:45Z glebius $");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
-#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#include "opt_bge.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bge_type bge_devs[] = {
	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
		"SysKonnect Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
		"Altima AC1002 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};
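
/*
 * Illustrative sketch, not code from this revision: bge_probe() is
 * expected to match a candidate device against this table in the usual
 * newbus fashion (field names per struct bge_type in if_bgereg.h):
 *
 *	struct bge_type *t = bge_devs;
 *
 *	while (t->bge_name != NULL) {
 *		if ((pci_get_vendor(dev) == t->bge_vid) &&
 *		    (pci_get_device(dev) == t->bge_did)) {
 *			device_set_desc(dev, t->bge_name);
 *			return (0);
 *		}
 *		t++;
 *	}
 *	return (ENXIO);
 */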

static int bge_probe		(device_t);
static int bge_attach		(device_t);
static int bge_detach		(device_t);
static int bge_suspend		(device_t);
static int bge_resume		(device_t);
static void bge_release_resources
				(struct bge_softc *);
static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void bge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int bge_dma_alloc	(device_t);
static void bge_dma_free	(struct bge_softc *);

static void bge_txeof		(struct bge_softc *);
static void bge_rxeof		(struct bge_softc *);

static void bge_tick_locked	(struct bge_softc *);
static void bge_tick		(void *);
static void bge_stats_update	(struct bge_softc *);
static void bge_stats_update_regs
				(struct bge_softc *);
static int bge_encap		(struct bge_softc *, struct mbuf *,
				    u_int32_t *);

static void bge_intr		(void *);
static void bge_start_locked	(struct ifnet *);
static void bge_start		(struct ifnet *);
static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
static void bge_init_locked	(struct bge_softc *);
static void bge_init		(void *);
static void bge_stop		(struct bge_softc *);
static void bge_watchdog	(struct ifnet *);
static void bge_shutdown	(device_t);
static int bge_ifmedia_upd	(struct ifnet *);
static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);

static void bge_setmulti	(struct bge_softc *);

static void bge_handle_events	(struct bge_softc *);
-static int bge_alloc_jumbo_mem	(struct bge_softc *);
-static void bge_free_jumbo_mem	(struct bge_softc *);
-static void *bge_jalloc	(struct bge_softc *);
-static void bge_jfree		(void *, void *);
static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std	(struct bge_softc *);
static void bge_free_rx_ring_std	(struct bge_softc *);
static int bge_init_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_tx_ring	(struct bge_softc *);
static int bge_init_tx_ring	(struct bge_softc *);

static int bge_chipinit		(struct bge_softc *);
static int bge_blockinit	(struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
static void bge_vpd_read	(struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
				(struct bge_softc *, int);
static void bge_writemem_ind	(struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
				(struct bge_softc *, int);
#endif
static void bge_writereg_ind	(struct bge_softc *, int, int);

static int bge_miibus_readreg	(device_t, int, int);
static int bge_miibus_writereg	(device_t, int, int, int);
static void bge_miibus_statchg	(device_t);
#ifdef DEVICE_POLLING
static void bge_poll		(struct ifnet *ifp, enum poll_cmd cmd,
				    int count);
static void bge_poll_locked	(struct ifnet *ifp, enum poll_cmd cmd,
				    int count);
#endif

static void bge_reset		(struct bge_softc *);
static void bge_link_upd	(struct bge_softc *);

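/*
 * The jumbo-pool entry points (bge_alloc_jumbo_mem(), bge_free_jumbo_mem(),
 * bge_jalloc() and bge_jfree()) are gone from the declarations above;
 * their implementations are deleted wholesale below, and bge_newbuf_jumbo()
 * now builds jumbo receive buffers from ordinary 9k mbuf clusters instead.
 */
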
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

	return;
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;

	return;
}

/*
 * Map an mbuf chain into a TX ring.
 */

static void
bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct bge_dmamap_arg *ctx;
	struct bge_tx_bd *d = NULL;
	int i = 0, idx;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	idx = ctx->bge_idx;
	while (1) {
		d = &ctx->bge_ring[idx];
		d->bge_addr.bge_addr_lo =
		    htole32(BGE_ADDR_LO(segs[i].ds_addr));
		d->bge_addr.bge_addr_hi =
		    htole32(BGE_ADDR_HI(segs[i].ds_addr));
		d->bge_len = htole16(segs[i].ds_len);
		d->bge_flags = htole16(ctx->bge_flags);
		i++;
		if (i == nseg)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}

	d->bge_flags |= htole16(BGE_TXBDFLAG_END);
	ctx->bge_maxsegs = nseg;
	ctx->bge_idx = idx;

	return;
}

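/*
 * Usage sketch for bge_dma_map_addr() (this mirrors how bge_newbuf_std()
 * below drives it; shown only for illustration, "tag", "map", "vaddr"
 * and "len" are placeholders):
 *
 *	struct bge_dmamap_arg ctx;
 *
 *	ctx.sc = sc;
 *	ctx.bge_maxsegs = 1;
 *	error = bus_dmamap_load(tag, map, vaddr, len,
 *	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0 && ctx.bge_maxsegs != 0)
 *		busaddr = ctx.bge_busaddr;	-- device-visible address
 */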

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	device_t dev;
	u_int32_t val;

	dev = sc->bge_dev;
	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: VPD read timed out\n", sc->bge_unit);
		return(0);
	}

	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);

	return;
}

static void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
		    sc->bge_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
		    sc->bge_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

	return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

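/*
 * Usage sketch (hedged): bge_attach() is expected to pull the station
 * address out of the EEPROM roughly like this, with BGE_EE_MAC_OFFSET
 * coming from if_bgereg.h:
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *		printf("bge%d: failed to read station address\n",
 *		    sc->bge_unit);
 */
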
static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	u_int32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	u_int32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
- * Memory management for jumbo frames.
- */
-
-static int
-bge_alloc_jumbo_mem(sc)
-	struct bge_softc *sc;
-{
-	caddr_t ptr;
-	register int i, error;
-	struct bge_jpool_entry *entry;
-
-	/* Create tag for jumbo buffer block */
-
-	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
-	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
-	    NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
-	    &sc->bge_cdata.bge_jumbo_tag);
-
-	if (error) {
-		printf("bge%d: could not allocate jumbo dma tag\n",
-		    sc->bge_unit);
-		return (ENOMEM);
-	}
-
-	/* Allocate DMA'able memory for jumbo buffer block */
-
-	error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
-	    (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
-	    &sc->bge_cdata.bge_jumbo_map);
-
-	if (error)
-		return (ENOMEM);
-
-	SLIST_INIT(&sc->bge_jfree_listhead);
-	SLIST_INIT(&sc->bge_jinuse_listhead);
-
-	/*
-	 * Now divide it up into 9K pieces and save the addresses
-	 * in an array.
-	 */
-	ptr = sc->bge_ldata.bge_jumbo_buf;
-	for (i = 0; i < BGE_JSLOTS; i++) {
-		sc->bge_cdata.bge_jslots[i] = ptr;
-		ptr += BGE_JLEN;
-		entry = malloc(sizeof(struct bge_jpool_entry),
-		    M_DEVBUF, M_NOWAIT);
-		if (entry == NULL) {
-			bge_free_jumbo_mem(sc);
-			sc->bge_ldata.bge_jumbo_buf = NULL;
-			printf("bge%d: no memory for jumbo "
-			    "buffer queue!\n", sc->bge_unit);
-			return(ENOBUFS);
-		}
-		entry->slot = i;
-		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
-		    entry, jpool_entries);
-	}
-
-	return(0);
-}
-
-static void
-bge_free_jumbo_mem(sc)
-	struct bge_softc *sc;
-{
-	int i;
-	struct bge_jpool_entry *entry;
-
-	for (i = 0; i < BGE_JSLOTS; i++) {
-		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
-		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
-		free(entry, M_DEVBUF);
-	}
-
-	/* Destroy jumbo buffer block */
-
-	if (sc->bge_ldata.bge_rx_jumbo_ring)
-		bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
-		    sc->bge_ldata.bge_jumbo_buf,
-		    sc->bge_cdata.bge_jumbo_map);
-
-	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
-		bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
-		    sc->bge_cdata.bge_jumbo_map);
-
-	if (sc->bge_cdata.bge_jumbo_tag)
-		bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
-
-	return;
-}
-
-/*
- * Allocate a jumbo buffer.
- */
-static void *
-bge_jalloc(sc)
-	struct bge_softc *sc;
-{
-	struct bge_jpool_entry *entry;
-
-	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
-
-	if (entry == NULL) {
-		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
-		return(NULL);
-	}
-
-	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
-	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
-	return(sc->bge_cdata.bge_jslots[entry->slot]);
-}
-
-/*
- * Release a jumbo buffer.
- */
-static void
-bge_jfree(buf, args)
-	void *buf;
-	void *args;
-{
-	struct bge_jpool_entry *entry;
-	struct bge_softc *sc;
-	int i;
-
-	/* Extract the softc struct pointer. */
-	sc = (struct bge_softc *)args;
-
-	if (sc == NULL)
-		panic("bge_jfree: can't find softc pointer!");
-
-	/* calculate the slot this buffer belongs to */
-
-	i = ((vm_offset_t)buf
-	    - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
-
-	if ((i < 0) || (i >= BGE_JSLOTS))
-		panic("bge_jfree: asked to free buffer that we don't manage!");
-
-	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
-	if (entry == NULL)
-		panic("bge_jfree: buffer not in use!");
-	entry->slot = i;
-	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
-	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
-
-	return;
-}
-
-
-/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return(ENOMEM);
	}
	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
	r->bge_flags = htole16(BGE_RXBDFLAG_END);
	r->bge_len = htole16(m_new->m_len);
	r->bge_idx = htole16(i);

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
-	struct mbuf *m_new = NULL;
-	struct bge_rx_bd *r;
-	struct bge_dmamap_arg ctx;
-	int error;
-
-	if (m == NULL) {
-		caddr_t *buf = NULL;
-
-		/* Allocate the mbuf. */
-		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
-		if (m_new == NULL) {
-			return(ENOBUFS);
-		}
-
-		/* Allocate the jumbo buffer */
-		buf = bge_jalloc(sc);
-		if (buf == NULL) {
-			m_freem(m_new);
-			printf("bge%d: jumbo allocation failed "
-			    "-- packet dropped!\n", sc->bge_unit);
-			return(ENOBUFS);
-		}
-
-		/* Attach the buffer to the mbuf. */
-		m_new->m_data = (void *) buf;
-		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
-		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
-		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
-	} else {
-		m_new = m;
-		m_new->m_data = m_new->m_ext.ext_buf;
-		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
-	}
-
-	if (!sc->bge_rx_alignment_bug)
-		m_adj(m_new, ETHER_ALIGN);
-	/* Set up the descriptor. */
-	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
-	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
-	ctx.bge_maxsegs = 1;
-	ctx.sc = sc;
-	error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
-	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
-	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
-	if (error || ctx.bge_maxsegs == 0) {
-		if (m == NULL) {
-			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
-			m_freem(m_new);
-		}
-		return(ENOMEM);
-	}
-	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
-	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
-	r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
-	r->bge_len = htole16(m_new->m_len);
-	r->bge_idx = htole16(i);
-
-	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
-	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
-	    BUS_DMASYNC_PREREAD);
-
-	return(0);
+	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
+	struct bge_extrx_bd *r;
+	struct mbuf *m_new = NULL;
+	int nsegs;
+	int error;
+
+	if (m == NULL) {
+		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
+		if (m_new == NULL)
+			return(ENOBUFS);
+
+		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
+		if (!(m_new->m_flags & M_EXT)) {
+			m_freem(m_new);
+			return(ENOBUFS);
+		}
+		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
+	} else {
+		m_new = m;
+		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
+		m_new->m_data = m_new->m_ext.ext_buf;
+	}
+
+	if (!sc->bge_rx_alignment_bug)
+		m_adj(m_new, ETHER_ALIGN);
+
+	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
+	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
+	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
+	if (error) {
+		if (m == NULL)
+			m_freem(m_new);
+		return(error);
+	}
+	KASSERT(nsegs == BGE_NSEG_JUMBO, ("%s: %d segments", __func__, nsegs));
+
+	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
+
+	/*
+	 * Fill in the extended RX buffer descriptor.
+	 */
+	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
+	r->bge_addr0.bge_addr_lo = htole32(BGE_ADDR_LO(segs[0].ds_addr));
+	r->bge_addr0.bge_addr_hi = htole32(BGE_ADDR_HI(segs[0].ds_addr));
+	r->bge_len0 = htole16(segs[0].ds_len);
+	r->bge_addr1.bge_addr_lo = htole32(BGE_ADDR_LO(segs[1].ds_addr));
+	r->bge_addr1.bge_addr_hi = htole32(BGE_ADDR_HI(segs[1].ds_addr));
+	r->bge_len1 = htole16(segs[1].ds_len);
+	r->bge_addr2.bge_addr_lo = htole32(BGE_ADDR_LO(segs[2].ds_addr));
+	r->bge_addr2.bge_addr_hi = htole32(BGE_ADDR_HI(segs[2].ds_addr));
+	r->bge_len2 = htole16(segs[2].ds_len);
+	r->bge_len3 = htole16(0);
+	r->bge_flags = htole16(BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END);
+	r->bge_idx = htole16(i);
+
+	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
+	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
+	    BUS_DMASYNC_PREREAD);
+
+	return (0);
}
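
/*
 * Annotation on the rewritten jumbo path above (assumes BGE_NSEG_JUMBO
 * is 3): m_cljget() attaches a 9k cluster that may span three
 * discontiguous pages, so bus_dmamap_load_mbuf_sg() can return up to
 * three S/G segments. The extended RX buffer descriptor
 * (struct bge_extrx_bd) has room for them, which is what lets the
 * driver drop the physically contiguous private jumbo pool.
 */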

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

static void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
1085 int i;
1086 struct bge_rcb *rcb;
927 struct bge_rcb *rcb;
928 int i;
1087
1088 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1089 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1090 return(ENOBUFS);
1091 	}
1092
1093 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1094 sc->bge_cdata.bge_rx_jumbo_ring_map,
1095 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1096
1097 sc->bge_jumbo = i - 1;
1098
1099 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1100 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
942 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
943 BGE_RCB_FLAG_USE_EXT_RX_BD);
1101 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1102
1103 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1104
1105 return(0);
1106}
1107
1108static void
1109bge_free_rx_ring_jumbo(sc)
1110 struct bge_softc *sc;
1111{
1112 int i;
1113
1114 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1115 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1116 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1117 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1118 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1119 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1120 }
1121 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1122 sizeof(struct bge_rx_bd));
965 sizeof(struct bge_extrx_bd));
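/*
 * Editor's note: the sizeof() change matches the jumbo ring's new
 * element type; each slot is now a struct bge_extrx_bd, which is larger
 * than the plain struct bge_rx_bd, so the clear must cover the full
 * extended descriptor.
 */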
1123 }
1124
1125 return;
1126}
1127
1128static void
1129bge_free_tx_ring(sc)
1130 struct bge_softc *sc;
1131{
1132 int i;
1133
1134 if (sc->bge_ldata.bge_tx_ring == NULL)
1135 return;
1136
1137 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1138 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1139 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1140 sc->bge_cdata.bge_tx_chain[i] = NULL;
1141 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1142 sc->bge_cdata.bge_tx_dmamap[i]);
1143 }
1144 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1145 sizeof(struct bge_tx_bd));
1146 }
1147
1148 return;
1149}
1150
1151static int
1152bge_init_tx_ring(sc)
1153 struct bge_softc *sc;
1154{
1155 sc->bge_txcnt = 0;
1156 sc->bge_tx_saved_considx = 0;
1157
1158 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1159 /* 5700 b2 errata */
1160 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1161 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1162
1163 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1164 /* 5700 b2 errata */
1165 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1166 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1167
1168 return(0);
1169}
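/*
 * Editor's note: the doubled mailbox writes above are deliberate; on
 * 5700 B-step silicon a single write to the TX producer mailboxes can
 * apparently be lost, so the errata workaround is simply to issue each
 * write twice when bge_chiprev == BGE_CHIPREV_5700_BX.
 */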
1170
1171static void
1172bge_setmulti(sc)
1173 struct bge_softc *sc;
1174{
1175 struct ifnet *ifp;
1176 struct ifmultiaddr *ifma;
1177 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1178 int h, i;
1179
1180 BGE_LOCK_ASSERT(sc);
1181
1182 ifp = sc->bge_ifp;
1183
1184 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1185 for (i = 0; i < 4; i++)
1186 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1187 return;
1188 }
1189
1190 /* First, zot all the existing filters. */
1191 for (i = 0; i < 4; i++)
1192 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1193
1194 /* Now program new ones. */
1195 IF_ADDR_LOCK(ifp);
1196 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1197 if (ifma->ifma_addr->sa_family != AF_LINK)
1198 continue;
1199 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1200 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1201 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1202 }
1203 IF_ADDR_UNLOCK(ifp);
1204
1205 for (i = 0; i < 4; i++)
1206 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1207
1208 return;
1209}
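/*
 * Editor's note: the hash above keeps the low 7 bits of the
 * little-endian Ethernet CRC of each multicast address, yielding a bit
 * index 0..127 into a 128-bit filter spread across the four 32-bit
 * BGE_MAR registers. A sketch of the mapping as a standalone helper
 * (hypothetical, for illustration only):
 */
#ifdef notdef
static void
bge_mchash_example(addr, reg, bit)
	const u_int8_t *addr;
	int *reg, *bit;
{
	int h;

	h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0x7F;	/* 0..127 */
	*reg = (h & 0x60) >> 5;		/* which of BGE_MAR0..3, i.e. h / 32 */
	*bit = h & 0x1F;		/* which bit within it, i.e. h % 32 */
}
#endif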
1210
1211/*
1212 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1213 * self-test results.
1214 */
1215static int
1216bge_chipinit(sc)
1217 struct bge_softc *sc;
1218{
1219 int i;
1220 u_int32_t dma_rw_ctl;
1221
1222 /* Set endianness before we access any non-PCI registers. */
1223#if BYTE_ORDER == BIG_ENDIAN
1224 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1225 BGE_BIGENDIAN_INIT, 4);
1226#else
1227 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1228 BGE_LITTLEENDIAN_INIT, 4);
1229#endif
1230
1231 /*
1232 * Check the 'ROM failed' bit on the RX CPU to see if
1233 * self-tests passed.
1234 */
1235 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1236 printf("bge%d: RX CPU self-diagnostics failed!\n",
1237 sc->bge_unit);
1238 return(ENODEV);
1239 }
1240
1241 /* Clear the MAC control register */
1242 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1243
1244 /*
1245 * Clear the MAC statistics block in the NIC's
1246 * internal memory.
1247 */
1248 for (i = BGE_STATS_BLOCK;
1249 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1250 BGE_MEMWIN_WRITE(sc, i, 0);
1251
1252 for (i = BGE_STATUS_BLOCK;
1253 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1254 BGE_MEMWIN_WRITE(sc, i, 0);
1255
1256 /* Set up the PCI DMA control register. */
1257 if (sc->bge_pcie) {
1258 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1259 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1260 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1261 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1262 BGE_PCISTATE_PCI_BUSMODE) {
1263 /* Conventional PCI bus */
1264 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1265 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1266 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1267 (0x0F);
1268 } else {
1269 /* PCI-X bus */
1270 /*
1271 * The 5704 uses a different encoding of read/write
1272 * watermarks.
1273 */
1274 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1275 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1276 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1277 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1278 else
1279 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1280 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1281 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1282 (0x0F);
1283
1284 /*
1285 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1286 * for hardware bugs.
1287 */
1288 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1289 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1290 u_int32_t tmp;
1291
1292 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1293 if (tmp == 0x6 || tmp == 0x7)
1294 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1295 }
1296 }
1297
1298 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1299 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1300 sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1301 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1302 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1303 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1304
1305 /*
1306 * Set up general mode register.
1307 */
1308 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1309 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1310 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1311 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1312
1313 /*
1314 * Disable memory write invalidate. Apparently it is not supported
1315 * properly by these devices.
1316 */
1317 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1318
1319#ifdef __brokenalpha__
1320 /*
1321 * Must ensure that we do not cross an 8K (bytes) boundary
1322 * for DMA reads. Our highest limit is 1K bytes. This is a
1323 * restriction on some ALPHA platforms with early revision
1324 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1325 */
1326 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1327 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1328#endif
1329
1330 /* Set the timer prescaler (always 66MHz) */
1331 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1332
1333 return(0);
1334}
1335
1336static int
1337bge_blockinit(sc)
1338 struct bge_softc *sc;
1339{
1340 struct bge_rcb *rcb;
1341 volatile struct bge_rcb *vrcb;
1342 int i;
1343
1344 /*
1345 * Initialize the memory window pointer register so that
1346 * we can access the first 32K of internal NIC RAM. This will
1347 * allow us to set up the TX send ring RCBs and the RX return
1348 * ring RCBs, plus other things which live in NIC memory.
1349 */
1350 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1351
1352 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1353
1354 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1355 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1356 /* Configure mbuf memory pool */
1357 if (sc->bge_extram) {
1358 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1359 BGE_EXT_SSRAM);
1360 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1361 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1362 else
1363 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1364 } else {
1365 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1366 BGE_BUFFPOOL_1);
1367 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1368 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1369 else
1370 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1371 }
1372
1373 /* Configure DMA resource pool */
1374 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1375 BGE_DMA_DESCRIPTORS);
1376 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1377 }
1378
1379 /* Configure mbuf pool watermarks */
1380 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1381 sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1382 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1383 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1384 } else {
1385 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1386 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1387 }
1388 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1389
1390 /* Configure DMA resource watermarks */
1391 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1392 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1393
1394 /* Enable buffer manager */
1395 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1396 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1397 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1398 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1399
1400 /* Poll for buffer manager start indication */
1401 for (i = 0; i < BGE_TIMEOUT; i++) {
1402 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1403 break;
1404 DELAY(10);
1405 }
1406
1407 if (i == BGE_TIMEOUT) {
1408 printf("bge%d: buffer manager failed to start\n",
1409 sc->bge_unit);
1410 return(ENXIO);
1411 }
1412 }
1413
1414 /* Enable flow-through queues */
1415 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1416 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1417
1418 /* Wait until queue initialization is complete */
1419 for (i = 0; i < BGE_TIMEOUT; i++) {
1420 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1421 break;
1422 DELAY(10);
1423 }
1424
1425 if (i == BGE_TIMEOUT) {
1426 printf("bge%d: flow-through queue init failed\n",
1427 sc->bge_unit);
1428 return(ENXIO);
1429 }
1430
1431 /* Initialize the standard RX ring control block */
1432 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1433 rcb->bge_hostaddr.bge_addr_lo =
1434 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1435 rcb->bge_hostaddr.bge_addr_hi =
1436 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1437 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1438 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1439 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1440 sc->bge_asicrev == BGE_ASICREV_BCM5750)
1441 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1442 else
1443 rcb->bge_maxlen_flags =
1444 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1445 if (sc->bge_extram)
1446 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1447 else
1448 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1449 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1450 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1451
1452 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1453 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1454
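/*
 * Editor's note: BGE_RCB_MAXLEN_FLAGS() packs the RCB's maxlen field
 * into the upper 16 bits of the word and the flag bits into the lower
 * 16 (assuming the usual if_bgereg.h definition):
 *
 *	#define BGE_RCB_MAXLEN_FLAGS(maxlen, flags) ((maxlen) << 16 | (flags))
 *
 * so the BGE_RCB_MAXLEN_FLAGS(512, 0) used above for the 5705/5750
 * standard ring evaluates to 0x02000000.
 */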
1455 /*
1456 * Initialize the jumbo RX ring control block.
1457 * We set the 'ring disabled' bit in the flags
1458 * field until we're actually ready to start
1459 * using this ring (i.e. once we set the MTU
1460 * high enough to require it).
1461 */
1462 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1463 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1464 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1465
1466 rcb->bge_hostaddr.bge_addr_lo =
1467 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1468 rcb->bge_hostaddr.bge_addr_hi =
1469 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1470 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1471 sc->bge_cdata.bge_rx_jumbo_ring_map,
1472 BUS_DMASYNC_PREREAD);
1473 rcb->bge_maxlen_flags =
1474 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1475 BGE_RCB_FLAG_RING_DISABLED);
1316 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1317 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
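/*
 * Editor's note: this hunk is the jumbo-ring half of the extended RX BD
 * conversion. The old code advertised BGE_MAX_FRAMELEN as the ring's
 * maxlen; the new code sets maxlen to 0 and instead tags the ring with
 * BGE_RCB_FLAG_USE_EXT_RX_BD (keeping BGE_RCB_FLAG_RING_DISABLED until
 * a jumbo MTU actually brings the ring into use), telling the chip that
 * each entry is a multi-segment struct bge_extrx_bd rather than a plain
 * struct bge_rx_bd.
 */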
1476 if (sc->bge_extram)
1477 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1478 else
1479 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1480 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1481 rcb->bge_hostaddr.bge_addr_hi);
1482 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1483 rcb->bge_hostaddr.bge_addr_lo);
1484
1485 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1486 rcb->bge_maxlen_flags);
1487 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1488
1489 /* Set up dummy disabled mini ring RCB */
1490 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1491 rcb->bge_maxlen_flags =
1492 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1493 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1494 rcb->bge_maxlen_flags);
1495 }
1496
1497 /*
1498 * Set the BD ring replenish thresholds. The recommended
1499 * values are 1/8th the number of descriptors allocated to
1500 * each ring.
1501 */
1502 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1503 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
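/*
 * Editor's note: assuming the usual ring sizes (BGE_STD_RX_RING_CNT =
 * 512, BGE_JUMBO_RX_RING_CNT = 256), the two writes above program
 * replenish thresholds of 512/8 = 64 and 256/8 = 32 descriptors.
 */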
1504
1505 /*
1506 * Disable all unused send rings by setting the 'ring disabled'
1507 * bit in the flags field of all the TX send ring control blocks.
1508 * These are located in NIC memory.
1509 */
1510 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1511 BGE_SEND_RING_RCB);
1512 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1513 vrcb->bge_maxlen_flags =
1514 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1515 vrcb->bge_nicaddr = 0;
1516 vrcb++;
1517 }
1518
1519 /* Configure TX RCB 0 (we use only the first ring) */
1520 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1521 BGE_SEND_RING_RCB);
1522 vrcb->bge_hostaddr.bge_addr_lo =
1523 htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1524 vrcb->bge_hostaddr.bge_addr_hi =
1525 htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1526 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1527 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1528 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1529 vrcb->bge_maxlen_flags =
1530 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1531
1532 /* Disable all unused RX return rings */
1533 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1534 BGE_RX_RETURN_RING_RCB);
1535 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1536 vrcb->bge_hostaddr.bge_addr_hi = 0;
1537 vrcb->bge_hostaddr.bge_addr_lo = 0;
1538 vrcb->bge_maxlen_flags =
1539 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1540 BGE_RCB_FLAG_RING_DISABLED);
1541 vrcb->bge_nicaddr = 0;
1542 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1543 (i * (sizeof(u_int64_t))), 0);
1544 vrcb++;
1545 }
1546
1547 /* Initialize RX ring indexes */
1548 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1549 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1550 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1551
1552 /*
1553 * Set up RX return ring 0
1554 * Note that the NIC address for RX return rings is 0x00000000.
1555 * The return rings live entirely within the host, so the
1556 * nicaddr field in the RCB isn't used.
1557 */
1558 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1559 BGE_RX_RETURN_RING_RCB);
1560 vrcb->bge_hostaddr.bge_addr_lo =
1561 BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1562 vrcb->bge_hostaddr.bge_addr_hi =
1563 BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1564 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1565 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1566 vrcb->bge_nicaddr = 0x00000000;
1567 vrcb->bge_maxlen_flags =
1568 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1569
1570 /* Set random backoff seed for TX */
1571 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1572 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1573 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1574 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1575 BGE_TX_BACKOFF_SEED_MASK);
1576
1577 /* Set inter-packet gap */
1578 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1579
1580 /*
1581 * Specify which ring to use for packets that don't match
1582 * any RX rules.
1583 */
1584 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1585
1586 /*
1587 * Configure number of RX lists. One interrupt distribution
1588 * list, sixteen active lists, one bad frames class.
1589 */
1590 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1591
1592 /* Initialize RX list placement stats mask. */
1593 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1594 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1595
1596 /* Disable host coalescing until we get it set up */
1597 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1598
1599 /* Poll to make sure it's shut down. */
1600 for (i = 0; i < BGE_TIMEOUT; i++) {
1601 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1602 break;
1603 DELAY(10);
1604 }
1605
1606 if (i == BGE_TIMEOUT) {
1607 printf("bge%d: host coalescing engine failed to idle\n",
1608 sc->bge_unit);
1609 return(ENXIO);
1610 }
1611
1612 /* Set up host coalescing defaults */
1613 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1614 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1615 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1616 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1617 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1618 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1619 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1620 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1621 }
1622 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1623 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1624
1625 /* Set up address of statistics block */
1626 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1627 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1628 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1629 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1630 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1631 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1632 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1633 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1634 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1635 }
1636
1637 /* Set up address of status block */
1638 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1639 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1640 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1641 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1642 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1643 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1644 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1645 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1646
1647 /* Turn on host coalescing state machine */
1648 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1649
1650 /* Turn on RX BD completion state machine and enable attentions */
1651 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1652 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1653
1654 /* Turn on RX list placement state machine */
1655 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1656
1657 /* Turn on RX list selector state machine. */
1658 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1659 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1660 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1661
1662 /* Turn on DMA, clear stats */
1663 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1664 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1665 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1666 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1667 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1668
1669 /* Set misc. local control, enable interrupts on attentions */
1670 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1671
1672#ifdef notdef
1673 /* Assert GPIO pins for PHY reset */
1674 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1675 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1676 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1677 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1678#endif
1679
1680 /* Turn on DMA completion state machine */
1681 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1682 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1683 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1684
1685 /* Turn on write DMA state machine */
1686 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1687 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1688
1689 /* Turn on read DMA state machine */
1690 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1691 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1692
1693 /* Turn on RX data completion state machine */
1694 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1695
1696 /* Turn on RX BD initiator state machine */
1697 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1698
1699 /* Turn on RX data and RX BD initiator state machine */
1700 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1701
1702 /* Turn on Mbuf cluster free state machine */
1703 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1704 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1705 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1706
1707 /* Turn on send BD completion state machine */
1708 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1709
1710 /* Turn on send data completion state machine */
1711 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1712
1713 /* Turn on send data initiator state machine */
1714 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1715
1716 /* Turn on send BD initiator state machine */
1717 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1718
1719 /* Turn on send BD selector state machine */
1720 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1721
1722 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1723 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1724 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1725
1726 /* ack/clear link change events */
1727 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1728 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1729 BGE_MACSTAT_LINK_CHANGED);
1730 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1731
1732 /* Enable PHY auto polling (for MII/GMII only) */
1733 if (sc->bge_tbi) {
1734 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1735 } else {
1736 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1737 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1738 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1739 BGE_EVTENB_MI_INTERRUPT);
1740 }
1741
1742 /* Enable link state change attentions. */
1743 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1744
1745 return(0);
1746}
1747
1748/*
1749 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1750 * against our list and return its name if we find a match. Note
1751 * that since the Broadcom controller contains VPD support, we
1752 * can get the device name string from the controller itself instead
1753 * of the compiled-in string. This is a little slow, but it guarantees
1754 * we'll always announce the right product name.
1755 */
1756static int
1757bge_probe(dev)
1758 device_t dev;
1759{
1760 struct bge_type *t;
1761 struct bge_softc *sc;
1762 char *descbuf;
1763
1764 t = bge_devs;
1765
1766 sc = device_get_softc(dev);
1767 bzero(sc, sizeof(struct bge_softc));
1768 sc->bge_unit = device_get_unit(dev);
1769 sc->bge_dev = dev;
1770
1771 while(t->bge_name != NULL) {
1772 if ((pci_get_vendor(dev) == t->bge_vid) &&
1773 (pci_get_device(dev) == t->bge_did)) {
1774#ifdef notdef
1775 bge_vpd_read(sc);
1776 device_set_desc(dev, sc->bge_vpd_prodname);
1777#endif
1778 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1779 if (descbuf == NULL)
1780 return(ENOMEM);
1781 snprintf(descbuf, BGE_DEVDESC_MAX,
1782 "%s, ASIC rev. %#04x", t->bge_name,
1783 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1784 device_set_desc_copy(dev, descbuf);
1785 if (pci_get_subvendor(dev) == DELL_VENDORID)
1786 sc->bge_no_3_led = 1;
1787 free(descbuf, M_TEMP);
1788 return(0);
1789 }
1790 t++;
1791 }
1792
1793 return(ENXIO);
1794}
1795
1796static void
1797bge_dma_free(sc)
1798 struct bge_softc *sc;
1799{
1800 int i;
1801
1802
1803 /* Destroy DMA maps for RX buffers */
1804
1805 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1806 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1807 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1808 sc->bge_cdata.bge_rx_std_dmamap[i]);
1809 }
1810
1811 /* Destroy DMA maps for jumbo RX buffers */
1812
1813 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1814 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1815 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1816 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1817 }
1818
1819 /* Destroy DMA maps for TX buffers */
1820
1821 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1822 if (sc->bge_cdata.bge_tx_dmamap[i])
1823 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1824 sc->bge_cdata.bge_tx_dmamap[i]);
1825 }
1826
1827 if (sc->bge_cdata.bge_mtag)
1828 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1829
1830
1831 /* Destroy standard RX ring */
1832
1833 if (sc->bge_ldata.bge_rx_std_ring)
1834 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1835 sc->bge_ldata.bge_rx_std_ring,
1836 sc->bge_cdata.bge_rx_std_ring_map);
1837
1838 if (sc->bge_cdata.bge_rx_std_ring_map) {
1839 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1840 sc->bge_cdata.bge_rx_std_ring_map);
1841 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1842 sc->bge_cdata.bge_rx_std_ring_map);
1843 }
1844
1845 if (sc->bge_cdata.bge_rx_std_ring_tag)
1846 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1847
1848 /* Destroy jumbo RX ring */
1849
1850 if (sc->bge_ldata.bge_rx_jumbo_ring)
1851 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1852 sc->bge_ldata.bge_rx_jumbo_ring,
1853 sc->bge_cdata.bge_rx_jumbo_ring_map);
1854
1855 if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
1856 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1857 sc->bge_cdata.bge_rx_jumbo_ring_map);
1858 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1859 sc->bge_cdata.bge_rx_jumbo_ring_map);
1860 }
1861
1862 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1863 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1864
1865 /* Destroy RX return ring */
1866
1867 if (sc->bge_ldata.bge_rx_return_ring)
1868 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1869 sc->bge_ldata.bge_rx_return_ring,
1870 sc->bge_cdata.bge_rx_return_ring_map);
1871
1872 if (sc->bge_cdata.bge_rx_return_ring_map) {
1873 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1874 sc->bge_cdata.bge_rx_return_ring_map);
1875 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
1876 sc->bge_cdata.bge_rx_return_ring_map);
1877 }
1878
1879 if (sc->bge_cdata.bge_rx_return_ring_tag)
1880 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1881
1882 /* Destroy TX ring */
1883
1884 if (sc->bge_ldata.bge_tx_ring)
1885 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1886 sc->bge_ldata.bge_tx_ring,
1887 sc->bge_cdata.bge_tx_ring_map);
1888
1889 if (sc->bge_cdata.bge_tx_ring_map) {
1890 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1891 sc->bge_cdata.bge_tx_ring_map);
1892 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
1893 sc->bge_cdata.bge_tx_ring_map);
1894 }
1895
1896 if (sc->bge_cdata.bge_tx_ring_tag)
1897 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1898
1899 /* Destroy status block */
1900
1901 if (sc->bge_ldata.bge_status_block)
1902 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1903 sc->bge_ldata.bge_status_block,
1904 sc->bge_cdata.bge_status_map);
1905
1906 if (sc->bge_cdata.bge_status_map) {
1907 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1908 sc->bge_cdata.bge_status_map);
1909 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1910 sc->bge_cdata.bge_status_map);
1911 }
1912
1913 if (sc->bge_cdata.bge_status_tag)
1914 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1915
1916 /* Destroy statistics block */
1917
1918 if (sc->bge_ldata.bge_stats)
1919 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1920 sc->bge_ldata.bge_stats,
1921 sc->bge_cdata.bge_stats_map);
1922
1923 if (sc->bge_cdata.bge_stats_map) {
1924 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1925 sc->bge_cdata.bge_stats_map);
1926 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1927 sc->bge_cdata.bge_stats_map);
1928 }
1929
1930 if (sc->bge_cdata.bge_stats_tag)
1931 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1932
1933 /* Destroy the parent tag */
1934
1935 if (sc->bge_cdata.bge_parent_tag)
1936 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1937
1938 return;
1939}
1940
1941static int
1942bge_dma_alloc(dev)
1943 device_t dev;
1944{
1945 struct bge_softc *sc;
1788 int i, error;
1789 struct bge_dmamap_arg ctx;
1790
1791 sc = device_get_softc(dev);
1792
1793 /*
1794 * Allocate the parent bus DMA tag appropriate for PCI.
1795 */
1796 error = bus_dma_tag_create(NULL, /* parent */
1797 PAGE_SIZE, 0, /* alignment, boundary */
1798 BUS_SPACE_MAXADDR, /* lowaddr */
1799 BUS_SPACE_MAXADDR, /* highaddr */
1800 NULL, NULL, /* filter, filterarg */
1801 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1802 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1803 0, /* flags */
1804 NULL, NULL, /* lockfunc, lockarg */
1805 &sc->bge_cdata.bge_parent_tag);

	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

1807 /*
1808 * Create tag for RX mbufs.
1809 */
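	/*
	 * Each mapping on this tag is limited to BGE_NSEG_NEW segments of
	 * at most MCLBYTES apiece; BUS_DMA_ALLOCNOW reserves mapping
	 * resources up front so a later bus_dmamap_load() is less likely
	 * to fail.
	 */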
1810 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1811 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1812 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1813 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1814
1815 if (error) {
1816 device_printf(dev, "could not allocate dma tag\n");
1817 return (ENOMEM);
1818 }
1819
1820 /* Create DMA maps for RX buffers */
1821
1822 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1823 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1824 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1825 if (error) {
1826 device_printf(dev, "can't create DMA map for RX\n");
1827 return(ENOMEM);
1828 }
1829 }
1830
1831 /* Create DMA maps for TX buffers */
1832
1833 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1834 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1835 &sc->bge_cdata.bge_tx_dmamap[i]);
1836 if (error) {
1837 device_printf(dev, "can't create DMA map for TX\n");
1838 return(ENOMEM);
1839 }
1840 }
1841
1842 /* Create tag for standard RX ring */
1843
1844 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1845 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1846 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1847 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1848
1849 if (error) {
1850 device_printf(dev, "could not allocate dma tag\n");
1851 return (ENOMEM);
1852 }
1853
1854 /* Allocate DMA'able memory for standard RX ring */
1855
1856 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1857 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1858 &sc->bge_cdata.bge_rx_std_ring_map);
1859 if (error)
1860 return (ENOMEM);
1861
1862 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1863
1864 /* Load the address of the standard RX ring */
1865
1866 ctx.bge_maxsegs = 1;
1867 ctx.sc = sc;
1868
1869 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1870 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1871 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1872
1873 if (error)
1874 return (ENOMEM);
1875
1876 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1877
1878 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1879 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1880
1881 /*
1882 * Create tag for jumbo mbufs.
1883 * This is really a bit of a kludge. We allocate a special
1884 * jumbo buffer pool which (thanks to the way our DMA
1885 * memory allocation works) will consist of contiguous
1886 * pages. This means that even though a jumbo buffer might
1887 * be larger than a page size, we don't really need to
1888 * map it into more than one DMA segment. However, the
1889 * default mbuf tag will result in multi-segment mappings,
1890 * so we have to create a special jumbo mbuf tag that
1891 * lets us get away with mapping the jumbo buffers as
1892 * a single segment. I think eventually the driver should
1893 * be changed so that it uses ordinary mbufs and cluster
1894 * buffers, i.e. jumbo frames can span multiple DMA
1895 * descriptors. But that's a project for another day.
1896 */
1897
1898 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1899 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1900 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1901 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
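		/*
		 * Note that with MJUM9BYTES clusters and a PAGE_SIZE
		 * maximum segment size, a jumbo buffer may be mapped as
		 * up to BGE_NSEG_JUMBO page-sized segments, so the
		 * single-segment scheme described above no longer
		 * strictly applies.
		 */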
1902
1903 if (error) {
1904 device_printf(dev, "could not allocate dma tag\n");
1905 return (ENOMEM);
1906 }
1907
1908 /* Create tag for jumbo RX ring */
1909 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1910 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1911 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1912 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1913
1914 if (error) {
1915 device_printf(dev, "could not allocate dma tag\n");
1916 return (ENOMEM);
1917 }
1918
1919 /* Allocate DMA'able memory for jumbo RX ring */
1920 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1921 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1922 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1923 &sc->bge_cdata.bge_rx_jumbo_ring_map);
1924 if (error)
1925 return (ENOMEM);
1926
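		/*
		 * No bzero() is needed here: BUS_DMA_ZERO hands back the
		 * ring memory already cleared.
		 */
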
1927 /* Load the address of the jumbo RX ring */
1928 ctx.bge_maxsegs = 1;
1929 ctx.sc = sc;
1930
1931 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1932 sc->bge_cdata.bge_rx_jumbo_ring_map,
1933 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1934 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1935
1936 if (error)
1937 return (ENOMEM);
1938
1939 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1940
1941 /* Create DMA maps for jumbo RX buffers */
1942
1943 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1944 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1945 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1946 if (error) {
1947 device_printf(dev,
1948 "can't create DMA map for RX\n");
1949 return(ENOMEM);
1950 }
1951 }
1952
1953 }
1954
1955 /* Create tag for RX return ring */
1956
1957 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1958 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1959 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1960 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1961
1962 if (error) {
1963 device_printf(dev, "could not allocate dma tag\n");
1964 return (ENOMEM);
1965 }
1966
1967 /* Allocate DMA'able memory for RX return ring */
1968
1969 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1970 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1971 &sc->bge_cdata.bge_rx_return_ring_map);
1972 if (error)
1973 return (ENOMEM);
1974
1975 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1976 BGE_RX_RTN_RING_SZ(sc));
1977
1978 /* Load the address of the RX return ring */
1979
1980 ctx.bge_maxsegs = 1;
1981 ctx.sc = sc;
1982
1983 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1984 sc->bge_cdata.bge_rx_return_ring_map,
1985 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1986 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1987
1988 if (error)
1989 return (ENOMEM);
1990
1991 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1992
1993 /* Create tag for TX ring */
1994
1995 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1996 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1997 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1998 &sc->bge_cdata.bge_tx_ring_tag);
1999
2000 if (error) {
2001 device_printf(dev, "could not allocate dma tag\n");
2002 return (ENOMEM);
2003 }
2004
2005 /* Allocate DMA'able memory for TX ring */
2006
2007 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2008 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2009 &sc->bge_cdata.bge_tx_ring_map);
2010 if (error)
2011 return (ENOMEM);
2012
2013 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2014
2015 /* Load the address of the TX ring */
2016
2017 ctx.bge_maxsegs = 1;
2018 ctx.sc = sc;
2019
2020 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2021 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2022 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2023
2024 if (error)
2025 return (ENOMEM);
2026
2027 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2028
2029 /* Create tag for status block */
2030
2031 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2032 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2033 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2034 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2035
2036 if (error) {
2037 device_printf(dev, "could not allocate dma tag\n");
2038 return (ENOMEM);
2039 }
2040
2041 /* Allocate DMA'able memory for status block */
2042
2043 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2044 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2045 &sc->bge_cdata.bge_status_map);
2046 if (error)
2047 return (ENOMEM);
2048
2049 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2050
2051 /* Load the address of the status block */
2052
2053 ctx.sc = sc;
2054 ctx.bge_maxsegs = 1;
2055
2056 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2057 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2058 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2059
2060 if (error)
2061 return (ENOMEM);
2062
2063 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2064
2065 /* Create tag for statistics block */
2066
2067 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2068 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2069 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2070 &sc->bge_cdata.bge_stats_tag);
2071
2072 if (error) {
2073 device_printf(dev, "could not allocate dma tag\n");
2074 return (ENOMEM);
2075 }
2076
2077 /* Allocate DMA'able memory for statistics block */
2078
2079 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2080 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2081 &sc->bge_cdata.bge_stats_map);
2082 if (error)
2083 return (ENOMEM);
2084
2085 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2086
2087 /* Load the address of the statistics block */
2088
2089 ctx.sc = sc;
2090 ctx.bge_maxsegs = 1;
2091
2092 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2093 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2094 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2095
2096 if (error)
2097 return (ENOMEM);
2098
2099 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2100
2101 return(0);
2102}
2103
2104static int
2105bge_attach(dev)
2106 device_t dev;
2107{
2108 struct ifnet *ifp;
2109 struct bge_softc *sc;
2110 u_int32_t hwcfg = 0;
2111 u_int32_t mac_tmp = 0;
2112 u_char eaddr[6];
2113 int unit, error = 0, rid;
2114
2115 sc = device_get_softc(dev);
2116 unit = device_get_unit(dev);
2117 sc->bge_dev = dev;
2118 sc->bge_unit = unit;
2119
2120 /*
2121 * Map control/status registers.
2122 */
2123 pci_enable_busmaster(dev);
2124
2125 rid = BGE_PCI_BAR0;
2126 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2127 RF_ACTIVE|PCI_RF_DENSE);
2128
2129 if (sc->bge_res == NULL) {
2130 printf("bge%d: couldn't map memory\n", unit);
2131 error = ENXIO;
2132 goto fail;
2133 }
2134
2135 sc->bge_btag = rman_get_bustag(sc->bge_res);
2136 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2137 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2138
2139 /* Allocate interrupt */
2140 rid = 0;
2141
2142 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2143 RF_SHAREABLE | RF_ACTIVE);
2144
2145 if (sc->bge_irq == NULL) {
2146 printf("bge%d: couldn't map interrupt\n", unit);
2147 error = ENXIO;
2148 goto fail;
2149 }
2150
2153 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2154
2155 /* Save ASIC rev. */
2156
2157 sc->bge_chipid =
2158 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2159 BGE_PCIMISCCTL_ASICREV;
2160 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2161 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2162
2163 /*
2164 * Treat the 5714 and the 5752 like the 5750 until we have more info
2165 * on this chip.
2166 */
2167 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2168 sc->bge_asicrev == BGE_ASICREV_BCM5752)
2169 sc->bge_asicrev = BGE_ASICREV_BCM5750;
2170
2171 /*
2172 * XXX: Broadcom Linux driver. Not in specs or errata.
2173 * PCI-Express?
2174 */
2175 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2176 u_int32_t v;
2177
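		/*
		 * Follow the MSI capability's next-capability pointer; if
		 * it leads to a capability carrying the PCI Express ID,
		 * flag the device as PCI Express.
		 */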
2178 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2179 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2180 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2181 if ((v & 0xff) == BGE_PCIE_CAPID)
2182 sc->bge_pcie = 1;
2183 }
2184 }
2185
2186 /* Try to reset the chip. */
2187 bge_reset(sc);
2188
2189 if (bge_chipinit(sc)) {
2190 printf("bge%d: chip initialization failed\n", sc->bge_unit);
2191 bge_release_resources(sc);
2192 error = ENXIO;
2193 goto fail;
2194 }
2195
2196 /*
2197 * Get station address from the EEPROM.
2198 */
2199 mac_tmp = bge_readmem_ind(sc, 0x0c14);
2200 if ((mac_tmp >> 16) == 0x484b) {
2201 eaddr[0] = (u_char)(mac_tmp >> 8);
2202 eaddr[1] = (u_char)mac_tmp;
2203 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2204 eaddr[2] = (u_char)(mac_tmp >> 24);
2205 eaddr[3] = (u_char)(mac_tmp >> 16);
2206 eaddr[4] = (u_char)(mac_tmp >> 8);
2207 eaddr[5] = (u_char)mac_tmp;
2208 } else if (bge_read_eeprom(sc, eaddr,
2209 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2210 printf("bge%d: failed to read station address\n", unit);
2211 bge_release_resources(sc);
2212 error = ENXIO;
2213 goto fail;
2214 }
2215
2216 /* 5705 limits RX return ring to 512 entries. */
2217 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2218 sc->bge_asicrev == BGE_ASICREV_BCM5750)
2219 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2220 else
2221 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2222
2223 if (bge_dma_alloc(dev)) {
2224 printf("bge%d: failed to allocate DMA resources\n",
2225 sc->bge_unit);
2226 bge_release_resources(sc);
2227 error = ENXIO;
2228 goto fail;
2229 }
2230
2231 /* Set default tuneable values. */
2232 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2233 sc->bge_rx_coal_ticks = 150;
2234 sc->bge_tx_coal_ticks = 150;
2235 sc->bge_rx_max_coal_bds = 64;
2236 sc->bge_tx_max_coal_bds = 128;
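	/*
	 * The *_coal_ticks values are interrupt coalescing delays in
	 * microseconds and the *_max_coal_bds values are buffer
	 * descriptor counts; the chip raises an interrupt when either
	 * threshold is reached.
	 */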
2237
2238 /* Set up ifnet structure */
2239 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2240 if (ifp == NULL) {
2241 printf("bge%d: failed to if_alloc()\n", sc->bge_unit);
2242 bge_release_resources(sc);
2243 error = ENXIO;
2244 goto fail;
2245 }
2246 ifp->if_softc = sc;
2247 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2248 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2249 ifp->if_ioctl = bge_ioctl;
2250 ifp->if_start = bge_start;
2251 ifp->if_watchdog = bge_watchdog;
2252 ifp->if_init = bge_init;
2253 ifp->if_mtu = ETHERMTU;
2254 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2255 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2256 IFQ_SET_READY(&ifp->if_snd);
2257 ifp->if_hwassist = BGE_CSUM_FEATURES;
2258 /* NB: the code for RX csum offload is disabled for now */
2259 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2260 IFCAP_VLAN_MTU;
2261 ifp->if_capenable = ifp->if_capabilities;
2262#ifdef DEVICE_POLLING
2263 ifp->if_capabilities |= IFCAP_POLLING;
2264#endif
2265
2266 /*
2267 * Figure out what sort of media we have by checking the
2268 * hardware config word in the first 32k of NIC internal memory,
2269 * or fall back to examining the EEPROM if necessary.
2270 * Note: on some BCM5700 cards, this value appears to be unset.
2271 * If that's the case, we have to rely on identifying the NIC
2272 * by its PCI subsystem ID, as we do below for the SysKonnect
2273 * SK-9D41.
2274 */
2275 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2276 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2277 else {
2278 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2279 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2280 hwcfg = ntohl(hwcfg);
2281 }
2282
2283 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2284 sc->bge_tbi = 1;
2285
2286 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2287 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2288 sc->bge_tbi = 1;
2289
2290 if (sc->bge_tbi) {
2291 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2292 bge_ifmedia_upd, bge_ifmedia_sts);
2293 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2294 ifmedia_add(&sc->bge_ifmedia,
2295 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2296 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2297 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2298 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2299 } else {
2300 /*
2301 * Do transceiver setup.
2302 */
2303 if (mii_phy_probe(dev, &sc->bge_miibus,
2304 bge_ifmedia_upd, bge_ifmedia_sts)) {
2305 printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2306 bge_release_resources(sc);
2307 error = ENXIO;
2308 goto fail;
2309 }
2310 }
2311
2312 /*
2313 * When using the BCM5701 in PCI-X mode, data corruption has
2314 * been observed in the first few bytes of some received packets.
2315 * Aligning the packet buffer in memory eliminates the corruption.
2316 * Unfortunately, this misaligns the packet payloads. On platforms
2317 * which do not support unaligned accesses, we will realign the
2318 * payloads by copying the received packets.
2319 */
2320 switch (sc->bge_chipid) {
2321 case BGE_CHIPID_BCM5701_A0:
2322 case BGE_CHIPID_BCM5701_B0:
2323 case BGE_CHIPID_BCM5701_B2:
2324 case BGE_CHIPID_BCM5701_B5:
2325 /* If in PCI-X mode, work around the alignment bug. */
2326 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2327 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2328 BGE_PCISTATE_PCI_BUSSPEED)
2329 sc->bge_rx_alignment_bug = 1;
2330 break;
2331 }
2332
2333 /*
2334 * Call MI attach routine.
2335 */
2336 ether_ifattach(ifp, eaddr);
2337 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2338
2339 /*
2340 * Hookup IRQ last.
2341 */
2342 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2343 bge_intr, sc, &sc->bge_intrhand);
2344
2345 if (error) {
2346 bge_detach(dev);
2347 printf("bge%d: couldn't set up irq\n", unit);
2348 }
2349
2350fail:
2351 return(error);
2352}
2353
2354static int
2355bge_detach(dev)
2356 device_t dev;
2357{
2358 struct bge_softc *sc;
2359 struct ifnet *ifp;
2360
2361 sc = device_get_softc(dev);
2362 ifp = sc->bge_ifp;
2363
2364#ifdef DEVICE_POLLING
2365 if (ifp->if_capenable & IFCAP_POLLING)
2366 ether_poll_deregister(ifp);
2367#endif
2368
2369 BGE_LOCK(sc);
2370 bge_stop(sc);
2371 bge_reset(sc);
2372 BGE_UNLOCK(sc);
2373
2374 ether_ifdetach(ifp);
2375
2376 if (sc->bge_tbi) {
2377 ifmedia_removeall(&sc->bge_ifmedia);
2378 } else {
2379 bus_generic_detach(dev);
2380 device_delete_child(dev, sc->bge_miibus);
2381 }
2382
2383 bge_release_resources(sc);
2384
2385 return(0);
2386}
2387
2388static void
2389bge_release_resources(sc)
2390 struct bge_softc *sc;
2391{
2392 device_t dev;
2393
2394 dev = sc->bge_dev;
2395
2396 if (sc->bge_vpd_prodname != NULL)
2397 free(sc->bge_vpd_prodname, M_DEVBUF);
2398
2399 if (sc->bge_vpd_readonly != NULL)
2400 free(sc->bge_vpd_readonly, M_DEVBUF);
2401
2402 if (sc->bge_intrhand != NULL)
2403 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2404
2405 if (sc->bge_irq != NULL)
2406 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2407
2408 if (sc->bge_res != NULL)
2409 bus_release_resource(dev, SYS_RES_MEMORY,
2410 BGE_PCI_BAR0, sc->bge_res);
2411
2412 if (sc->bge_ifp != NULL)
2413 if_free(sc->bge_ifp);
2414
2415 bge_dma_free(sc);
2416
2417 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2418 BGE_LOCK_DESTROY(sc);
2419
2420 return;
2421}
2422
2423static void
2424bge_reset(sc)
2425 struct bge_softc *sc;
2426{
2427 device_t dev;
2428 u_int32_t cachesize, command, pcistate, reset;
2429 int i, val = 0;
2430
2431 dev = sc->bge_dev;
2432
2433 /* Save some important PCI state. */
2434 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2435 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2436 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2437
2438 pci_write_config(dev, BGE_PCI_MISC_CTL,
2439 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2440 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2441
2442 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2443
2444 /* XXX: Broadcom Linux driver. */
2445 if (sc->bge_pcie) {
2446 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2447 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2448 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2449 /* Prevent PCIE link training during global reset */
2450 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2451 reset |= (1<<29);
2452 }
2453 }
2454
2455 /* Issue global reset */
2456 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2457
2458 DELAY(1000);
2459
2460 /* XXX: Broadcom Linux driver. */
2461 if (sc->bge_pcie) {
2462 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2463 uint32_t v;
2464
2465 DELAY(500000); /* wait for link training to complete */
2466 v = pci_read_config(dev, 0xc4, 4);
2467 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2468 }
2469 /* Set PCIE max payload size and clear error status. */
2470 pci_write_config(dev, 0xd8, 0xf5000, 4);
2471 }
2472
2473 /* Reset some of the PCI state that got zapped by reset */
2474 pci_write_config(dev, BGE_PCI_MISC_CTL,
2475 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2476 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2477 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2478 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2479 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2480
2481 /* Enable memory arbiter. */
2482 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2483 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2484 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2485
2486 /*
2487 * Prevent PXE restart: write a magic number to the
2488 * general communications memory at 0xB50.
2489 */
2490 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2491 /*
2492 * Poll the value location we just wrote until
2493 * we see the 1's complement of the magic number.
2494 * This indicates that the firmware initialization
2495 * is complete.
2496 */
2497 for (i = 0; i < BGE_TIMEOUT; i++) {
2498 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2499 if (val == ~BGE_MAGIC_NUMBER)
2500 break;
2501 DELAY(10);
2502 }
2503
2504 if (i == BGE_TIMEOUT) {
2505 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2506 return;
2507 }
2508
2509 /*
2510 * XXX Wait for the value of the PCISTATE register to
2511 * return to its original pre-reset state. This is a
2512 * fairly good indicator of reset completion. If we don't
2513 * wait for the reset to fully complete, trying to read
2514 * from the device's non-PCI registers may yield garbage
2515 * results.
2516 */
2517 for (i = 0; i < BGE_TIMEOUT; i++) {
2518 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2519 break;
2520 DELAY(10);
2521 }
2522
2523 /* Fix up byte swapping */
2524 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2525 BGE_MODECTL_BYTESWAP_DATA);
2526
2527 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2528
2529 /*
2530 * The 5704 in TBI mode apparently needs some special
2531 * adjustment to ensure the SERDES drive level is set
2532 * to 1.2V.
2533 */
2534 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2535 uint32_t serdescfg;
2536 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2537 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2538 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2539 }
2540
2541 /* XXX: Broadcom Linux driver. */
2542 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2543 uint32_t v;
2544
2545 v = CSR_READ_4(sc, 0x7c00);
2546 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2547 }
2548 DELAY(10000);
2549
2550 return;
2551}
2552
2553/*
2554 * Frame reception handling. This is called if there's a frame
2555 * on the receive return list.
2556 *
2557 * Note: we have to be able to handle two possibilities here:
2558 * 1) the frame is from the jumbo receive ring
2743 * 2) the frame is from the standard receive ring
2744 */
2745
2746static void
2747bge_rxeof(sc)
2748 struct bge_softc *sc;
2749{
2750 struct ifnet *ifp;
2751 int stdcnt = 0, jumbocnt = 0;
2752
2753 BGE_LOCK_ASSERT(sc);
2754
2755 ifp = sc->bge_ifp;
2756
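	/*
	 * Sync the RX rings so the CPU sees the chip's latest producer
	 * updates before we walk the return ring and recycle buffers.
	 */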
2757 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2758 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2759 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2760 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2761 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2762 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2763 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2764 sc->bge_cdata.bge_rx_jumbo_ring_map,
2765 BUS_DMASYNC_POSTREAD);
2766 }
2767
2769 while (sc->bge_rx_saved_considx !=
2769 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2770 struct bge_rx_bd *cur_rx;
2771 u_int32_t rxidx;
2772 struct ether_header *eh;
2773 struct mbuf *m = NULL;
2774 u_int16_t vlan_tag = 0;
2775 int have_tag = 0;
2776
2777#ifdef DEVICE_POLLING
2778 if (ifp->if_capenable & IFCAP_POLLING) {
2779 if (sc->rxcycles <= 0)
2780 break;
2781 sc->rxcycles--;
2782 }
2783#endif
2784
2785 cur_rx =
2786 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2787
2788 rxidx = cur_rx->bge_idx;
2789 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2790
2791 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2792 have_tag = 1;
2793 vlan_tag = cur_rx->bge_vlan_tag;
2794 }
2795
2796 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2797 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2798 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2799 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2800 BUS_DMASYNC_POSTREAD);
2801 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2802 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2803 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2804 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2805 jumbocnt++;
2806 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2807 ifp->if_ierrors++;
2808 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2809 continue;
2810 }
2811 if (bge_newbuf_jumbo(sc,
2812 sc->bge_jumbo, NULL) == ENOBUFS) {
2813 ifp->if_ierrors++;
2814 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2815 continue;
2816 }
2817 } else {
2818 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2819 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2820 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2821 BUS_DMASYNC_POSTREAD);
2822 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2823 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2824 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2825 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2826 stdcnt++;
2827 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2828 ifp->if_ierrors++;
2829 bge_newbuf_std(sc, sc->bge_std, m);
2830 continue;
2831 }
2832 if (bge_newbuf_std(sc, sc->bge_std,
2833 NULL) == ENOBUFS) {
2834 ifp->if_ierrors++;
2835 bge_newbuf_std(sc, sc->bge_std, m);
2836 continue;
2837 }
2838 }
2839
2840 ifp->if_ipackets++;
2841#ifndef __i386__
2842 /*
2843 * The i386 allows unaligned accesses, but for other
2844 * platforms we must make sure the payload is aligned.
2845 */
2846 if (sc->bge_rx_alignment_bug) {
2847 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2848 cur_rx->bge_len);
2849 m->m_data += ETHER_ALIGN;
2850 }
2851#endif
2852 eh = mtod(m, struct ether_header *);
2853 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2854 m->m_pkthdr.rcvif = ifp;
2855
2856#if 0 /* currently broken for some packets, possibly related to TCP options */
2857 if (ifp->if_capenable & IFCAP_RXCSUM) {
2858 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2859 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2860 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2861 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2862 m->m_pkthdr.csum_data =
2863 cur_rx->bge_tcp_udp_csum;
2864 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2865 }
2866 }
2867#endif
2868
2869 /*
2870 * If we received a packet with a vlan tag,
2871 * attach that information to the packet.
2872 */
2873 if (have_tag)
2874 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2875
2876 BGE_UNLOCK(sc);
2877 (*ifp->if_input)(ifp, m);
2878 BGE_LOCK(sc);
2879 }
2880
2881 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2882 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2883 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2884 sc->bge_cdata.bge_rx_std_ring_map,
2885 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2886 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2887 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2888 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2889 sc->bge_cdata.bge_rx_jumbo_ring_map,
2890 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2891 }
2892
2893 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2894 if (stdcnt)
2895 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2896 if (jumbocnt)
2897 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2898
2899 return;
2900}
2901
2902static void
2903bge_txeof(sc)
2904 struct bge_softc *sc;
2905{
2906 struct bge_tx_bd *cur_tx = NULL;
2907 struct ifnet *ifp;
2908
2909 BGE_LOCK_ASSERT(sc);
2910
2911 ifp = sc->bge_ifp;
2912
2913 /*
2914 * Go through our tx ring and free mbufs for those
2915 * frames that have been sent.
2916 */
2917 while (sc->bge_tx_saved_considx !=
2918 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2919 u_int32_t idx = 0;
2920
2921 idx = sc->bge_tx_saved_considx;
2922 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2923 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2924 ifp->if_opackets++;
2925 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2926 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2927 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2928 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2929 sc->bge_cdata.bge_tx_dmamap[idx]);
2930 }
2931 sc->bge_txcnt--;
2932 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2933 ifp->if_timer = 0;
2934 }
2935
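	/* If any descriptors were reclaimed, let bge_start() queue more frames. */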
2936 if (cur_tx != NULL)
2937 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2938
2939 return;
2940}
2941
2942#ifdef DEVICE_POLLING
2943static void
2944bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2945{
2946 struct bge_softc *sc = ifp->if_softc;
2947
2948 BGE_LOCK(sc);
2949 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2950 bge_poll_locked(ifp, cmd, count);
2951 BGE_UNLOCK(sc);
2952}
2953
2954static void
2955bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2956{
2957 struct bge_softc *sc = ifp->if_softc;
2958
2959 BGE_LOCK_ASSERT(sc);
2960
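	/* Bound this poll pass: bge_rxeof() handles at most "count" frames. */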
2961 sc->rxcycles = count;
2962 bge_rxeof(sc);
2963 bge_txeof(sc);
2964 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2965 bge_start_locked(ifp);
2966
2967 if (cmd == POLL_AND_CHECK_STATUS) {
2968 uint32_t statusword;
2969
2970 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2971 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2972
2973 statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2974
2975 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2976 statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2977 bge_link_upd(sc);
2978
2979 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2980 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2981 }
2982}
2983#endif /* DEVICE_POLLING */
2984
2985static void
2986bge_intr(xsc)
2987 void *xsc;
2988{
2989 struct bge_softc *sc;
2990 struct ifnet *ifp;
2991 uint32_t statusword;
2992
2993 sc = xsc;
2994
2995 BGE_LOCK(sc);
2996
2997 ifp = sc->bge_ifp;
2998
2999#ifdef DEVICE_POLLING
3000 if (ifp->if_capenable & IFCAP_POLLING) {
3001 BGE_UNLOCK(sc);
3002 return;
3003 }
3004#endif
3005
3006 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3007 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
3008
3009 statusword =
3010 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
3011
3012#ifdef notdef
3013 /* Avoid this for now -- checking this register is expensive. */
3014 /* Make sure this is really our interrupt. */
3015 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
3016 return;
3017#endif
3018 /* Ack interrupt and stop others from occurring. */
3019 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3020
3021 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3022 statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3023 bge_link_upd(sc);
3024
3025 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3026 /* Check RX return ring producer/consumer */
3027 bge_rxeof(sc);
3028
3029 /* Check TX ring producer/consumer */
3030 bge_txeof(sc);
3031 }
3032
3033 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3034 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
3035
3036 bge_handle_events(sc);
3037
3038 /* Re-enable interrupts. */
3039 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3040
3041 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3042 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3043 bge_start_locked(ifp);
3044
3045 BGE_UNLOCK(sc);
3046
3047 return;
3048}
3049
3050static void
3051bge_tick_locked(sc)
3052 struct bge_softc *sc;
3053{
3054 struct mii_data *mii = NULL;
3055 struct ifnet *ifp;
3056
3057 BGE_LOCK_ASSERT(sc);
3058
3059 ifp = sc->bge_ifp;
3060
3061 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3062 sc->bge_asicrev == BGE_ASICREV_BCM5750)
3063 bge_stats_update_regs(sc);
3064 else
3065 bge_stats_update(sc);
3066
3067 if (sc->bge_tbi) {
3068 if (!sc->bge_link) {
3069 if (CSR_READ_4(sc, BGE_MAC_STS) &
3070 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3071 sc->bge_link++;
3072 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3073 BGE_CLRBIT(sc, BGE_MAC_MODE,
3074 BGE_MACMODE_TBI_SEND_CFGS);
3075 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3076 if (bootverbose)
3077 printf("bge%d: gigabit link up\n",
3078 sc->bge_unit);
3079 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3080 bge_start_locked(ifp);
3081 }
3082 }
3083 } else {
3085 mii = device_get_softc(sc->bge_miibus);
3086 mii_tick(mii);
3087
3088 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3089 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3090 sc->bge_link++;
3091 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
3092 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
3093 bootverbose)
3094 printf("bge%d: gigabit link up\n", sc->bge_unit);
3095 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3096 bge_start_locked(ifp);
3097 }
3098 }
3099
3100 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3101}
3102
3103static void
3104bge_tick(xsc)
3105 void *xsc;
3106{
3107 struct bge_softc *sc;
3108
3109 sc = xsc;
3110
3111 BGE_LOCK(sc);
3112 bge_tick_locked(sc);
3113 BGE_UNLOCK(sc);
3114}
3115
3116static void
3117bge_stats_update_regs(sc)
3118 struct bge_softc *sc;
3119{
3120 struct ifnet *ifp;
3121 struct bge_mac_stats_regs stats;
3122 u_int32_t *s;
3123 int i;
3124
3125 ifp = sc->bge_ifp;
3126
3127 s = (u_int32_t *)&stats;
3128 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3129 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3130 s++;
3131 }
3132
3133 ifp->if_collisions +=
3134 (stats.dot3StatsSingleCollisionFrames +
3135 stats.dot3StatsMultipleCollisionFrames +
3136 stats.dot3StatsExcessiveCollisions +
3137 stats.dot3StatsLateCollisions) -
3138 ifp->if_collisions;
3139
3140 return;
3141}
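/*
 * Editor's note: the counters read back above are running totals, so
 * "if_collisions += total - if_collisions" is an assignment phrased
 * as an increment; e.g. if if_collisions was 10 and the summed
 * hardware counters are 25, the expression adds 15, leaving 25.  The
 * same idiom appears in bge_stats_update() below.
 */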
3142
3143static void
3144bge_stats_update(sc)
3145 struct bge_softc *sc;
3146{
3147 struct ifnet *ifp;
3148 struct bge_stats *stats;
3149
3150 ifp = sc->bge_ifp;
3151
3152 stats = (struct bge_stats *)(sc->bge_vhandle +
3153 BGE_MEMWIN_START + BGE_STATS_BLOCK);
3154
3155 ifp->if_collisions +=
3156 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
3157 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
3158 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
3159 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
3160 ifp->if_collisions;
3161
3162#ifdef notdef
3163 ifp->if_collisions +=
3164 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3165 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3166 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3167 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3168 ifp->if_collisions;
3169#endif
3170
3171 return;
3172}
3173
3174/*
3175 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3176 * pointers to descriptors.
3177 */
3178static int
3179bge_encap(sc, m_head, txidx)
3180 struct bge_softc *sc;
3181 struct mbuf *m_head;
3182 u_int32_t *txidx;
3183{
3184 struct bge_tx_bd *f = NULL;
3185 u_int16_t csum_flags = 0;
3186 struct m_tag *mtag;
3187 struct bge_dmamap_arg ctx;
3188 bus_dmamap_t map;
3189 int error;
3190
3192 if (m_head->m_pkthdr.csum_flags) {
3193 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3194 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3195 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3196 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3197 if (m_head->m_flags & M_LASTFRAG)
3198 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3199 else if (m_head->m_flags & M_FRAG)
3200 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3201 }
3202
3203 mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3204
3205 ctx.sc = sc;
3206 ctx.bge_idx = *txidx;
3207 ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3208 ctx.bge_flags = csum_flags;
3209 /*
3210 * Sanity check: avoid coming within 16 descriptors
3211 * of the end of the ring.
3212 */
3213 ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3214
3215 map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3216 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3217 m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3218
3219 if (error || ctx.bge_maxsegs == 0 /*||
3220 ctx.bge_idx == sc->bge_tx_saved_considx*/)
3221 return (ENOBUFS);
3222
3223 /*
3224 * Ensure that the map for this transmission
3225 * is placed at the array index of the last descriptor
3226 * in this chain.
3227 */
3228 sc->bge_cdata.bge_tx_dmamap[*txidx] =
3229 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3230 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3231 sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3232 sc->bge_txcnt += ctx.bge_maxsegs;
3233 f = &sc->bge_ldata.bge_tx_ring[*txidx];
3234 if (mtag != NULL) {
3235 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3236 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3237 } else {
3238 f->bge_vlan_tag = 0;
3239 }
3240
3241 BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3242 *txidx = ctx.bge_idx;
3243
3244 return(0);
3245}
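/*
 * Editor's note -- a worked example of the headroom check above,
 * assuming BGE_TX_RING_CNT is 512 and that BGE_INC() advances an
 * index modulo the ring size: with bge_txcnt at 490, bge_maxsegs is
 * (512 - 490) - 16 = 6, so a chain needing seven or more descriptors
 * is refused with ENOBUFS rather than eating into the 16-entry guard
 * band kept between the producer and the hardware consumer.
 */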
3246
3247/*
3248 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3249 * to the mbuf data regions directly in the transmit descriptors.
3250 */
3251static void
3252bge_start_locked(ifp)
3253 struct ifnet *ifp;
3254{
3255 struct bge_softc *sc;
3256 struct mbuf *m_head = NULL;
3257 u_int32_t prodidx = 0;
3258 int count = 0;
3259
3260 sc = ifp->if_softc;
3261
3262 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3263 return;
3264
3265 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3266
3267 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3268 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3269 if (m_head == NULL)
3270 break;
3271
3272 /*
3273 * XXX
3274 * The code inside the if() block is never reached since we
3275 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3276 * requests to checksum TCP/UDP in a fragmented packet.
3277 *
3278 * XXX
3279 * Safety overkill: if this is a fragmented packet chain
3280 * with delayed TCP/UDP checksums, then only encapsulate
3281 * it if we have enough descriptors to handle the entire
3282 * chain at once.
3283 * (paranoia -- may not actually be needed)
3284 */
3285 if (m_head->m_flags & M_FIRSTFRAG &&
3286 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3287 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3288 m_head->m_pkthdr.csum_data + 16) {
3289 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3290 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3291 break;
3292 }
3293 }
3294
3295 /*
3296 * Pack the data into the transmit ring. If we
3297 * don't have room, set the OACTIVE flag and wait
3298 * for the NIC to drain the ring.
3299 */
3300 if (bge_encap(sc, m_head, &prodidx)) {
3301 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3302 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3303 break;
3304 }
3305 ++count;
3306
3307 /*
3308 * If there's a BPF listener, bounce a copy of this frame
3309 * to him.
3310 */
3311 BPF_MTAP(ifp, m_head);
3312 }
3313
3314 if (count == 0) {
3315 /* no packets were dequeued */
3316 return;
3317 }
3318
3319 /* Transmit */
3320 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3321 /* 5700 BX errata: the producer index must be written twice. */
3322 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3323 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3324
3325 /*
3326 * Set a timeout in case the chip goes out to lunch.
3327 */
3328 ifp->if_timer = 5;
3329
3330 return;
3331}
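/*
 * Editor's note (sketch of the assumed ifnet timeout contract): the
 * stack's slow timer decrements if_timer about once a second and
 * calls the driver's watchdog when it reaches zero, so the
 * assignment above arms a roughly 5 second transmit deadline, which
 * bge_txeof() keeps clearing as completions arrive.  Approximately:
 */
#ifdef notdef
static void
if_slowtimo_model(struct ifnet *ifp)
{
	/* Hypothetical: invoked once per second for each interface. */
	if (ifp->if_timer > 0 && --ifp->if_timer == 0)
		bge_watchdog(ifp);
}
#endif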
3332
3333/*
3334 * Locked wrapper for the transmit routine above: bge_start() is the
3335 * if_start entry point, so it takes the driver lock itself.
3336 */
3337static void
3338bge_start(ifp)
3339 struct ifnet *ifp;
3340{
3341 struct bge_softc *sc;
3342
3343 sc = ifp->if_softc;
3344 BGE_LOCK(sc);
3345 bge_start_locked(ifp);
3346 BGE_UNLOCK(sc);
3347}
3348
3349static void
3350bge_init_locked(sc)
3351 struct bge_softc *sc;
3352{
3353 struct ifnet *ifp;
3354 u_int16_t *m;
3355
3356 BGE_LOCK_ASSERT(sc);
3357
3358 ifp = sc->bge_ifp;
3359
3360 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3361 return;
3362
3363 /* Cancel pending I/O and flush buffers. */
3364 bge_stop(sc);
3365 bge_reset(sc);
3366 bge_chipinit(sc);
3367
3368 /*
3369 * Init the various state machines, ring
3370 * control blocks and firmware.
3371 */
3372 if (bge_blockinit(sc)) {
3373 printf("bge%d: initialization failure\n", sc->bge_unit);
3374 return;
3375 }
3376
3377 ifp = sc->bge_ifp;
3378
3379 /* Specify MTU. */
3380 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3381 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3382
3383 /* Load our MAC address. */
3384 m = (u_int16_t *)IF_LLADDR(sc->bge_ifp);
3385 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3386 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
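	/*
	 * Editor's note, illustrative only: for station address
	 * 00:11:22:33:44:55, m[0..2] are the three 16-bit words of the
	 * address, so the writes above yield BGE_MAC_ADDR1_LO = 0x0011
	 * and BGE_MAC_ADDR1_HI = 0x22334455; htons() makes the packing
	 * come out the same on little- and big-endian hosts.
	 */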
3387
3388 /* Enable or disable promiscuous mode as needed. */
3389 if (ifp->if_flags & IFF_PROMISC) {
3390 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3391 } else {
3392 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3393 }
3394
3395 /* Program multicast filter. */
3396 bge_setmulti(sc);
3397
3398 /* Init RX ring. */
3399 bge_init_rx_ring_std(sc);
3400
3401 /*
3402 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3403 * memory to ensure that the chip has in fact read the first
3404 * entry of the ring.
3405 */
3406 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3407 u_int32_t v, i;
3408 for (i = 0; i < 10; i++) {
3409 DELAY(20);
3410 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3411 if (v == (MCLBYTES - ETHER_ALIGN))
3412 break;
3413 }
3414 if (i == 10)
3415 printf("bge%d: 5705 A0 chip failed to load RX ring\n",
3416 sc->bge_unit);
3417 }
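	/*
	 * Editor's note: the loop above polls at 20us intervals, ten
	 * times, i.e. a worst-case wait of about 200us, and the value
	 * it expects, MCLBYTES - ETHER_ALIGN, is the buffer length the
	 * driver programs into each standard RX descriptor.
	 */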
3418
3419 /* Init jumbo RX ring. */
3420 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3421 bge_init_rx_ring_jumbo(sc);
3422
3423 /* Init our RX return ring index */
3424 sc->bge_rx_saved_considx = 0;
3425
3426 /* Init TX ring. */
3427 bge_init_tx_ring(sc);
3428
3429 /* Turn on transmitter */
3430 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3431
3432 /* Turn on receiver */
3433 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3434
3435 /* Tell firmware we're alive. */
3436 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3437
3438#ifdef DEVICE_POLLING
3439 /* Disable interrupts if we are polling. */
3440 if (ifp->if_capenable & IFCAP_POLLING) {
3441 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3442 BGE_PCIMISCCTL_MASK_PCI_INTR);
3443 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3444 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3445 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3446 } else
3447#endif
3448
3449 /* Enable host interrupts. */
3450 {
3451 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3452 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3453 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3454 }
3455
3456 bge_ifmedia_upd(ifp);
3457
3458 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3459 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3460
3461 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3462
3463 return;
3464}
3465
3466static void
3467bge_init(xsc)
3468 void *xsc;
3469{
3470 struct bge_softc *sc = xsc;
3471
3472 BGE_LOCK(sc);
3473 bge_init_locked(sc);
3474 BGE_UNLOCK(sc);
3475
3476 return;
3477}
3478
3479/*
3480 * Set media options.
3481 */
3482static int
3483bge_ifmedia_upd(ifp)
3484 struct ifnet *ifp;
3485{
3486 struct bge_softc *sc;
3487 struct mii_data *mii;
3488 struct ifmedia *ifm;
3489
3490 sc = ifp->if_softc;
3491 ifm = &sc->bge_ifmedia;
3492
3493 /* If this is a 1000baseX NIC, enable the TBI port. */
3494 if (sc->bge_tbi) {
3495 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3496 return(EINVAL);
3497 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3498 case IFM_AUTO:
3499#ifndef BGE_FAKE_AUTONEG
3500 /*
3501 * The BCM5704 ASIC appears to have a special
3502 * mechanism for programming the autoneg
3503 * advertisement registers in TBI mode.
3504 */
3505 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3506 uint32_t sgdig;
3507 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3508 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3509 sgdig |= BGE_SGDIGCFG_AUTO|
3510 BGE_SGDIGCFG_PAUSE_CAP|
3511 BGE_SGDIGCFG_ASYM_PAUSE;
3512 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3513 sgdig|BGE_SGDIGCFG_SEND);
3514 DELAY(5);
3515 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3516 }
3517#endif
3518 break;
3519 case IFM_1000_SX:
3520 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3521 BGE_CLRBIT(sc, BGE_MAC_MODE,
3522 BGE_MACMODE_HALF_DUPLEX);
3523 } else {
3524 BGE_SETBIT(sc, BGE_MAC_MODE,
3525 BGE_MACMODE_HALF_DUPLEX);
3526 }
3527 break;
3528 default:
3529 return(EINVAL);
3530 }
3531 return(0);
3532 }
3533
3534 mii = device_get_softc(sc->bge_miibus);
3535 sc->bge_link = 0;
3536 if (mii->mii_instance) {
3537 struct mii_softc *miisc;
3538 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3539 miisc = LIST_NEXT(miisc, mii_list))
3540 mii_phy_reset(miisc);
3541 }
3542 mii_mediachg(mii);
3543
3544 return(0);
3545}
3546
3547/*
3548 * Report current media status.
3549 */
3550static void
3551bge_ifmedia_sts(ifp, ifmr)
3552 struct ifnet *ifp;
3553 struct ifmediareq *ifmr;
3554{
3555 struct bge_softc *sc;
3556 struct mii_data *mii;
3557
3558 sc = ifp->if_softc;
3559
3560 if (sc->bge_tbi) {
3561 ifmr->ifm_status = IFM_AVALID;
3562 ifmr->ifm_active = IFM_ETHER;
3563 if (CSR_READ_4(sc, BGE_MAC_STS) &
3564 BGE_MACSTAT_TBI_PCS_SYNCHED)
3565 ifmr->ifm_status |= IFM_ACTIVE;
3566 ifmr->ifm_active |= IFM_1000_SX;
3567 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3568 ifmr->ifm_active |= IFM_HDX;
3569 else
3570 ifmr->ifm_active |= IFM_FDX;
3571 return;
3572 }
3573
3574 mii = device_get_softc(sc->bge_miibus);
3575 mii_pollstat(mii);
3576 ifmr->ifm_active = mii->mii_media_active;
3577 ifmr->ifm_status = mii->mii_media_status;
3578
3579 return;
3580}
3581
3582static int
3583bge_ioctl(ifp, command, data)
3584 struct ifnet *ifp;
3585 u_long command;
3586 caddr_t data;
3587{
3588 struct bge_softc *sc = ifp->if_softc;
3589 struct ifreq *ifr = (struct ifreq *) data;
3590 int mask, error = 0;
3591 struct mii_data *mii;
3592
3593 switch(command) {
3594 case SIOCSIFMTU:
3595 /* Disallow jumbo frames on 5705 and 5750. */
3596 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3597 sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3598 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3599 error = EINVAL;
3600 else {
3601 ifp->if_mtu = ifr->ifr_mtu;
3602 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3603 bge_init(sc);
3604 }
3605 break;
3606 case SIOCSIFFLAGS:
3607 BGE_LOCK(sc);
3608 if (ifp->if_flags & IFF_UP) {
3609 /*
3610 * If only the state of the PROMISC flag changed,
3611 * then just use the 'set promisc mode' command
3612 * instead of reinitializing the entire NIC. Doing
3613 * a full re-init means reloading the firmware and
3614 * waiting for it to start up, which may take a
3615 * second or two.
3616 */
3617 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3618 ifp->if_flags & IFF_PROMISC &&
3619 !(sc->bge_if_flags & IFF_PROMISC)) {
3620 BGE_SETBIT(sc, BGE_RX_MODE,
3621 BGE_RXMODE_RX_PROMISC);
3622 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3623 !(ifp->if_flags & IFF_PROMISC) &&
3624 sc->bge_if_flags & IFF_PROMISC) {
3625 BGE_CLRBIT(sc, BGE_RX_MODE,
3626 BGE_RXMODE_RX_PROMISC);
3627 } else
3628 bge_init_locked(sc);
3629 } else {
3630 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3631 bge_stop(sc);
3632 }
3633 }
3634 sc->bge_if_flags = ifp->if_flags;
3635 BGE_UNLOCK(sc);
3636 error = 0;
3637 break;
3638 case SIOCADDMULTI:
3639 case SIOCDELMULTI:
3640 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3641 BGE_LOCK(sc);
3642 bge_setmulti(sc);
3643 BGE_UNLOCK(sc);
3644 error = 0;
3645 }
3646 break;
3647 case SIOCSIFMEDIA:
3648 case SIOCGIFMEDIA:
3649 if (sc->bge_tbi) {
3650 error = ifmedia_ioctl(ifp, ifr,
3651 &sc->bge_ifmedia, command);
3652 } else {
3653 mii = device_get_softc(sc->bge_miibus);
3654 error = ifmedia_ioctl(ifp, ifr,
3655 &mii->mii_media, command);
3656 }
3657 break;
3658 case SIOCSIFCAP:
3659 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3660#ifdef DEVICE_POLLING
3661 if (mask & IFCAP_POLLING) {
3662 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3663 error = ether_poll_register(bge_poll, ifp);
3664 if (error)
3665 return(error);
3666 BGE_LOCK(sc);
3667 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3668 BGE_PCIMISCCTL_MASK_PCI_INTR);
3669 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3670 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3671 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3672 ifp->if_capenable |= IFCAP_POLLING;
3673 BGE_UNLOCK(sc);
3674 } else {
3675 error = ether_poll_deregister(ifp);
3676 /* Enable interrupt even in error case */
3677 BGE_LOCK(sc);
3678 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3679 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3680 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3681 BGE_PCIMISCCTL_MASK_PCI_INTR);
3682 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3683 ifp->if_capenable &= ~IFCAP_POLLING;
3684 BGE_UNLOCK(sc);
3685 }
3686 }
3687#endif
3688 /* NB: the code for RX csum offload is disabled for now */
3689 if (mask & IFCAP_TXCSUM) {
3690 ifp->if_capenable ^= IFCAP_TXCSUM;
3691 if (IFCAP_TXCSUM & ifp->if_capenable)
3692 ifp->if_hwassist = BGE_CSUM_FEATURES;
3693 else
3694 ifp->if_hwassist = 0;
3695 }
3696 break;
3697 default:
3698 error = ether_ioctl(ifp, command, data);
3699 break;
3700 }
3701
3702 return(error);
3703}
3704
3705static void
3706bge_watchdog(ifp)
3707 struct ifnet *ifp;
3708{
3709 struct bge_softc *sc;
3710
3711 sc = ifp->if_softc;
3712
3713 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3714
3715 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3716 bge_init(sc);
3717
3718 ifp->if_oerrors++;
3719
3720 return;
3721}
3722
3723/*
3724 * Stop the adapter and free any mbufs allocated to the
3725 * RX and TX lists.
3726 */
3727static void
3728bge_stop(sc)
3729 struct bge_softc *sc;
3730{
3731 struct ifnet *ifp;
3732 struct ifmedia_entry *ifm;
3733 struct mii_data *mii = NULL;
3734 int mtmp, itmp;
3735
3736 BGE_LOCK_ASSERT(sc);
3737
3738 ifp = sc->bge_ifp;
3739
3740 if (!sc->bge_tbi)
3741 mii = device_get_softc(sc->bge_miibus);
3742
3743 callout_stop(&sc->bge_stat_ch);
3744
3745 /*
3746 * Disable all of the receiver blocks
3747 */
3748 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3749 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3750 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3751 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3752 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3753 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3754 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3755 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3756 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3757
3758 /*
3759 * Disable all of the transmit blocks
3760 */
3761 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3762 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3763 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3764 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3765 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3766 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3767 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3768 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3769 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3770
3771 /*
3772 * Shut down all of the memory managers and related
3773 * state machines.
3774 */
3775 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3776 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3777 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3778 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3779 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3780 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3781 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3782 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3783 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3784 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3785 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3786 }
3787
3788 /* Disable host interrupts. */
3789 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3790 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3791
3792 /*
3793 * Tell firmware we're shutting down.
3794 */
3795 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3796
3797 /* Free the RX lists. */
3798 bge_free_rx_ring_std(sc);
3799
3800 /* Free jumbo RX list. */
3801 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3802 sc->bge_asicrev != BGE_ASICREV_BCM5750)
3803 bge_free_rx_ring_jumbo(sc);
3804
3805 /* Free TX buffers. */
3806 bge_free_tx_ring(sc);
3807
3808 /*
3809 * Isolate/power down the PHY, but leave the media selection
3810 * unchanged so that things will be put back to normal when
3811 * we bring the interface back up.
3812 */
3813 if (!sc->bge_tbi) {
3814 itmp = ifp->if_flags;
3815 ifp->if_flags |= IFF_UP;
3816 /*
3817 * If we are called from bge_detach(), mii is already NULL.
3818 */
3819 if (mii != NULL) {
3820 ifm = mii->mii_media.ifm_cur;
3821 mtmp = ifm->ifm_media;
3822 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3823 mii_mediachg(mii);
3824 ifm->ifm_media = mtmp;
3825 }
3826 ifp->if_flags = itmp;
3827 }
3828
3829 sc->bge_link = 0;
3830
3831 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3832
3833 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3834
3835 return;
3836}
3837
3838/*
3839 * Stop all chip I/O so that the kernel's probe routines don't
3840 * get confused by errant DMAs when rebooting.
3841 */
3842static void
3843bge_shutdown(dev)
3844 device_t dev;
3845{
3846 struct bge_softc *sc;
3847
3848 sc = device_get_softc(dev);
3849
3850 BGE_LOCK(sc);
3851 bge_stop(sc);
3852 bge_reset(sc);
3853 BGE_UNLOCK(sc);
3854
3855 return;
3856}
3857
3858static int
3859bge_suspend(device_t dev)
3860{
3861 struct bge_softc *sc;
3862
3863 sc = device_get_softc(dev);
3864 BGE_LOCK(sc);
3865 bge_stop(sc);
3866 BGE_UNLOCK(sc);
3867
3868 return (0);
3869}
3870
3871static int
3872bge_resume(device_t dev)
3873{
3874 struct bge_softc *sc;
3875 struct ifnet *ifp;
3876
3877 sc = device_get_softc(dev);
3878 BGE_LOCK(sc);
3879 ifp = sc->bge_ifp;
3880 if (ifp->if_flags & IFF_UP) {
3881 bge_init_locked(sc);
3882 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3883 bge_start_locked(ifp);
3884 }
3885 BGE_UNLOCK(sc);
3886
3887 return (0);
3888}
3889
3890static void
3891bge_link_upd(sc)
3892 struct bge_softc *sc;
3893{
3894 uint32_t status;
3895
3896 BGE_LOCK_ASSERT(sc);
3897 /*
3898 * Process link state changes.
3899 * Grrr. The link status word in the status block does
3900 * not work correctly on the BCM5700 rev AX and BX chips,
3901 * according to all available information. Hence, we have
3902 * to enable MII interrupts in order to properly obtain
3903 * async link changes. Unfortunately, this also means that
3904 * we have to read the MAC status register to detect link
3905 * changes, thereby adding an additional register access to
3906 * the interrupt handler.
3907 */
3908
3909 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3910 status = CSR_READ_4(sc, BGE_MAC_STS);
3911 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3912 sc->bge_link = 0;
3913 callout_stop(&sc->bge_stat_ch);
3914 bge_tick_locked(sc);
3915 /* Clear the interrupt */
3916 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3917 BGE_EVTENB_MI_INTERRUPT);
3918 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3919 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3920 BRGPHY_INTRS);
3921 }
3922 return;
3923 }
3924
3925 /*
3926 * Sometimes PCS encoding errors are detected in
3927 * TBI mode (on fiber NICs), and for some reason
3928 * the chip will signal them as link changes.
3929 * If we get a link change event, but the 'PCS
3930 * encoding error' bit in the MAC status register
3931 * is set, don't bother doing a link check.
3932 * This avoids spurious "gigabit link up" messages
3933 * that sometimes appear on fiber NICs during
3934 * periods of heavy traffic. (There should be no
3935 * effect on copper NICs.)
3936 */
3937 if (sc->bge_tbi)
 status = CSR_READ_4(sc, BGE_MAC_STS);
3938
3939 if (!sc->bge_tbi || !(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
3940 BGE_MACSTAT_MI_COMPLETE))) {
3941 sc->bge_link = 0;
3942 callout_stop(&sc->bge_stat_ch);
3943 bge_tick_locked(sc);
3944 }
3945
3946 /* Clear the interrupt */
3947 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3948 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3949 BGE_MACSTAT_LINK_CHANGED);
3950
3951 /* Force flush the status block cached by PCI bridge */
3952 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
3953}
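/*
 * Editor's note -- the cases handled above, roughly in table form:
 *
 *	hardware	change detected via			action
 *	BCM5700		MAC status MI interrupt			drop link, tick, re-arm PHY intr
 *	TBI (fiber)	MAC status, PCS noise filtered out	drop link, tick
 *	copper (MII)	status block link-change flag		drop link, tick
 */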
3954