if_bge.c (200246) → if_bge.c (200264)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 200246 2009-12-08 03:24:29Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 200264 2009-12-08 17:54:23Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
217
218 { SK_VENDORID, SK_DEVICEID_ALTIMA },
219
220 { TC_VENDORID, TC_DEVICEID_3C996 },
221
222 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
223 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
224 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
225
226 { 0, 0 }
227};
228
229static const struct bge_vendor {
230 uint16_t v_id;
231 const char *v_name;
232} bge_vendors[] = {
233 { ALTEON_VENDORID, "Alteon" },
234 { ALTIMA_VENDORID, "Altima" },
235 { APPLE_VENDORID, "Apple" },
236 { BCOM_VENDORID, "Broadcom" },
237 { SK_VENDORID, "SysKonnect" },
238 { TC_VENDORID, "3Com" },
239 { FJTSU_VENDORID, "Fujitsu" },
240
241 { 0, NULL }
242};
243
244static const struct bge_revision {
245 uint32_t br_chipid;
246 const char *br_name;
247} bge_revisions[] = {
248 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
249 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
250 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
251 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
252 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
253 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
254 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
255 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
256 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
257 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
258 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
259 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
260 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
261 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
262 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
263 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
264 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
265 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
266 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
267 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
268 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
269 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
270 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
271 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
272 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
273 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
274 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
275 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
276 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
277 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
278 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
279 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
280 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
281 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
282 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
283 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
284 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
285 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
286 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
287 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
288 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
289 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
290 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
291 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
292 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
293 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
294 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
295 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
296 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
297 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
298 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
299 /* 5754 and 5787 share the same ASIC ID */
300 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
301 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
302 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
303 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
304 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
305 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
306 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
307
308 { 0, NULL }
309};
310
311/*
312 * Some defaults for major revisions, so that newer steppings
313 * that we don't know about have a shot at working.
314 */
315static const struct bge_revision bge_majorrevs[] = {
316 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
317 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
318 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
319 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
320 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
321 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
322 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
323 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
324 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
325 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
326 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
327 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
328 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
329 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
330 /* 5754 and 5787 share the same ASIC ID */
331 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
332 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
333 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
334
335 { 0, NULL }
336};
337
338#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
339#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
340#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
341#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
342#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
343#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
344
345const struct bge_revision * bge_lookup_rev(uint32_t);
346const struct bge_vendor * bge_lookup_vendor(uint16_t);
347
348typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
349
350static int bge_probe(device_t);
351static int bge_attach(device_t);
352static int bge_detach(device_t);
353static int bge_suspend(device_t);
354static int bge_resume(device_t);
355static void bge_release_resources(struct bge_softc *);
356static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
357static int bge_dma_alloc(device_t);
358static void bge_dma_free(struct bge_softc *);
359
360static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
361static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
362static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
363static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
364static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
365
366static void bge_txeof(struct bge_softc *, uint16_t);
367static int bge_rxeof(struct bge_softc *, uint16_t, int);
368
369static void bge_asf_driver_up (struct bge_softc *);
370static void bge_tick(void *);
371static void bge_stats_update(struct bge_softc *);
372static void bge_stats_update_regs(struct bge_softc *);
373static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
374 uint16_t *);
375static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
376
377static void bge_intr(void *);
378static int bge_msi_intr(void *);
379static void bge_intr_task(void *, int);
380static void bge_start_locked(struct ifnet *);
381static void bge_start(struct ifnet *);
382static int bge_ioctl(struct ifnet *, u_long, caddr_t);
383static void bge_init_locked(struct bge_softc *);
384static void bge_init(void *);
385static void bge_stop(struct bge_softc *);
386static void bge_watchdog(struct bge_softc *);
387static int bge_shutdown(device_t);
388static int bge_ifmedia_upd_locked(struct ifnet *);
389static int bge_ifmedia_upd(struct ifnet *);
390static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
391
392static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
393static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
394
395static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
396static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
397
398static void bge_setpromisc(struct bge_softc *);
399static void bge_setmulti(struct bge_softc *);
400static void bge_setvlan(struct bge_softc *);
401
402static int bge_newbuf_std(struct bge_softc *, int);
403static int bge_newbuf_jumbo(struct bge_softc *, int);
404static int bge_init_rx_ring_std(struct bge_softc *);
405static void bge_free_rx_ring_std(struct bge_softc *);
406static int bge_init_rx_ring_jumbo(struct bge_softc *);
407static void bge_free_rx_ring_jumbo(struct bge_softc *);
408static void bge_free_tx_ring(struct bge_softc *);
409static int bge_init_tx_ring(struct bge_softc *);
410
411static int bge_chipinit(struct bge_softc *);
412static int bge_blockinit(struct bge_softc *);
413
414static int bge_has_eaddr(struct bge_softc *);
415static uint32_t bge_readmem_ind(struct bge_softc *, int);
416static void bge_writemem_ind(struct bge_softc *, int, int);
417static void bge_writembx(struct bge_softc *, int, int);
418#ifdef notdef
419static uint32_t bge_readreg_ind(struct bge_softc *, int);
420#endif
421static void bge_writemem_direct(struct bge_softc *, int, int);
422static void bge_writereg_ind(struct bge_softc *, int, int);
423static void bge_set_max_readrq(struct bge_softc *);
424
425static int bge_miibus_readreg(device_t, int, int);
426static int bge_miibus_writereg(device_t, int, int, int);
427static void bge_miibus_statchg(device_t);
428#ifdef DEVICE_POLLING
429static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
430#endif
431
432#define BGE_RESET_START 1
433#define BGE_RESET_STOP 2
434static void bge_sig_post_reset(struct bge_softc *, int);
435static void bge_sig_legacy(struct bge_softc *, int);
436static void bge_sig_pre_reset(struct bge_softc *, int);
437static int bge_reset(struct bge_softc *);
438static void bge_link_upd(struct bge_softc *);
439
440/*
441 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
442 * leak information to untrusted users. It is also known to cause alignment
443 * traps on certain architectures.
444 */
445#ifdef BGE_REGISTER_DEBUG
446static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
447static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
448static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
449#endif
450static void bge_add_sysctls(struct bge_softc *);
451static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
452
453static device_method_t bge_methods[] = {
454 /* Device interface */
455 DEVMETHOD(device_probe, bge_probe),
456 DEVMETHOD(device_attach, bge_attach),
457 DEVMETHOD(device_detach, bge_detach),
458 DEVMETHOD(device_shutdown, bge_shutdown),
459 DEVMETHOD(device_suspend, bge_suspend),
460 DEVMETHOD(device_resume, bge_resume),
461
462 /* bus interface */
463 DEVMETHOD(bus_print_child, bus_generic_print_child),
464 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
465
466 /* MII interface */
467 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
468 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
469 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
470
471 { 0, 0 }
472};
473
474static driver_t bge_driver = {
475 "bge",
476 bge_methods,
477 sizeof(struct bge_softc)
478};
479
480static devclass_t bge_devclass;
481
482DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
483DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
484
485static int bge_allow_asf = 1;
486/*
487 * A common design characteristic of many Broadcom client controllers
488 * is that they only support a single outstanding DMA read operation
489 * on the PCIe bus.  This means it takes twice as long to fetch a TX
490 * frame that is split into header and payload buffers as it does to
491 * fetch a single, contiguous TX frame (2 reads vs. 1 read).  For
492 * these controllers, coalescing buffers to reduce the number of memory
493 * reads is an effective way to get maximum performance (about 940Mbps).
494 * Without collapsing TX buffers the maximum TCP bulk transfer
495 * performance is about 850Mbps.  However, forcing mbufs to be
496 * coalesced consumes a lot of CPU cycles, so leave it off by default.
497 */
498static int bge_forced_collapse = 0;
499
500TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
501TUNABLE_INT("hw.bge.forced_collapse", &bge_forced_collapse);
502
503SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
504SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
505 "Allow ASF mode if available");
506SYSCTL_INT(_hw_bge, OID_AUTO, forced_collapse, CTLFLAG_RD, &bge_forced_collapse,
507 0, "Number of fragmented TX buffers of a frame allowed before "
508 "forced collapsing");
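
The collapsing described in the comment above is applied in the transmit path, which lies outside this hunk. As a rough sketch only — the helper name bge_collapse_tx and its drop-on-failure policy are assumptions for illustration, not code from r200264 — a driver could bound the fragment count with the stock mbuf(9) routine m_collapse() before queueing the frame:

static struct mbuf *
bge_collapse_tx(struct mbuf *m)
{
	struct mbuf *n;
	int nfrags;

	/* Count the buffers (fragments) backing this frame. */
	nfrags = 0;
	for (n = m; n != NULL; n = n->m_next)
		nfrags++;

	/* Collapsing disabled, or the frame is already compact enough. */
	if (bge_forced_collapse == 0 || nfrags <= bge_forced_collapse)
		return (m);

	/* Copy-coalesce the chain into at most bge_forced_collapse buffers. */
	n = m_collapse(m, M_DONTWAIT, bge_forced_collapse);
	if (n == NULL) {
		/* The chain is still valid on failure; drop the frame here. */
		m_freem(m);
		return (NULL);
	}
	return (n);
}

Note that both knobs are exported with CTLFLAG_RD, so hw.bge.forced_collapse is picked up at boot through the TUNABLE_INT() hook (e.g. from /boot/loader.conf) rather than being changeable at runtime.
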
509
510#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
511#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
512#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
513#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
514#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
515
516static int
517bge_has_eaddr(struct bge_softc *sc)
518{
519#ifdef __sparc64__
520 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
521 device_t dev;
522 uint32_t subvendor;
523
524 dev = sc->bge_dev;
525
526 /*
527 * The on-board BGEs found in sun4u machines aren't fitted with
528 * an EEPROM which means that we have to obtain the MAC address
529 * via OFW and that some tests will always fail. We distinguish
530 * such BGEs by the subvendor ID, which also has to be obtained
531 * from OFW instead of the PCI configuration space as the latter
532 * indicates Broadcom as the subvendor of the netboot interface.
533 * For early Blade 1500 and 2500 we even have to check the OFW
534 * device path as the subvendor ID always defaults to Broadcom
535 * there.
536 */
537 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
538 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
539 subvendor == SUN_VENDORID)
540 return (0);
541 memset(buf, 0, sizeof(buf));
542 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
543 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
544 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
545 return (0);
546 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
547 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
548 return (0);
549 }
550#endif
551 return (1);
552}
553
554static uint32_t
555bge_readmem_ind(struct bge_softc *sc, int off)
556{
557 device_t dev;
558 uint32_t val;
559
560 dev = sc->bge_dev;
561
562 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
563 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
564 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
565 return (val);
566}
567
568static void
569bge_writemem_ind(struct bge_softc *sc, int off, int val)
570{
571 device_t dev;
572
573 dev = sc->bge_dev;
574
575 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
576 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
577 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
578}
579
580/*
581 * PCI Express only
582 */
583static void
584bge_set_max_readrq(struct bge_softc *sc)
585{
586 device_t dev;
587 uint16_t val;
588
589 dev = sc->bge_dev;
590
591 val = pci_read_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
592 if ((val & PCIM_EXP_CTL_MAX_READ_REQUEST) !=
593 BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
594 if (bootverbose)
595 device_printf(dev, "adjust device control 0x%04x ",
596 val);
597 val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
598 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
599 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
600 val, 2);
601 if (bootverbose)
602 printf("-> 0x%04x\n", val);
603 }
604}
605
606#ifdef notdef
607static uint32_t
608bge_readreg_ind(struct bge_softc *sc, int off)
609{
610 device_t dev;
611
612 dev = sc->bge_dev;
613
614 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
615 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
616}
617#endif
618
619static void
620bge_writereg_ind(struct bge_softc *sc, int off, int val)
621{
622 device_t dev;
623
624 dev = sc->bge_dev;
625
626 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
627 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
628}
629
630static void
631bge_writemem_direct(struct bge_softc *sc, int off, int val)
632{
633 CSR_WRITE_4(sc, off, val);
634}
635
636static void
637bge_writembx(struct bge_softc *sc, int off, int val)
638{
639 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
640 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
641
642 CSR_WRITE_4(sc, off, val);
643}
644
645/*
646 * Map a single buffer address.
647 */
648
649static void
650bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
651{
652 struct bge_dmamap_arg *ctx;
653
654 if (error)
655 return;
656
657 ctx = arg;
658
659 if (nseg > ctx->bge_maxsegs) {
660 ctx->bge_maxsegs = 0;
661 return;
662 }
663
664 ctx->bge_busaddr = segs->ds_addr;
665}
666
667static uint8_t
668bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
669{
670 uint32_t access, byte = 0;
671 int i;
672
673 /* Lock. */
674 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
675 for (i = 0; i < 8000; i++) {
676 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
677 break;
678 DELAY(20);
679 }
680 if (i == 8000)
681 return (1);
682
683 /* Enable access. */
684 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
685 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
686
687 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
688 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
689 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
690 DELAY(10);
691 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
692 DELAY(10);
693 break;
694 }
695 }
696
697 if (i == BGE_TIMEOUT * 10) {
698 if_printf(sc->bge_ifp, "nvram read timed out\n");
699 return (1);
700 }
701
702 /* Get result. */
703 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
704
705 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
706
707 /* Disable access. */
708 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
709
710 /* Unlock. */
711 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
712 CSR_READ_4(sc, BGE_NVRAM_SWARB);
713
714 return (0);
715}
716
717/*
718 * Read a sequence of bytes from NVRAM.
719 */
720static int
721bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
722{
723 int err = 0, i;
724 uint8_t byte = 0;
725
726 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
727 return (1);
728
729 for (i = 0; i < cnt; i++) {
730 err = bge_nvram_getbyte(sc, off + i, &byte);
731 if (err)
732 break;
733 *(dest + i) = byte;
734 }
735
736 return (err ? 1 : 0);
737}
738
739/*
740 * Read a byte of data stored in the EEPROM at address 'addr.' The
741 * BCM570x supports both the traditional bitbang interface and an
742 * auto access interface for reading the EEPROM. We use the auto
743 * access method.
744 */
745static uint8_t
746bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
747{
748 int i;
749 uint32_t byte = 0;
750
751 /*
752 * Enable use of auto EEPROM access so we can avoid
753 * having to use the bitbang method.
754 */
755 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
756
757 /* Reset the EEPROM, load the clock period. */
758 CSR_WRITE_4(sc, BGE_EE_ADDR,
759 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
760 DELAY(20);
761
762 /* Issue the read EEPROM command. */
763 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
764
765 /* Wait for completion */
766 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
767 DELAY(10);
768 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
769 break;
770 }
771
772 if (i == BGE_TIMEOUT * 10) {
773 device_printf(sc->bge_dev, "EEPROM read timed out\n");
774 return (1);
775 }
776
777 /* Get result. */
778 byte = CSR_READ_4(sc, BGE_EE_DATA);
779
780 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
781
782 return (0);
783}
784
785/*
786 * Read a sequence of bytes from the EEPROM.
787 */
788static int
789bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
790{
791 int i, error = 0;
792 uint8_t byte = 0;
793
794 for (i = 0; i < cnt; i++) {
795 error = bge_eeprom_getbyte(sc, off + i, &byte);
796 if (error)
797 break;
798 *(dest + i) = byte;
799 }
800
801 return (error ? 1 : 0);
802}
803
804static int
805bge_miibus_readreg(device_t dev, int phy, int reg)
806{
807 struct bge_softc *sc;
808 uint32_t val, autopoll;
809 int i;
810
811 sc = device_get_softc(dev);
812
813 /*
814 * Broadcom's own driver always assumes the internal
815 * PHY is at GMII address 1. On some chips, the PHY responds
816 * to accesses at all addresses, which could cause us to
817 * bogusly attach the PHY 32 times at probe time. Always
818 * restricting the lookup to address 1 is simpler than
819 * trying to figure out which chip revisions should be
820 * special-cased.
821 */
822 if (phy != 1)
823 return (0);
824
825 /* Reading with autopolling on may trigger PCI errors */
826 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
827 if (autopoll & BGE_MIMODE_AUTOPOLL) {
828 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
829 DELAY(40);
830 }
831
832 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
833 BGE_MIPHY(phy) | BGE_MIREG(reg));
834
835 for (i = 0; i < BGE_TIMEOUT; i++) {
836 DELAY(10);
837 val = CSR_READ_4(sc, BGE_MI_COMM);
838 if (!(val & BGE_MICOMM_BUSY))
839 break;
840 }
841
842 if (i == BGE_TIMEOUT) {
843 device_printf(sc->bge_dev,
844 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
845 phy, reg, val);
846 val = 0;
847 goto done;
848 }
849
850 DELAY(5);
851 val = CSR_READ_4(sc, BGE_MI_COMM);
852
853done:
854 if (autopoll & BGE_MIMODE_AUTOPOLL) {
855 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
856 DELAY(40);
857 }
858
859 if (val & BGE_MICOMM_READFAIL)
860 return (0);
861
862 return (val & 0xFFFF);
863}
864
865static int
866bge_miibus_writereg(device_t dev, int phy, int reg, int val)
867{
868 struct bge_softc *sc;
869 uint32_t autopoll;
870 int i;
871
872 sc = device_get_softc(dev);
873
874 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
875 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
876 return (0);
877
878 /* Reading with autopolling on may trigger PCI errors */
879 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
880 if (autopoll & BGE_MIMODE_AUTOPOLL) {
881 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
882 DELAY(40);
883 }
884
885 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
886 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
887
888 for (i = 0; i < BGE_TIMEOUT; i++) {
889 DELAY(10);
890 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
891 DELAY(5);
892 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
893 break;
894 }
895 }
896
897 if (i == BGE_TIMEOUT) {
898 device_printf(sc->bge_dev,
899 "PHY write timed out (phy %d, reg %d, val %d)\n",
900 phy, reg, val);
901 return (0);
902 }
903
904 if (autopoll & BGE_MIMODE_AUTOPOLL) {
905 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
906 DELAY(40);
907 }
908
909 return (0);
910}
911
912static void
913bge_miibus_statchg(device_t dev)
914{
915 struct bge_softc *sc;
916 struct mii_data *mii;
917 sc = device_get_softc(dev);
918 mii = device_get_softc(sc->bge_miibus);
919
920 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
921 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
922 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
923 else
924 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
925
926 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
927 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
928 else
929 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
930}
931
932/*
933 * Initialize a standard receive ring descriptor.
934 */
935static int
936bge_newbuf_std(struct bge_softc *sc, int i)
937{
938 struct mbuf *m;
939 struct bge_rx_bd *r;
940 bus_dma_segment_t segs[1];
941 bus_dmamap_t map;
942 int error, nsegs;
943
944 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
945 if (m == NULL)
946 return (ENOBUFS);
947 m->m_len = m->m_pkthdr.len = MCLBYTES;
948 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
949 m_adj(m, ETHER_ALIGN);
950
951 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
952 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
953 if (error != 0) {
954 m_freem(m);
955 return (error);
956 }
957 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
958 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
959 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
960 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
961 sc->bge_cdata.bge_rx_std_dmamap[i]);
962 }
963 map = sc->bge_cdata.bge_rx_std_dmamap[i];
964 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
965 sc->bge_cdata.bge_rx_std_sparemap = map;
966 sc->bge_cdata.bge_rx_std_chain[i] = m;
967 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
968 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
969 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
970 r->bge_flags = BGE_RXBDFLAG_END;
971 r->bge_len = segs[0].ds_len;
972 r->bge_idx = i;
973
974 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
975 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
976
977 return (0);
978}
979
980/*
981 * Initialize a jumbo receive ring descriptor. This allocates
982 * a jumbo buffer from the pool managed internally by the driver.
983 */
984static int
985bge_newbuf_jumbo(struct bge_softc *sc, int i)
986{
987 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
988 bus_dmamap_t map;
989 struct bge_extrx_bd *r;
990 struct mbuf *m;
991 int error, nsegs;
992
993 MGETHDR(m, M_DONTWAIT, MT_DATA);
994 if (m == NULL)
995 return (ENOBUFS);
996
997 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
998 if (!(m->m_flags & M_EXT)) {
999 m_freem(m);
1000 return (ENOBUFS);
1001 }
1002 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1003 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1004 m_adj(m, ETHER_ALIGN);
1005
1006 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1007 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1008 if (error != 0) {
1009 m_freem(m);
1010 return (error);
1011 }
1012
1013 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1014 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1015 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1016 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1017 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1018 }
1019 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1020 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1021 sc->bge_cdata.bge_rx_jumbo_sparemap;
1022 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1023 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1024 /*
1025 * Fill in the extended RX buffer descriptor.
1026 */
1027 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1028 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1029 r->bge_idx = i;
1030 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1031 switch (nsegs) {
1032 case 4:
1033 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1034 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1035 r->bge_len3 = segs[3].ds_len;
1036 case 3:
1037 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1038 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1039 r->bge_len2 = segs[2].ds_len;
1040 case 2:
1041 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1042 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1043 r->bge_len1 = segs[1].ds_len;
1044 case 1:
1045 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1046 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1047 r->bge_len0 = segs[0].ds_len;
1048 break;
1049 default:
1050 panic("%s: %d segments\n", __func__, nsegs);
1051 }
1052
1053 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1054 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1055
1056 return (0);
1057}
1058
1059/*
1060 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1061 * that's 1MB of memory, which is a lot. For now, we fill only the first
1062 * 256 ring entries and hope that our CPU is fast enough to keep up with
1063 * the NIC.
1064 */
1065static int
1066bge_init_rx_ring_std(struct bge_softc *sc)
1067{
1068 int error, i;
1069
1070 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1071 sc->bge_std = 0;
1072 for (i = 0; i < BGE_SSLOTS; i++) {
1073 if ((error = bge_newbuf_std(sc, i)) != 0)
1074 return (error);
1075 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1076 }
1077
1078 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1079 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1080
1081 sc->bge_std = i - 1;
1082 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1083
1084 return (0);
1085}
1086
1087static void
1088bge_free_rx_ring_std(struct bge_softc *sc)
1089{
1090 int i;
1091
1092 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1093 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1094 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1095 sc->bge_cdata.bge_rx_std_dmamap[i],
1096 BUS_DMASYNC_POSTREAD);
1097 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1098 sc->bge_cdata.bge_rx_std_dmamap[i]);
1099 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1100 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1101 }
1102 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1103 sizeof(struct bge_rx_bd));
1104 }
1105}
1106
1107static int
1108bge_init_rx_ring_jumbo(struct bge_softc *sc)
1109{
1110 struct bge_rcb *rcb;
1111 int error, i;
1112
1113 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1114 sc->bge_jumbo = 0;
1115 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1116 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1117 return (error);
1118 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1119	}
1120
1121 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1122 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1123
1124 sc->bge_jumbo = i - 1;
1125
1126 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1127 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1128 BGE_RCB_FLAG_USE_EXT_RX_BD);
1129 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1130
1131 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1132
1133 return (0);
1134}
1135
1136static void
1137bge_free_rx_ring_jumbo(struct bge_softc *sc)
1138{
1139 int i;
1140
1141 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1142 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1143 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1144 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1145 BUS_DMASYNC_POSTREAD);
1146 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1147 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1148 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1149 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1150 }
1151 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1152 sizeof(struct bge_extrx_bd));
1153 }
1154}
1155
1156static void
1157bge_free_tx_ring(struct bge_softc *sc)
1158{
1159 int i;
1160
1161 if (sc->bge_ldata.bge_tx_ring == NULL)
1162 return;
1163
1164 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1165 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1166 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1167 sc->bge_cdata.bge_tx_dmamap[i],
1168 BUS_DMASYNC_POSTWRITE);
1169 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1170 sc->bge_cdata.bge_tx_dmamap[i]);
1171 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1172 sc->bge_cdata.bge_tx_chain[i] = NULL;
1173 }
1174 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1175 sizeof(struct bge_tx_bd));
1176 }
1177}
1178
1179static int
1180bge_init_tx_ring(struct bge_softc *sc)
1181{
1182 sc->bge_txcnt = 0;
1183 sc->bge_tx_saved_considx = 0;
1184
1185 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1186 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1187 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1188
1189 /* Initialize transmit producer index for host-memory send ring. */
1190 sc->bge_tx_prodidx = 0;
1191 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1192
1193 /* 5700 b2 errata */
1194 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1195 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1196
1197 /* NIC-memory send ring not used; initialize to zero. */
1198 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1199 /* 5700 b2 errata */
1200 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1201 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1202
1203 return (0);
1204}
1205
1206static void
1207bge_setpromisc(struct bge_softc *sc)
1208{
1209 struct ifnet *ifp;
1210
1211 BGE_LOCK_ASSERT(sc);
1212
1213 ifp = sc->bge_ifp;
1214
1215 /* Enable or disable promiscuous mode as needed. */
1216 if (ifp->if_flags & IFF_PROMISC)
1217 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1218 else
1219 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1220}
1221
1222static void
1223bge_setmulti(struct bge_softc *sc)
1224{
1225 struct ifnet *ifp;
1226 struct ifmultiaddr *ifma;
1227 uint32_t hashes[4] = { 0, 0, 0, 0 };
1228 int h, i;
1229
1230 BGE_LOCK_ASSERT(sc);
1231
1232 ifp = sc->bge_ifp;
1233
1234 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1235 for (i = 0; i < 4; i++)
1236 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1237 return;
1238 }
1239
1240 /* First, zot all the existing filters. */
1241 for (i = 0; i < 4; i++)
1242 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1243
1244 /* Now program new ones. */
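	/*
	 * Each address is hashed to 7 bits with a little-endian
	 * CRC32 of the link-level address; the upper two bits of the
	 * hash select one of the four 32-bit BGE_MAR registers and
	 * the lower five bits select a bit within it.
	 */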
1245 if_maddr_rlock(ifp);
1246 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1247 if (ifma->ifma_addr->sa_family != AF_LINK)
1248 continue;
1249 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1250 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1251 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1252 }
1253 if_maddr_runlock(ifp);
1254
1255 for (i = 0; i < 4; i++)
1256 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1257}
1258
1259static void
1260bge_setvlan(struct bge_softc *sc)
1261{
1262 struct ifnet *ifp;
1263
1264 BGE_LOCK_ASSERT(sc);
1265
1266 ifp = sc->bge_ifp;
1267
1268 /* Enable or disable VLAN tag stripping as needed. */
1269 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1270 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1271 else
1272 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1273}
1274
1275static void
1276 bge_sig_pre_reset(struct bge_softc *sc, int type)
1279{
1280 /*
1281 * Some chips don't like this so only do this if ASF is enabled
1282 */
1283 if (sc->bge_asf_mode)
1284 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1285
1286 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1287 switch (type) {
1288 case BGE_RESET_START:
1289 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1290 break;
1291 case BGE_RESET_STOP:
1292 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1293 break;
1294 }
1295 }
1296}
1297
1298static void
1299 bge_sig_post_reset(struct bge_softc *sc, int type)
1302{
1303 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1304 switch (type) {
1305 case BGE_RESET_START:
1306 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1307 /* START DONE */
1308 break;
1309 case BGE_RESET_STOP:
1310 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1311 break;
1312 }
1313 }
1314}
1315
1316static void
1317 bge_sig_legacy(struct bge_softc *sc, int type)
1320{
1321 if (sc->bge_asf_mode) {
1322 switch (type) {
1323 case BGE_RESET_START:
1324 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1325 break;
1326 case BGE_RESET_STOP:
1327 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1328 break;
1329 }
1330 }
1331}
1332
1333void bge_stop_fw(struct bge_softc *);
1334void
1335 bge_stop_fw(struct bge_softc *sc)
1337{
1338 int i;
1339
1340 if (sc->bge_asf_mode) {
1341 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1342 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1343 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1344
1345 for (i = 0; i < 100; i++ ) {
1346 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1347 break;
1348 DELAY(10);
1349 }
1350 }
1351}
1352
1353/*
1354 * Do endian, PCI and DMA initialization.
1355 */
1356static int
1357bge_chipinit(struct bge_softc *sc)
1358{
1359 uint32_t dma_rw_ctl;
1360 int i;
1361
1362 /* Set endianness before we access any non-PCI registers. */
1363 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1364
1365 /* Clear the MAC control register */
1366 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1367
1368 /*
1369 * Clear the MAC statistics block in the NIC's
1370 * internal memory.
1371 */
1372 for (i = BGE_STATS_BLOCK;
1373 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1374 BGE_MEMWIN_WRITE(sc, i, 0);
1375
1376 for (i = BGE_STATUS_BLOCK;
1377 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1378 BGE_MEMWIN_WRITE(sc, i, 0);
1379
1380 /*
1381 * Set up the PCI DMA control register.
1382 */
1383 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1384 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1385 if (sc->bge_flags & BGE_FLAG_PCIE) {
1386 /* Read watermark not used, 128 bytes for write. */
1387 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1388 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1389 if (BGE_IS_5714_FAMILY(sc)) {
1390 /* 256 bytes for read and write. */
1391 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1392 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1393 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1394 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1395 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1396 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1397 /* 1536 bytes for read, 384 bytes for write. */
1398 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1399 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1400 } else {
1401 /* 384 bytes for read and write. */
1402 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1403 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1404 0x0F;
1405 }
1406 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1407 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1408 uint32_t tmp;
1409
1410 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1411 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1412 if (tmp == 6 || tmp == 7)
1413 dma_rw_ctl |=
1414 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1415
1416 /* Set PCI-X DMA write workaround. */
1417 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1418 }
1419 } else {
1420 /* Conventional PCI bus: 256 bytes for read and write. */
1421 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1422 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1423
1424 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1425 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1426 dma_rw_ctl |= 0x0F;
1427 }
1428 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1429 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1430 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1431 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1432 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1433 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1434 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1435 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1436
1437 /*
1438 * Set up general mode register.
1439 */
1440 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1441 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1442 BGE_MODECTL_TX_NO_PHDR_CSUM);
1443
1444 /*
1445	 * The BCM5701 B5 has a bug that causes data corruption when using
1446 * 64-bit DMA reads, which can be terminated early and then
1447 * completed later as 32-bit accesses, in combination with
1448 * certain bridges.
1449 */
1450 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1451 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1452 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1453
1454 /*
1455 * Tell the firmware the driver is running
1456 */
1457 if (sc->bge_asf_mode & ASF_STACKUP)
1458 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1459
1460 /*
1461 * Disable memory write invalidate. Apparently it is not supported
1462 * properly by these devices. Also ensure that INTx isn't disabled,
1463 * as these chips need it even when using MSI.
1464 */
1465 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1466 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1467
1468	/* Set the timer prescaler (always 66MHz) */
1469 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1470
1471 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1472 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1473 DELAY(40); /* XXX */
1474
1475 /* Put PHY into ready state */
1476 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1477 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1478 DELAY(40);
1479 }
1480
1481 return (0);
1482}
1483
1484static int
1485bge_blockinit(struct bge_softc *sc)
1486{
1487 struct bge_rcb *rcb;
1488 bus_size_t vrcb;
1489 bge_hostaddr taddr;
1490 uint32_t val;
1491 int i;
1492
1493 /*
1494 * Initialize the memory window pointer register so that
1495 * we can access the first 32K of internal NIC RAM. This will
1496 * allow us to set up the TX send ring RCBs and the RX return
1497 * ring RCBs, plus other things which live in NIC memory.
1498 */
1499 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1500
1501 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1502
1503 if (!(BGE_IS_5705_PLUS(sc))) {
1504 /* Configure mbuf memory pool */
1505 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1506 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1507 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1508 else
1509 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1510
1511 /* Configure DMA resource pool */
1512 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1513 BGE_DMA_DESCRIPTORS);
1514 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1515 }
1516
1517 /* Configure mbuf pool watermarks */
1518 if (!BGE_IS_5705_PLUS(sc)) {
1519 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1520 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1521 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1522 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1523 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1524 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1525 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1526 } else {
1527 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1528 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1529 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1530 }
1531
1532 /* Configure DMA resource watermarks */
1533 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1534 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1535
1536 /* Enable buffer manager */
1537 if (!(BGE_IS_5705_PLUS(sc))) {
1538 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1539 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1540
1541 /* Poll for buffer manager start indication */
1542 for (i = 0; i < BGE_TIMEOUT; i++) {
1543 DELAY(10);
1544 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1545 break;
1546 }
1547
1548 if (i == BGE_TIMEOUT) {
1549 device_printf(sc->bge_dev,
1550 "buffer manager failed to start\n");
1551 return (ENXIO);
1552 }
1553 }
1554
1555 /* Enable flow-through queues */
1556 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1557 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1558
1559 /* Wait until queue initialization is complete */
1560 for (i = 0; i < BGE_TIMEOUT; i++) {
1561 DELAY(10);
1562 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1563 break;
1564 }
1565
1566 if (i == BGE_TIMEOUT) {
1567 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1568 return (ENXIO);
1569 }
1570
1571 /* Initialize the standard RX ring control block */
1572 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1573 rcb->bge_hostaddr.bge_addr_lo =
1574 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1575 rcb->bge_hostaddr.bge_addr_hi =
1576 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1577 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1578 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1579 if (BGE_IS_5705_PLUS(sc))
1580 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1581 else
1582 rcb->bge_maxlen_flags =
1583 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1584 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1585 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1586 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1587
1588 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1589 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1590
1591 /*
1592	 * Initialize the jumbo RX ring control block.
1593 * We set the 'ring disabled' bit in the flags
1594 * field until we're actually ready to start
1595 * using this ring (i.e. once we set the MTU
1596 * high enough to require it).
1597 */
1598 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1599 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1600
1601 rcb->bge_hostaddr.bge_addr_lo =
1602 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1603 rcb->bge_hostaddr.bge_addr_hi =
1604 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1605 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1606 sc->bge_cdata.bge_rx_jumbo_ring_map,
1607 BUS_DMASYNC_PREREAD);
1608 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1609 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1610 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1611 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1612 rcb->bge_hostaddr.bge_addr_hi);
1613 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1614 rcb->bge_hostaddr.bge_addr_lo);
1615
1616 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1617 rcb->bge_maxlen_flags);
1618 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1619
1620 /* Set up dummy disabled mini ring RCB */
1621 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1622 rcb->bge_maxlen_flags =
1623 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1624 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1625 rcb->bge_maxlen_flags);
1626 }
1627
1628 /*
1629	 * Set the BD ring replenish thresholds. The recommended
1630 * values are 1/8th the number of descriptors allocated to
1631 * each ring.
1632 * XXX The 5754 requires a lower threshold, so it might be a
1633 * requirement of all 575x family chips. The Linux driver sets
1634 * the lower threshold for all 5705 family chips as well, but there
1635 * are reports that it might not need to be so strict.
1636 *
1637 * XXX Linux does some extra fiddling here for the 5906 parts as
1638 * well.
1639 */
1640 if (BGE_IS_5705_PLUS(sc))
1641 val = 8;
1642 else
1643 val = BGE_STD_RX_RING_CNT / 8;
1644 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1645 if (BGE_IS_JUMBO_CAPABLE(sc))
1646 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1647 BGE_JUMBO_RX_RING_CNT/8);
1648
1649 /*
1650 * Disable all unused send rings by setting the 'ring disabled'
1651 * bit in the flags field of all the TX send ring control blocks.
1652 * These are located in NIC memory.
1653 */
1654 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1655 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1656 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1657 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1658 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1659 vrcb += sizeof(struct bge_rcb);
1660 }
1661
1662 /* Configure TX RCB 0 (we use only the first ring) */
1663 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1664 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1665 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1666 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1667 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1668 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1669 if (!(BGE_IS_5705_PLUS(sc)))
1670 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1671 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1672
1673 /* Disable all unused RX return rings */
1674 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1675 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1676 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1677 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1678 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1679 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1680 BGE_RCB_FLAG_RING_DISABLED));
1681 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1682 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1683 (i * (sizeof(uint64_t))), 0);
1684 vrcb += sizeof(struct bge_rcb);
1685 }
1686
1687 /* Initialize RX ring indexes */
1688 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1689 if (BGE_IS_JUMBO_CAPABLE(sc))
1690 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1691 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1692 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1693
1694 /*
1695	 * Set up RX return ring 0.
1696 * Note that the NIC address for RX return rings is 0x00000000.
1697 * The return rings live entirely within the host, so the
1698 * nicaddr field in the RCB isn't used.
1699 */
1700 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1701 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1702 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1703 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1704 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1705 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1706 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1707
1708 /* Set random backoff seed for TX */
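	/*
	 * The seed is derived from the station address, presumably
	 * so that interfaces with different addresses choose
	 * different half-duplex collision backoff slots.
	 */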
1709 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1710 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1711 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1712 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1713 BGE_TX_BACKOFF_SEED_MASK);
1714
1715 /* Set inter-packet gap */
1716 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1717
1718 /*
1719 * Specify which ring to use for packets that don't match
1720 * any RX rules.
1721 */
1722 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1723
1724 /*
1725 * Configure number of RX lists. One interrupt distribution
1726 * list, sixteen active lists, one bad frames class.
1727 */
1728 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1729
1730	/* Initialize RX list placement stats mask. */
1731 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1732 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1733
1734 /* Disable host coalescing until we get it set up */
1735 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1736
1737 /* Poll to make sure it's shut down. */
1738 for (i = 0; i < BGE_TIMEOUT; i++) {
1739 DELAY(10);
1740 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1741 break;
1742 }
1743
1744 if (i == BGE_TIMEOUT) {
1745 device_printf(sc->bge_dev,
1746 "host coalescing engine failed to idle\n");
1747 return (ENXIO);
1748 }
1749
1750 /* Set up host coalescing defaults */
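	/*
	 * The coalescing engine delays an interrupt until either the
	 * tick count or the buffer-descriptor count threshold is
	 * reached, whichever comes first; the values here come from
	 * the tunables initialized in bge_attach().
	 */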
1751 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1752 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1753 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1754 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1755 if (!(BGE_IS_5705_PLUS(sc))) {
1756 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1757 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1758 }
1759 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1760 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1761
1762 /* Set up address of statistics block */
1763 if (!(BGE_IS_5705_PLUS(sc))) {
1764 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1765 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1766 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1767 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1768 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1769 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1770 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1771 }
1772
1773 /* Set up address of status block */
1774 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1775 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1776 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1777 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1778 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1779 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1780
1781 /* Set up status block size. */
1782 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1783 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
1784 val = BGE_STATBLKSZ_FULL;
1785 else
1786 val = BGE_STATBLKSZ_32BYTE;
1787
1788 /* Turn on host coalescing state machine */
1789 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1790
1791 /* Turn on RX BD completion state machine and enable attentions */
1792 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1793 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1794
1795 /* Turn on RX list placement state machine */
1796 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1797
1798 /* Turn on RX list selector state machine. */
1799 if (!(BGE_IS_5705_PLUS(sc)))
1800 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1801
1802 /* Turn on DMA, clear stats */
1803 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
1804 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
1805 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
1806 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
1807 ((sc->bge_flags & BGE_FLAG_TBI) ?
1808 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1809
1810 /* Set misc. local control, enable interrupts on attentions */
1811 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1812
1813#ifdef notdef
1814 /* Assert GPIO pins for PHY reset */
1815 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1816 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1817 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1818 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1819#endif
1820
1821 /* Turn on DMA completion state machine */
1822 if (!(BGE_IS_5705_PLUS(sc)))
1823 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1824
1825 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1826
1827 /* Enable host coalescing bug fix. */
1828 if (BGE_IS_5755_PLUS(sc))
1829 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1830
1831 /* Turn on write DMA state machine */
1832 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1833 DELAY(40);
1834
1835 /* Turn on read DMA state machine */
1836 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1837 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1838 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1839 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1840 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1841 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1842 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1843 if (sc->bge_flags & BGE_FLAG_PCIE)
1844 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1845 if (sc->bge_flags & BGE_FLAG_TSO)
1846 val |= BGE_RDMAMODE_TSO4_ENABLE;
1847 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1848 DELAY(40);
1849
1850 /* Turn on RX data completion state machine */
1851 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1852
1853 /* Turn on RX BD initiator state machine */
1854 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1855
1856 /* Turn on RX data and RX BD initiator state machine */
1857 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1858
1859 /* Turn on Mbuf cluster free state machine */
1860 if (!(BGE_IS_5705_PLUS(sc)))
1861 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1862
1863 /* Turn on send BD completion state machine */
1864 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1865
1866 /* Turn on send data completion state machine */
1867 val = BGE_SDCMODE_ENABLE;
1868 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1869 val |= BGE_SDCMODE_CDELAY;
1870 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1871
1872 /* Turn on send data initiator state machine */
1873 if (sc->bge_flags & BGE_FLAG_TSO)
1874 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1875 else
1876 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1877
1878 /* Turn on send BD initiator state machine */
1879 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1880
1881 /* Turn on send BD selector state machine */
1882 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1883
1884 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1885 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1886 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1887
1888 /* ack/clear link change events */
1889 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1890 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1891 BGE_MACSTAT_LINK_CHANGED);
1892 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1893
1894 /* Enable PHY auto polling (for MII/GMII only) */
1895 if (sc->bge_flags & BGE_FLAG_TBI) {
1896 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1897 } else {
1898 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1899 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1900 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1901 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1902 BGE_EVTENB_MI_INTERRUPT);
1903 }
1904
1905 /*
1906 * Clear any pending link state attention.
1907 * Otherwise some link state change events may be lost until attention
1908 * is cleared by bge_intr() -> bge_link_upd() sequence.
1909 * It's not necessary on newer BCM chips - perhaps enabling link
1910 * state change attentions implies clearing pending attention.
1911 */
1912 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1913 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1914 BGE_MACSTAT_LINK_CHANGED);
1915
1916 /* Enable link state change attentions. */
1917 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1918
1919 return (0);
1920}
1921
1922const struct bge_revision *
1923bge_lookup_rev(uint32_t chipid)
1924{
1925 const struct bge_revision *br;
1926
1927 for (br = bge_revisions; br->br_name != NULL; br++) {
1928 if (br->br_chipid == chipid)
1929 return (br);
1930 }
1931
1932 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1933 if (br->br_chipid == BGE_ASICREV(chipid))
1934 return (br);
1935 }
1936
1937 return (NULL);
1938}
1939
1940const struct bge_vendor *
1941bge_lookup_vendor(uint16_t vid)
1942{
1943 const struct bge_vendor *v;
1944
1945 for (v = bge_vendors; v->v_name != NULL; v++)
1946 if (v->v_id == vid)
1947 return (v);
1948
1949 panic("%s: unknown vendor %d", __func__, vid);
1950 return (NULL);
1951}
1952
1953/*
1954 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1955 * against our list and return its name if we find a match.
1956 *
1957 * Note that since the Broadcom controller supports VPD, we try to get
1958 * the device name string from the controller itself instead of using
1959 * the compiled-in string. This guarantees we'll always announce the
1960 * right product name. We fall back to the compiled-in string when
1961 * VPD is unavailable or corrupt.
1962 */
1963static int
1964bge_probe(device_t dev)
1965{
1966 const struct bge_type *t = bge_devs;
1967 struct bge_softc *sc = device_get_softc(dev);
1968 uint16_t vid, did;
1969
1970 sc->bge_dev = dev;
1971 vid = pci_get_vendor(dev);
1972 did = pci_get_device(dev);
1973	while (t->bge_vid != 0) {
1974 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1975 char model[64], buf[96];
1976 const struct bge_revision *br;
1977 const struct bge_vendor *v;
1978 uint32_t id;
1979
1980 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1981 BGE_PCIMISCCTL_ASICREV_SHIFT;
1982 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
1983 id = pci_read_config(dev,
1984 BGE_PCI_PRODID_ASICREV, 4);
1985 br = bge_lookup_rev(id);
1986 v = bge_lookup_vendor(vid);
1987 {
1988#if __FreeBSD_version > 700024
1989 const char *pname;
1990
1991 if (bge_has_eaddr(sc) &&
1992 pci_get_vpd_ident(dev, &pname) == 0)
1993 snprintf(model, 64, "%s", pname);
1994 else
1995#endif
1996 snprintf(model, 64, "%s %s",
1997 v->v_name,
1998 br != NULL ? br->br_name :
1999 "NetXtreme Ethernet Controller");
2000 }
2001 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2002 br != NULL ? "" : "unknown ", id);
2003 device_set_desc_copy(dev, buf);
2004 if (pci_get_subvendor(dev) == DELL_VENDORID)
2005 sc->bge_flags |= BGE_FLAG_NO_3LED;
2006 if (did == BCOM_DEVICEID_BCM5755M)
2007 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
2008 return (0);
2009 }
2010 t++;
2011 }
2012
2013 return (ENXIO);
2014}
2015
2016static void
2017bge_dma_free(struct bge_softc *sc)
2018{
2019 int i;
2020
2021 /* Destroy DMA maps for RX buffers. */
2022 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2023 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2024 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2025 sc->bge_cdata.bge_rx_std_dmamap[i]);
2026 }
2027 if (sc->bge_cdata.bge_rx_std_sparemap)
2028 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2029 sc->bge_cdata.bge_rx_std_sparemap);
2030
2031 /* Destroy DMA maps for jumbo RX buffers. */
2032 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2033 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2034 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2035 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2036 }
2037 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2038 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2039 sc->bge_cdata.bge_rx_jumbo_sparemap);
2040
2041 /* Destroy DMA maps for TX buffers. */
2042 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2043 if (sc->bge_cdata.bge_tx_dmamap[i])
2044 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2045 sc->bge_cdata.bge_tx_dmamap[i]);
2046 }
2047
2048 if (sc->bge_cdata.bge_rx_mtag)
2049 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2050 if (sc->bge_cdata.bge_tx_mtag)
2051 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2052
2054 /* Destroy standard RX ring. */
2055 if (sc->bge_cdata.bge_rx_std_ring_map)
2056 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2057 sc->bge_cdata.bge_rx_std_ring_map);
2058 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2059 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2060 sc->bge_ldata.bge_rx_std_ring,
2061 sc->bge_cdata.bge_rx_std_ring_map);
2062
2063 if (sc->bge_cdata.bge_rx_std_ring_tag)
2064 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2065
2066 /* Destroy jumbo RX ring. */
2067 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2068 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2069 sc->bge_cdata.bge_rx_jumbo_ring_map);
2070
2071 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2072 sc->bge_ldata.bge_rx_jumbo_ring)
2073 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2074 sc->bge_ldata.bge_rx_jumbo_ring,
2075 sc->bge_cdata.bge_rx_jumbo_ring_map);
2076
2077 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2078 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2079
2080 /* Destroy RX return ring. */
2081 if (sc->bge_cdata.bge_rx_return_ring_map)
2082 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2083 sc->bge_cdata.bge_rx_return_ring_map);
2084
2085 if (sc->bge_cdata.bge_rx_return_ring_map &&
2086 sc->bge_ldata.bge_rx_return_ring)
2087 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2088 sc->bge_ldata.bge_rx_return_ring,
2089 sc->bge_cdata.bge_rx_return_ring_map);
2090
2091 if (sc->bge_cdata.bge_rx_return_ring_tag)
2092 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2093
2094 /* Destroy TX ring. */
2095 if (sc->bge_cdata.bge_tx_ring_map)
2096 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2097 sc->bge_cdata.bge_tx_ring_map);
2098
2099 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2100 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2101 sc->bge_ldata.bge_tx_ring,
2102 sc->bge_cdata.bge_tx_ring_map);
2103
2104 if (sc->bge_cdata.bge_tx_ring_tag)
2105 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2106
2107 /* Destroy status block. */
2108 if (sc->bge_cdata.bge_status_map)
2109 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2110 sc->bge_cdata.bge_status_map);
2111
2112 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2113 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2114 sc->bge_ldata.bge_status_block,
2115 sc->bge_cdata.bge_status_map);
2116
2117 if (sc->bge_cdata.bge_status_tag)
2118 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2119
2120 /* Destroy statistics block. */
2121 if (sc->bge_cdata.bge_stats_map)
2122 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2123 sc->bge_cdata.bge_stats_map);
2124
2125 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2126 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2127 sc->bge_ldata.bge_stats,
2128 sc->bge_cdata.bge_stats_map);
2129
2130 if (sc->bge_cdata.bge_stats_tag)
2131 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2132
2133 /* Destroy the parent tag. */
2134 if (sc->bge_cdata.bge_parent_tag)
2135 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2136}
2137
2138static int
2139bge_dma_alloc(device_t dev)
2140{
2141 struct bge_dmamap_arg ctx;
2142 struct bge_softc *sc;
2143 bus_addr_t lowaddr;
2144 bus_size_t sbsz, txsegsz, txmaxsegsz;
2145 int i, error;
2146
2147 sc = device_get_softc(dev);
2148
2149 lowaddr = BUS_SPACE_MAXADDR;
2150 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2151 lowaddr = BGE_DMA_MAXADDR;
2152 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2153 lowaddr = BUS_SPACE_MAXADDR_32BIT;
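	/*
	 * Restricting lowaddr confines all DMA allocations below the
	 * problematic range: BGE_DMA_MAXADDR covers the 40-bit bug
	 * in the 5714/5715 bridge, and the 32-bit limit guarantees
	 * that no transfer can cross a 4GB boundary on parts with
	 * the boundary bug described in bge_attach().
	 */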
2154 /*
2155 * Allocate the parent bus DMA tag appropriate for PCI.
2156 */
2157 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2158 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2159 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2160 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2161
2162 if (error != 0) {
2163 device_printf(sc->bge_dev,
2164 "could not allocate parent dma tag\n");
2165 return (ENOMEM);
2166 }
2167
2168 /*
2169 * Create tag for Tx mbufs.
2170 */
2171 if (sc->bge_flags & BGE_FLAG_TSO) {
2172 txsegsz = BGE_TSOSEG_SZ;
2173 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2174 } else {
2175 txsegsz = MCLBYTES;
2176 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2177 }
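	/*
	 * A TSO transmit may hand the chip a single frame of up to
	 * 64KB of IP payload plus the Ethernet/VLAN header, hence
	 * the much larger maximum transfer size chosen above;
	 * non-TSO transmits never exceed BGE_NSEG_NEW mbuf clusters.
	 */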
2178 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
2179 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2180 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2181 &sc->bge_cdata.bge_tx_mtag);
2182
2183 if (error) {
2184 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2185 return (ENOMEM);
2186 }
2187
2188 /*
2189 * Create tag for Rx mbufs.
2190 */
2191 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
2192 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2193 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2194
2195 if (error) {
2196 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2197 return (ENOMEM);
2198 }
2199
2200 /* Create DMA maps for RX buffers. */
2201 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2202 &sc->bge_cdata.bge_rx_std_sparemap);
2203 if (error) {
2204 device_printf(sc->bge_dev,
2205 "can't create spare DMA map for RX\n");
2206 return (ENOMEM);
2207 }
2208 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2209 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2210 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2211 if (error) {
2212 device_printf(sc->bge_dev,
2213 "can't create DMA map for RX\n");
2214 return (ENOMEM);
2215 }
2216 }
2217
2218 /* Create DMA maps for TX buffers. */
2219 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2220 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2221 &sc->bge_cdata.bge_tx_dmamap[i]);
2222 if (error) {
2223 device_printf(sc->bge_dev,
2224 "can't create DMA map for TX\n");
2225 return (ENOMEM);
2226 }
2227 }
2228
2229 /* Create tag for standard RX ring. */
2230 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2231 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2232 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
2233 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
2234
2235 if (error) {
2236 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2237 return (ENOMEM);
2238 }
2239
2240 /* Allocate DMA'able memory for standard RX ring. */
2241 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2242 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2243 &sc->bge_cdata.bge_rx_std_ring_map);
2244 if (error)
2245 return (ENOMEM);
2246
2247 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2248
2249 /* Load the address of the standard RX ring. */
2250 ctx.bge_maxsegs = 1;
2251 ctx.sc = sc;
2252
2253 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2254 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2255 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2256
2257 if (error)
2258 return (ENOMEM);
2259
2260 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
2261
2262 /* Create tags for jumbo mbufs. */
2263 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2264 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2265 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2266 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2267 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2268 if (error) {
2269 device_printf(sc->bge_dev,
2270 "could not allocate jumbo dma tag\n");
2271 return (ENOMEM);
2272 }
2273
2274 /* Create tag for jumbo RX ring. */
2275 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2276 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2277 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2278 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2279
2280 if (error) {
2281 device_printf(sc->bge_dev,
2282 "could not allocate jumbo ring dma tag\n");
2283 return (ENOMEM);
2284 }
2285
2286 /* Allocate DMA'able memory for jumbo RX ring. */
2287 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2288 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
2289 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2290 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2291 if (error)
2292 return (ENOMEM);
2293
2294 /* Load the address of the jumbo RX ring. */
2295 ctx.bge_maxsegs = 1;
2296 ctx.sc = sc;
2297
2298 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2299 sc->bge_cdata.bge_rx_jumbo_ring_map,
2300 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2301 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2302
2303 if (error)
2304 return (ENOMEM);
2305
2306 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2307
2308 /* Create DMA maps for jumbo RX buffers. */
2309 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2310 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2311 if (error) {
2312 device_printf(sc->bge_dev,
2313 "can't create spare DMA map for jumbo RX\n");
2314 return (ENOMEM);
2315 }
2316 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2317 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2318 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2319 if (error) {
2320 device_printf(sc->bge_dev,
2321 "can't create DMA map for jumbo RX\n");
2322 return (ENOMEM);
2323 }
2324 }
2325
2326 }
2327
2328 /* Create tag for RX return ring. */
2329 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2330 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2331 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2332 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2333
2334 if (error) {
2335 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2336 return (ENOMEM);
2337 }
2338
2339 /* Allocate DMA'able memory for RX return ring. */
2340 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2341 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2342 &sc->bge_cdata.bge_rx_return_ring_map);
2343 if (error)
2344 return (ENOMEM);
2345
2346 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2347 BGE_RX_RTN_RING_SZ(sc));
2348
2349 /* Load the address of the RX return ring. */
2350 ctx.bge_maxsegs = 1;
2351 ctx.sc = sc;
2352
2353 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2354 sc->bge_cdata.bge_rx_return_ring_map,
2355 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2356 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2357
2358 if (error)
2359 return (ENOMEM);
2360
2361 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2362
2363 /* Create tag for TX ring. */
2364 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2365 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2366 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2367 &sc->bge_cdata.bge_tx_ring_tag);
2368
2369 if (error) {
2370 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2371 return (ENOMEM);
2372 }
2373
2374 /* Allocate DMA'able memory for TX ring. */
2375 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2376 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2377 &sc->bge_cdata.bge_tx_ring_map);
2378 if (error)
2379 return (ENOMEM);
2380
2381 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2382
2383 /* Load the address of the TX ring. */
2384 ctx.bge_maxsegs = 1;
2385 ctx.sc = sc;
2386
2387 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2388 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2389 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2390
2391 if (error)
2392 return (ENOMEM);
2393
2394 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2395
2396 /*
2397 * Create tag for status block.
2398	 * Because we use only a single Tx/Rx/Rx return ring, use the
2399	 * minimum status block size, except on BCM5700 AX/BX, which
2400	 * seem to want to see the full status block size regardless
2401	 * of the configured number of rings.
2402 */
2403 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2404 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2405 sbsz = BGE_STATUS_BLK_SZ;
2406 else
2407 sbsz = 32;
2408 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2409 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2410 NULL, sbsz, 1, sbsz, 0, NULL, NULL, &sc->bge_cdata.bge_status_tag);
2411
2412 if (error) {
2413 device_printf(sc->bge_dev,
2414 "could not allocate status dma tag\n");
2415 return (ENOMEM);
2416 }
2417
2418 /* Allocate DMA'able memory for status block. */
2419 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2420 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2421 &sc->bge_cdata.bge_status_map);
2422 if (error)
2423 return (ENOMEM);
2424
2425 bzero((char *)sc->bge_ldata.bge_status_block, sbsz);
2426
2427 /* Load the address of the status block. */
2428 ctx.sc = sc;
2429 ctx.bge_maxsegs = 1;
2430
2431 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2432 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2433 sbsz, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2434
2435 if (error)
2436 return (ENOMEM);
2437
2438 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2439
2440 /* Create tag for statistics block. */
2441 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2442 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2443 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2444 &sc->bge_cdata.bge_stats_tag);
2445
2446 if (error) {
2447 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2448 return (ENOMEM);
2449 }
2450
2451 /* Allocate DMA'able memory for statistics block. */
2452 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2453 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2454 &sc->bge_cdata.bge_stats_map);
2455 if (error)
2456 return (ENOMEM);
2457
2458 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2459
2460	/* Load the address of the statistics block. */
2461 ctx.sc = sc;
2462 ctx.bge_maxsegs = 1;
2463
2464 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2465 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2466 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2467
2468 if (error)
2469 return (ENOMEM);
2470
2471 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2472
2473 return (0);
2474}
2475
2476/*
2477 * Return true if this device has more than one port.
2478 */
2479static int
2480bge_has_multiple_ports(struct bge_softc *sc)
2481{
2482 device_t dev = sc->bge_dev;
2483 u_int b, d, f, fscan, s;
2484
2485 d = pci_get_domain(dev);
2486 b = pci_get_bus(dev);
2487 s = pci_get_slot(dev);
2488 f = pci_get_function(dev);
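	/*
	 * A multi-port controller appears as several PCI functions
	 * in the same domain/bus/slot, so probing every other
	 * function number at this location is sufficient to detect
	 * a sibling port.
	 */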
2489 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2490 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2491 return (1);
2492 return (0);
2493}
2494
2495/*
2496 * Return true if MSI can be used with this device.
2497 */
2498static int
2499bge_can_use_msi(struct bge_softc *sc)
2500{
2501 int can_use_msi = 0;
2502
2503 switch (sc->bge_asicrev) {
2504 case BGE_ASICREV_BCM5714_A0:
2505 case BGE_ASICREV_BCM5714:
2506 /*
2507 * Apparently, MSI doesn't work when these chips are
2508 * configured in single-port mode.
2509 */
2510 if (bge_has_multiple_ports(sc))
2511 can_use_msi = 1;
2512 break;
2513 case BGE_ASICREV_BCM5750:
2514 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2515 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2516 can_use_msi = 1;
2517 break;
2518 default:
2519 if (BGE_IS_575X_PLUS(sc))
2520 can_use_msi = 1;
2521 }
2522 return (can_use_msi);
2523}
2524
2525static int
2526bge_attach(device_t dev)
2527{
2528 struct ifnet *ifp;
2529 struct bge_softc *sc;
2530 uint32_t hwcfg = 0, misccfg;
2531 u_char eaddr[ETHER_ADDR_LEN];
2532 int error, msicount, reg, rid, trys;
2533
2534 sc = device_get_softc(dev);
2535 sc->bge_dev = dev;
2536
2537 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2538
2539 /*
2540 * Map control/status registers.
2541 */
2542 pci_enable_busmaster(dev);
2543
2544 rid = BGE_PCI_BAR0;
2545 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2546 RF_ACTIVE);
2547
2548 if (sc->bge_res == NULL) {
2549		device_printf(sc->bge_dev, "couldn't map memory\n");
2550 error = ENXIO;
2551 goto fail;
2552 }
2553
2554 /* Save various chip information. */
2555 sc->bge_chipid =
2556 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2557 BGE_PCIMISCCTL_ASICREV_SHIFT;
2558 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2559 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2560 4);
2561 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2562 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2563
2564 /*
2565 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2566 * 5705 A0 and A1 chips.
2567 */
2568 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2569 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2570 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2571 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2572 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2573
2574 if (bge_has_eaddr(sc))
2575 sc->bge_flags |= BGE_FLAG_EADDR;
2576
2577 /* Save chipset family. */
2578 switch (sc->bge_asicrev) {
2579 case BGE_ASICREV_BCM5755:
2580 case BGE_ASICREV_BCM5761:
2581 case BGE_ASICREV_BCM5784:
2582 case BGE_ASICREV_BCM5785:
2583 case BGE_ASICREV_BCM5787:
2584 case BGE_ASICREV_BCM57780:
2585 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2586 BGE_FLAG_5705_PLUS;
2587 break;
2588 case BGE_ASICREV_BCM5700:
2589 case BGE_ASICREV_BCM5701:
2590 case BGE_ASICREV_BCM5703:
2591 case BGE_ASICREV_BCM5704:
2592 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2593 break;
2594 case BGE_ASICREV_BCM5714_A0:
2595 case BGE_ASICREV_BCM5780:
2596 case BGE_ASICREV_BCM5714:
2597 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2598 /* FALLTHROUGH */
2599 case BGE_ASICREV_BCM5750:
2600 case BGE_ASICREV_BCM5752:
2601 case BGE_ASICREV_BCM5906:
2602 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2603 /* FALLTHROUGH */
2604 case BGE_ASICREV_BCM5705:
2605 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2606 break;
2607 }
2608
2609 /* Set various bug flags. */
2610 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2611 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2612 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2613 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2614 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2615 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2616 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2617 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2618 if (BGE_IS_5705_PLUS(sc) &&
2619 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2620 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2621 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2622 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2623 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2624 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2625 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2626 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2627 sc->bge_flags |= BGE_FLAG_BER_BUG;
2628 }
2629
2630 /*
2631	 * All controllers that are not 5755 or higher have a 4GB
2632	 * boundary DMA bug.
2633	 * Whenever an address crosses a multiple of the 4GB boundary
2634	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2635	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2636	 * state machine will lock up and cause the device to hang.
2637 */
2638 if (BGE_IS_5755_PLUS(sc) == 0)
2639 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2640
2641 /*
2642 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2643 * but I do not know the DEVICEID for the 5788M.
2644 */
2645 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2646 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2647 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2648 sc->bge_flags |= BGE_FLAG_5788;
2649
2650	 * Some controllers seem to require special firmware to use
2651	 * TSO.  But that firmware is not available to FreeBSD, and
2652	 * Linux claims that the TSO performed by the firmware is
2653	 * slower than hardware-based TSO.  Moreover, the firmware-based
2654	 * TSO has a known bug: it can't handle TSO when the Ethernet
2655	 * header plus IP/TCP header is greater than 80 bytes.  A
2656	 * workaround for the TSO bug exists, but it seems more
2657	 * expensive than not using TSO at all.  Some hardware also
2658	 * has the TSO bug, so limit TSO to controllers that are not
2659	 * affected by TSO issues (e.g. 5755 or higher).
2661 */
2662 if (BGE_IS_5755_PLUS(sc))
2663 sc->bge_flags |= BGE_FLAG_TSO;
2664
2665 /*
2666 * Check if this is a PCI-X or PCI Express device.
2667 */
2668 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2669 /*
2670 * Found a PCI Express capabilities register, this
2671 * must be a PCI Express device.
2672 */
2673 sc->bge_flags |= BGE_FLAG_PCIE;
2674 sc->bge_expcap = reg;
2675 bge_set_max_readrq(sc);
2676 } else {
2677 /*
2678 * Check if the device is in PCI-X Mode.
2679 * (This bit is not valid on PCI Express controllers.)
2680 */
2681 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2682 sc->bge_pcixcap = reg;
2683 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2684 BGE_PCISTATE_PCI_BUSMODE) == 0)
2685 sc->bge_flags |= BGE_FLAG_PCIX;
2686 }
2687
2688 /*
2689 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2690 * not actually a MAC controller bug but an issue with the embedded
2691 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2692 */
2693 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2694 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2695 /*
2696 * Allocate the interrupt, using MSI if possible. These devices
2697 * support 8 MSI messages, but only the first one is used in
2698 * normal operation.
2699 */
2700 rid = 0;
2701 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2702 sc->bge_msicap = reg;
2703 if (bge_can_use_msi(sc)) {
2704 msicount = pci_msi_count(dev);
2705 if (msicount > 1)
2706 msicount = 1;
2707 } else
2708 msicount = 0;
2709 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2710 rid = 1;
2711 sc->bge_flags |= BGE_FLAG_MSI;
2712 }
2713 }
2714
2715 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2716 RF_SHAREABLE | RF_ACTIVE);
2717
2718 if (sc->bge_irq == NULL) {
2719 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2720 error = ENXIO;
2721 goto fail;
2722 }
2723
2724 if (bootverbose)
2725 device_printf(dev,
2726 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2727 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2728 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2729 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2730
2731 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2732
2733 /* Try to reset the chip. */
2734 if (bge_reset(sc)) {
2735 device_printf(sc->bge_dev, "chip reset failed\n");
2736 error = ENXIO;
2737 goto fail;
2738 }
2739
2740 sc->bge_asf_mode = 0;
2741 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2742 == BGE_MAGIC_NUMBER)) {
2743 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2744 & BGE_HWCFG_ASF) {
2745 sc->bge_asf_mode |= ASF_ENABLE;
2746 sc->bge_asf_mode |= ASF_STACKUP;
2747 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2748 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2749 }
2750 }
2751 }
2752
2753 /* Try to reset the chip again the nice way. */
2754 bge_stop_fw(sc);
2755 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2756 if (bge_reset(sc)) {
2757 device_printf(sc->bge_dev, "chip reset failed\n");
2758 error = ENXIO;
2759 goto fail;
2760 }
2761
2762 bge_sig_legacy(sc, BGE_RESET_STOP);
2763 bge_sig_post_reset(sc, BGE_RESET_STOP);
2764
2765 if (bge_chipinit(sc)) {
2766 device_printf(sc->bge_dev, "chip initialization failed\n");
2767 error = ENXIO;
2768 goto fail;
2769 }
2770
2771 error = bge_get_eaddr(sc, eaddr);
2772 if (error) {
2773 device_printf(sc->bge_dev,
2774 "failed to read station address\n");
2775 error = ENXIO;
2776 goto fail;
2777 }
2778
2779 /* 5705 limits RX return ring to 512 entries. */
2780 if (BGE_IS_5705_PLUS(sc))
2781 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2782 else
2783 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2784
2785 if (bge_dma_alloc(dev)) {
2786 device_printf(sc->bge_dev,
2787 "failed to allocate DMA resources\n");
2788 error = ENXIO;
2789 goto fail;
2790 }
2791
2792 /* Set default tuneable values. */
2793 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2794 sc->bge_rx_coal_ticks = 150;
2795 sc->bge_tx_coal_ticks = 150;
2796 sc->bge_rx_max_coal_bds = 10;
2797 sc->bge_tx_max_coal_bds = 10;
2798
2799 /* Set up ifnet structure */
2800 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2801 if (ifp == NULL) {
2802 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2803 error = ENXIO;
2804 goto fail;
2805 }
2806 ifp->if_softc = sc;
2807 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2808 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2809 ifp->if_ioctl = bge_ioctl;
2810 ifp->if_start = bge_start;
2811 ifp->if_init = bge_init;
2812 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2813 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2814 IFQ_SET_READY(&ifp->if_snd);
2815 ifp->if_hwassist = BGE_CSUM_FEATURES;
2816 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2817 IFCAP_VLAN_MTU;
2818 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2819 ifp->if_hwassist |= CSUM_TSO;
2820 ifp->if_capabilities |= IFCAP_TSO4;
2821 }
2822#ifdef IFCAP_VLAN_HWCSUM
2823 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2824#endif
2825 ifp->if_capenable = ifp->if_capabilities;
2826#ifdef DEVICE_POLLING
2827 ifp->if_capabilities |= IFCAP_POLLING;
2828#endif
2829
2830 /*
2831 * 5700 B0 chips do not support checksumming correctly due
2832 * to hardware bugs.
2833 */
2834 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2835 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2836 ifp->if_capenable &= ~IFCAP_HWCSUM;
2837 ifp->if_hwassist = 0;
2838 }
2839
2840 /*
2841 * Figure out what sort of media we have by checking the
2842 * hardware config word in the first 32k of NIC internal memory,
2843 * or fall back to examining the EEPROM if necessary.
2844 * Note: on some BCM5700 cards, this value appears to be unset.
2845 * If that's the case, we have to rely on identifying the NIC
2846 * by its PCI subsystem ID, as we do below for the SysKonnect
2847 * SK-9D41.
2848 */
2849 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2850 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2851 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2852 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2853 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2854 sizeof(hwcfg))) {
2855 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2856 error = ENXIO;
2857 goto fail;
2858 }
2859 hwcfg = ntohl(hwcfg);
2860 }
2861
2862 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2863 sc->bge_flags |= BGE_FLAG_TBI;
2864
2865 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2866 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2867 sc->bge_flags |= BGE_FLAG_TBI;
2868
2869 if (sc->bge_flags & BGE_FLAG_TBI) {
2870 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2871 bge_ifmedia_sts);
2872 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2873 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2874 0, NULL);
2875 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2876 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2877 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2878 } else {
2879 /*
2880		 * Do transceiver setup and tell the firmware the
2881		 * driver is going down so we can get access to the
2882		 * PHY probe even if ASF is running.  Retry a couple
2883		 * of times if we get a conflict with the ASF firmware
2884		 * accessing the PHY.
2885 */
2886 trys = 0;
2887 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2888again:
2889 bge_asf_driver_up(sc);
2890
2891 if (mii_phy_probe(dev, &sc->bge_miibus,
2892 bge_ifmedia_upd, bge_ifmedia_sts)) {
2893 if (trys++ < 4) {
2894 device_printf(sc->bge_dev, "Try again\n");
2895 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2896 BMCR_RESET);
2897 goto again;
2898 }
2899
2900 device_printf(sc->bge_dev, "MII without any PHY!\n");
2901 error = ENXIO;
2902 goto fail;
2903 }
2904
2905 /*
2906 * Now tell the firmware we are going up after probing the PHY
2907 */
2908 if (sc->bge_asf_mode & ASF_STACKUP)
2909 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2910 }
2911
2912 /*
2913 * When using the BCM5701 in PCI-X mode, data corruption has
2914 * been observed in the first few bytes of some received packets.
2915 * Aligning the packet buffer in memory eliminates the corruption.
2916 * Unfortunately, this misaligns the packet payloads. On platforms
2917 * which do not support unaligned accesses, we will realign the
2918 * payloads by copying the received packets.
2919 */
2920 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2921 sc->bge_flags & BGE_FLAG_PCIX)
2922 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2923
2924 /*
2925 * Call MI attach routine.
2926 */
2927 ether_ifattach(ifp, eaddr);
2928 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2929
2930	/* Tell the upper layer we support long frames. */
2931 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2932
2933 /*
2934 * Hookup IRQ last.
2935 */
2936#if __FreeBSD_version > 700030
2937 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
2938 /* Take advantage of single-shot MSI. */
2939 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
2940 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
2941 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
2942 taskqueue_thread_enqueue, &sc->bge_tq);
2943 if (sc->bge_tq == NULL) {
2944 device_printf(dev, "could not create taskqueue.\n");
2945 ether_ifdetach(ifp);
2946 error = ENXIO;
2947 goto fail;
2948 }
2949 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
2950 device_get_nameunit(sc->bge_dev));
2951 error = bus_setup_intr(dev, sc->bge_irq,
2952 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
2953 &sc->bge_intrhand);
2954 if (error)
2955 ether_ifdetach(ifp);
2956 } else
2957 error = bus_setup_intr(dev, sc->bge_irq,
2958 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
2959 &sc->bge_intrhand);
2960#else
2961 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2962 bge_intr, sc, &sc->bge_intrhand);
2963#endif
2964
2965 if (error) {
2966 bge_detach(dev);
2967 device_printf(sc->bge_dev, "couldn't set up irq\n");
2968 }
2969
2970 bge_add_sysctls(sc);
2971
2972 return (0);
2973
2974fail:
2975 bge_release_resources(sc);
2976
2977 return (error);
2978}
2979
2980static int
2981bge_detach(device_t dev)
2982{
2983 struct bge_softc *sc;
2984 struct ifnet *ifp;
2985
2986 sc = device_get_softc(dev);
2987 ifp = sc->bge_ifp;
2988
2989#ifdef DEVICE_POLLING
2990 if (ifp->if_capenable & IFCAP_POLLING)
2991 ether_poll_deregister(ifp);
2992#endif
2993
2994 BGE_LOCK(sc);
2995 bge_stop(sc);
2996 bge_reset(sc);
2997 BGE_UNLOCK(sc);
2998
2999 callout_drain(&sc->bge_stat_ch);
3000
3001 if (sc->bge_tq)
3002 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3003 ether_ifdetach(ifp);
3004
3005 if (sc->bge_flags & BGE_FLAG_TBI) {
3006 ifmedia_removeall(&sc->bge_ifmedia);
3007 } else {
3008 bus_generic_detach(dev);
3009 device_delete_child(dev, sc->bge_miibus);
3010 }
3011
3012 bge_release_resources(sc);
3013
3014 return (0);
3015}
3016
3017static void
3018bge_release_resources(struct bge_softc *sc)
3019{
3020 device_t dev;
3021
3022 dev = sc->bge_dev;
3023
3024 if (sc->bge_tq != NULL)
3025 taskqueue_free(sc->bge_tq);
3026
3027 if (sc->bge_intrhand != NULL)
3028 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3029
3030 if (sc->bge_irq != NULL)
3031 bus_release_resource(dev, SYS_RES_IRQ,
3032 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3033
3034 if (sc->bge_flags & BGE_FLAG_MSI)
3035 pci_release_msi(dev);
3036
3037 if (sc->bge_res != NULL)
3038 bus_release_resource(dev, SYS_RES_MEMORY,
3039 BGE_PCI_BAR0, sc->bge_res);
3040
3041 if (sc->bge_ifp != NULL)
3042 if_free(sc->bge_ifp);
3043
3044 bge_dma_free(sc);
3045
3046 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3047 BGE_LOCK_DESTROY(sc);
3048}
3049
3050static int
3051bge_reset(struct bge_softc *sc)
3052{
3053 device_t dev;
3054 uint32_t cachesize, command, pcistate, reset, val;
3055 void (*write_op)(struct bge_softc *, int, int);
3056 uint16_t devctl;
3057 int i;
3058
3059 dev = sc->bge_dev;
3060
3061 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3062 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3063 if (sc->bge_flags & BGE_FLAG_PCIE)
3064 write_op = bge_writemem_direct;
3065 else
3066 write_op = bge_writemem_ind;
3067 } else
3068 write_op = bge_writereg_ind;
3069
3070 /* Save some important PCI state. */
3071 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3072 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3073 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3074
3075 pci_write_config(dev, BGE_PCI_MISC_CTL,
3076 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3077 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3078
3079 /* Disable fastboot on controllers that support it. */
3080 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3081 BGE_IS_5755_PLUS(sc)) {
3082 if (bootverbose)
3083 device_printf(sc->bge_dev, "Disabling fastboot\n");
3084 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3085 }
3086
3087 /*
3088 * Write the magic number to SRAM at offset 0xB50.
3089	 * When the firmware finishes its initialization, it will
3090 * write ~BGE_MAGIC_NUMBER to the same location.
3091 */
3092 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3093
3094 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3095
3096 /* XXX: Broadcom Linux driver. */
3097 if (sc->bge_flags & BGE_FLAG_PCIE) {
3098 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3099 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3100 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3101 /* Prevent PCIE link training during global reset */
3102 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3103 reset |= 1 << 29;
3104 }
3105 }
3106
3107 /*
3108 * Set GPHY Power Down Override to leave GPHY
3109 * powered up in D0 uninitialized.
3110 */
3111 if (BGE_IS_5705_PLUS(sc))
3112 reset |= 0x04000000;
3113
3114 /* Issue global reset */
3115 write_op(sc, BGE_MISC_CFG, reset);
3116
3117 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3118 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3119 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3120 val | BGE_VCPU_STATUS_DRV_RESET);
3121 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3122 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3123 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3124 }
3125
3126 DELAY(1000);
3127
3128 /* XXX: Broadcom Linux driver. */
3129 if (sc->bge_flags & BGE_FLAG_PCIE) {
3130 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3131 DELAY(500000); /* wait for link training to complete */
3132 val = pci_read_config(dev, 0xC4, 4);
3133 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3134 }
3135 devctl = pci_read_config(dev,
3136 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3137		/* Clear the Relaxed Ordering and No Snoop enable bits. */
3138		devctl &= ~(0x0010 | 0x0800);
3139		/* Set the PCIe max payload size to 128 bytes. */
3140 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3141 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3142 devctl, 2);
3143 /* Clear error status. */
3144 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3145 0, 2);
3146 }
3147
3148 /* Reset some of the PCI state that got zapped by reset. */
3149 pci_write_config(dev, BGE_PCI_MISC_CTL,
3150 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3151 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3152 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3153 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3154 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3155
3156	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3157 if (BGE_IS_5714_FAMILY(sc)) {
3158 /* This chip disables MSI on reset. */
3159 if (sc->bge_flags & BGE_FLAG_MSI) {
3160 val = pci_read_config(dev,
3161 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3162 pci_write_config(dev,
3163 sc->bge_msicap + PCIR_MSI_CTRL,
3164 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3165 val = CSR_READ_4(sc, BGE_MSI_MODE);
3166 CSR_WRITE_4(sc, BGE_MSI_MODE,
3167 val | BGE_MSIMODE_ENABLE);
3168 }
3169 val = CSR_READ_4(sc, BGE_MARB_MODE);
3170 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3171 } else
3172 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3173
3174 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3175 for (i = 0; i < BGE_TIMEOUT; i++) {
3176 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3177 if (val & BGE_VCPU_STATUS_INIT_DONE)
3178 break;
3179 DELAY(100);
3180 }
3181 if (i == BGE_TIMEOUT) {
3182 device_printf(sc->bge_dev, "reset timed out\n");
3183 return (1);
3184 }
3185 } else {
3186 /*
3187 * Poll until we see the 1's complement of the magic number.
3188 * This indicates that the firmware initialization is complete.
3189		 * We expect this to fail if no chip containing the Ethernet
3190		 * address is fitted, though.
3191 */
3192 for (i = 0; i < BGE_TIMEOUT; i++) {
3193 DELAY(10);
3194 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3195 if (val == ~BGE_MAGIC_NUMBER)
3196 break;
3197 }
3198
3199 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3200 device_printf(sc->bge_dev, "firmware handshake timed out, "
3201 "found 0x%08x\n", val);
3202 }
3203
3204 /*
3205 * XXX Wait for the value of the PCISTATE register to
3206 * return to its original pre-reset state. This is a
3207 * fairly good indicator of reset completion. If we don't
3208 * wait for the reset to fully complete, trying to read
3209 * from the device's non-PCI registers may yield garbage
3210 * results.
3211 */
3212 for (i = 0; i < BGE_TIMEOUT; i++) {
3213 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3214 break;
3215 DELAY(10);
3216 }
3217
3218 if (sc->bge_flags & BGE_FLAG_PCIE) {
3219 reset = bge_readmem_ind(sc, 0x7C00);
3220 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
3221 }
3222
3223 /* Fix up byte swapping. */
3224 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3225 BGE_MODECTL_BYTESWAP_DATA);
3226
3227 /* Tell the ASF firmware we are up */
3228 if (sc->bge_asf_mode & ASF_STACKUP)
3229 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3230
3231 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3232
3233 /*
3234 * The 5704 in TBI mode apparently needs some special
3235	 * adjustment to ensure the SERDES drive level is set
3236 * to 1.2V.
3237 */
3238 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3239 sc->bge_flags & BGE_FLAG_TBI) {
3240 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3241 val = (val & ~0xFFF) | 0x880;
3242 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3243 }
3244
3245 /* XXX: Broadcom Linux driver. */
3246 if (sc->bge_flags & BGE_FLAG_PCIE &&
3247 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3248 val = CSR_READ_4(sc, 0x7C00);
3249 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3250 }
3251 DELAY(10000);
3252
3253	return (0);
3254}
3255
3256/*
3257 * Frame reception handling. This is called if there's a frame
3258 * on the receive return list.
3259 *
3260 * Note: we have to be able to handle two possibilities here:
3261 * 1) the frame is from the jumbo receive ring
3262 * 2) the frame is from the standard receive ring
3263 */
3264
3265static int
3266bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3267{
3268 struct ifnet *ifp;
3269 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3270 uint16_t rx_cons;
3271
3272 rx_cons = sc->bge_rx_saved_considx;
3273
3274 /* Nothing to do. */
3275 if (rx_cons == rx_prod)
3276 return (rx_npkts);
3277
3278 ifp = sc->bge_ifp;
3279
3280 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3281 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3282 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3283 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
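	/*
	 * The jumbo ring is only in use when the configured MTU does
	 * not fit in a standard mbuf cluster, so sync it only then.
	 */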
3284 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3285 (MCLBYTES - ETHER_ALIGN))
3286 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3287 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3288
3289 while (rx_cons != rx_prod) {
3290 struct bge_rx_bd *cur_rx;
3291 uint32_t rxidx;
3292 struct mbuf *m = NULL;
3293 uint16_t vlan_tag = 0;
3294 int have_tag = 0;
3295
3296#ifdef DEVICE_POLLING
3297 if (ifp->if_capenable & IFCAP_POLLING) {
3298 if (sc->rxcycles <= 0)
3299 break;
3300 sc->rxcycles--;
3301 }
3302#endif
3303
3304 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3305
3306 rxidx = cur_rx->bge_idx;
3307 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3308
3309 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3310 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3311 have_tag = 1;
3312 vlan_tag = cur_rx->bge_vlan_tag;
3313 }
3314
3315 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3316 jumbocnt++;
3317 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3318 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3319 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3320 continue;
3321 }
3322 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3323 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3324 ifp->if_iqdrops++;
3325 continue;
3326 }
3327 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3328 } else {
3329 stdcnt++;
3330 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3331 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3332 continue;
3333 }
3334 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3335 if (bge_newbuf_std(sc, rxidx) != 0) {
3336 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3337 ifp->if_iqdrops++;
3338 continue;
3339 }
3340 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3341 }
3342
3343 ifp->if_ipackets++;
3344#ifndef __NO_STRICT_ALIGNMENT
3345 /*
3346 * For architectures with strict alignment we must make sure
3347 * the payload is aligned.
3348 */
3349 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3350 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3351 cur_rx->bge_len);
3352 m->m_data += ETHER_ALIGN;
3353 }
3354#endif
3355 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3356 m->m_pkthdr.rcvif = ifp;
3357
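		/*
		 * The chip reports the raw 16-bit IP header checksum; a
		 * valid header sums to 0xFFFF.  The TCP/UDP checksum is
		 * handed up in csum_data with CSUM_PSEUDO_HDR set so the
		 * stack can finish the validation itself.
		 */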
3358 if (ifp->if_capenable & IFCAP_RXCSUM) {
3359 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3360 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3361 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3362 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3363 }
3364 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3365 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3366 m->m_pkthdr.csum_data =
3367 cur_rx->bge_tcp_udp_csum;
3368 m->m_pkthdr.csum_flags |=
3369 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3370 }
3371 }
3372
3373 /*
3374 * If we received a packet with a vlan tag,
3375 * attach that information to the packet.
3376 */
3377 if (have_tag) {
3378#if __FreeBSD_version > 700022
3379 m->m_pkthdr.ether_vtag = vlan_tag;
3380 m->m_flags |= M_VLANTAG;
3381#else
3382 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3383 if (m == NULL)
3384 continue;
3385#endif
3386 }
3387
3388 if (holdlck != 0) {
3389 BGE_UNLOCK(sc);
3390 (*ifp->if_input)(ifp, m);
3391 BGE_LOCK(sc);
3392 } else
3393 (*ifp->if_input)(ifp, m);
3394 rx_npkts++;
3395
3396 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3397 return (rx_npkts);
3398 }
3399
3400 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3401 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3402 if (stdcnt > 0)
3403 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3404 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3405
3406 if (jumbocnt > 0)
3407 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3408 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3409
3410 sc->bge_rx_saved_considx = rx_cons;
3411 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3412 if (stdcnt)
3413 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3414 if (jumbocnt)
3415 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3416#ifdef notyet
3417 /*
3418 * This register wraps very quickly under heavy packet drops.
3419 * If you need correct statistics, you can enable this check.
3420 */
3421 if (BGE_IS_5705_PLUS(sc))
3422 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3423#endif
3424 return (rx_npkts);
3425}
3426
3427static void
3428bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3429{
3430 struct bge_tx_bd *cur_tx = NULL;
3431 struct ifnet *ifp;
3432
3433 BGE_LOCK_ASSERT(sc);
3434
3435 /* Nothing to do. */
3436 if (sc->bge_tx_saved_considx == tx_cons)
3437 return;
3438
3439 ifp = sc->bge_ifp;
3440
3441 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3442 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3443 /*
3444 * Go through our tx ring and free mbufs for those
3445 * frames that have been sent.
3446 */
3447 while (sc->bge_tx_saved_considx != tx_cons) {
3448 uint32_t idx = 0;
3449
3450 idx = sc->bge_tx_saved_considx;
3451 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3452 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3453 ifp->if_opackets++;
3454 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3455 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3456 sc->bge_cdata.bge_tx_dmamap[idx],
3457 BUS_DMASYNC_POSTWRITE);
3458 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3459 sc->bge_cdata.bge_tx_dmamap[idx]);
3460 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3461 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3462 }
3463 sc->bge_txcnt--;
3464 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3465 }
3466
3467 if (cur_tx != NULL)
3468 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3469 if (sc->bge_txcnt == 0)
3470 sc->bge_timer = 0;
3471}
3472
3473#ifdef DEVICE_POLLING
3474static int
3475bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3476{
3477 struct bge_softc *sc = ifp->if_softc;
3478 uint16_t rx_prod, tx_cons;
3479 uint32_t statusword;
3480 int rx_npkts = 0;
3481
3482 BGE_LOCK(sc);
3483 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3484 BGE_UNLOCK(sc);
3485 return (rx_npkts);
3486 }
3487
3488 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3489 sc->bge_cdata.bge_status_map,
3490 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3491 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3492 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3493
3494 statusword = atomic_readandclear_32(
3495 &sc->bge_ldata.bge_status_block->bge_status);
3496
3497 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3498 sc->bge_cdata.bge_status_map,
3499 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3500
3501 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3502 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3503 sc->bge_link_evt++;
3504
3505 if (cmd == POLL_AND_CHECK_STATUS)
3506 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3507 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3508 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3509 bge_link_upd(sc);
3510
3511 sc->rxcycles = count;
3512 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3513 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3514 BGE_UNLOCK(sc);
3515 return (rx_npkts);
3516 }
3517 bge_txeof(sc, tx_cons);
3518 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3519 bge_start_locked(ifp);
3520
3521 BGE_UNLOCK(sc);
3522 return (rx_npkts);
3523}
3524#endif /* DEVICE_POLLING */
3525
3526static int
3527bge_msi_intr(void *arg)
3528{
3529 struct bge_softc *sc;
3530
3531 sc = (struct bge_softc *)arg;
3532 /*
3533	 * This interrupt is not shared and the controller has
3534	 * already disabled further interrupts.
3535 */
3536 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3537 return (FILTER_HANDLED);
3538}
3539
3540static void
3541bge_intr_task(void *arg, int pending)
3542{
3543 struct bge_softc *sc;
3544 struct ifnet *ifp;
3545 uint32_t status;
3546 uint16_t rx_prod, tx_cons;
3547
3548 sc = (struct bge_softc *)arg;
3549 ifp = sc->bge_ifp;
3550
3551 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3552 return;
3553
3554 /* Get updated status block. */
3555 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3556 sc->bge_cdata.bge_status_map,
3557 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3558
3559	/* Save producer/consumer indexes. */
3560 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3561 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3562 status = sc->bge_ldata.bge_status_block->bge_status;
3563 sc->bge_ldata.bge_status_block->bge_status = 0;
3564 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3565 sc->bge_cdata.bge_status_map,
3566 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3567	/* Ack the interrupt and let the controller work. */
3568 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3569
3570 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) {
3571 BGE_LOCK(sc);
3572 bge_link_upd(sc);
3573 BGE_UNLOCK(sc);
3574 }
3575 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3576 /* Check RX return ring producer/consumer. */
3577 bge_rxeof(sc, rx_prod, 0);
3578 }
3579 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3580 BGE_LOCK(sc);
3581 /* Check TX ring producer/consumer. */
3582 bge_txeof(sc, tx_cons);
3583 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3584 bge_start_locked(ifp);
3585 BGE_UNLOCK(sc);
3586 }
3587}
3588
3589static void
3590bge_intr(void *xsc)
3591{
3592 struct bge_softc *sc;
3593 struct ifnet *ifp;
3594 uint32_t statusword;
3595 uint16_t rx_prod, tx_cons;
3596
3597 sc = xsc;
3598
3599 BGE_LOCK(sc);
3600
3601 ifp = sc->bge_ifp;
3602
3603#ifdef DEVICE_POLLING
3604 if (ifp->if_capenable & IFCAP_POLLING) {
3605 BGE_UNLOCK(sc);
3606 return;
3607 }
3608#endif
3609
3610 /*
3611 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3612 * disable interrupts by writing nonzero like we used to, since with
3613 * our current organization this just gives complications and
3614 * pessimizations for re-enabling interrupts. We used to have races
3615 * instead of the necessary complications. Disabling interrupts
3616 * would just reduce the chance of a status update while we are
3617 * running (by switching to the interrupt-mode coalescence
3618 * parameters), but this chance is already very low so it is more
3619 * efficient to get another interrupt than prevent it.
3620 *
3621 * We do the ack first to ensure another interrupt if there is a
3622 * status update after the ack. We don't check for the status
3623 * changing later because it is more efficient to get another
3624 * interrupt than prevent it, not quite as above (not checking is
3625 * a smaller optimization than not toggling the interrupt enable,
3626	 * since checking doesn't involve PCI accesses and toggling requires
3627 * the status check). So toggling would probably be a pessimization
3628 * even with MSI. It would only be needed for using a task queue.
3629 */
3630 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3631
3632 /*
3633 * Do the mandatory PCI flush as well as get the link status.
3634 */
3635 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3636
3637 /* Make sure the descriptor ring indexes are coherent. */
3638 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3639 sc->bge_cdata.bge_status_map,
3640 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3641 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3642 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3643 sc->bge_ldata.bge_status_block->bge_status = 0;
3644 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3645 sc->bge_cdata.bge_status_map,
3646 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3647
3648 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3649 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3650 statusword || sc->bge_link_evt)
3651 bge_link_upd(sc);
3652
3653 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3654 /* Check RX return ring producer/consumer. */
3655 bge_rxeof(sc, rx_prod, 1);
3656 }
3657
3658 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3659 /* Check TX ring producer/consumer. */
3660 bge_txeof(sc, tx_cons);
3661 }
3662
3663 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3664 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3665 bge_start_locked(ifp);
3666
3667 BGE_UNLOCK(sc);
3668}
3669
3670static void
3671bge_asf_driver_up(struct bge_softc *sc)
3672{
3673 if (sc->bge_asf_mode & ASF_STACKUP) {
3674		/* Send an ASF heartbeat approximately every 2s. */
3675		if (sc->bge_asf_count)
3676			sc->bge_asf_count--;
3677 else {
3678 sc->bge_asf_count = 5;
3679 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3680 BGE_FW_DRV_ALIVE);
3681 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3682 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3683 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3684 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3685 }
3686 }
3687}
3688
3689static void
3690bge_tick(void *xsc)
3691{
3692 struct bge_softc *sc = xsc;
3693 struct mii_data *mii = NULL;
3694
3695 BGE_LOCK_ASSERT(sc);
3696
3697 /* Synchronize with possible callout reset/stop. */
3698 if (callout_pending(&sc->bge_stat_ch) ||
3699 !callout_active(&sc->bge_stat_ch))
3700 return;
3701
3702 if (BGE_IS_5705_PLUS(sc))
3703 bge_stats_update_regs(sc);
3704 else
3705 bge_stats_update(sc);
3706
3707 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3708 mii = device_get_softc(sc->bge_miibus);
3709 /*
3710		 * Do not touch the PHY if we have link up. This could break
3711		 * IPMI/ASF mode or produce extra input errors
3712		 * (extra errors were reported for bcm5701 & bcm5704).
3713 */
3714 if (!sc->bge_link)
3715 mii_tick(mii);
3716 } else {
3717 /*
3718		 * Since auto-polling can't be used in TBI mode, we poll the
3719		 * link status manually. Here we register a pending link event
3720		 * and trigger an interrupt.
3721 */
3722#ifdef DEVICE_POLLING
3723 /* In polling mode we poll link state in bge_poll(). */
3724 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3725#endif
3726 {
3727 sc->bge_link_evt++;
3728 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3729 sc->bge_flags & BGE_FLAG_5788)
3730 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3731 else
3732 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3733 }
3734 }
3735
3736 bge_asf_driver_up(sc);
3737 bge_watchdog(sc);
3738
3739 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3740}
3741
3742static void
3743bge_stats_update_regs(struct bge_softc *sc)
3744{
3745 struct ifnet *ifp;
3746
3747 ifp = sc->bge_ifp;
3748
3749 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3750 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3751
3752 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3753 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3754 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3755}
3756
3757static void
3758bge_stats_update(struct bge_softc *sc)
3759{
3760 struct ifnet *ifp;
3761 bus_size_t stats;
3762 uint32_t cnt; /* current register value */
3763
3764 ifp = sc->bge_ifp;
3765
3766 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3767
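	/*
	 * The statistics live in a block of NIC-internal memory as
	 * 64-bit counters; we read only the low 32 bits and fold the
	 * delta since the previous poll into the ifnet counters.
	 */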
3768#define READ_STAT(sc, stats, stat) \
3769 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3770
3771 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3772 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3773 sc->bge_tx_collisions = cnt;
3774
3775 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3776 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3777 sc->bge_rx_discards = cnt;
3778
3779 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3780 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3781 sc->bge_tx_discards = cnt;
3782
3783#undef READ_STAT
3784}
3785
3786/*
3787 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3788 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3789 * but when such padded frames employ the bge IP/TCP checksum offload,
3790 * the hardware checksum assist gives incorrect results (possibly
3791 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3792 * If we pad such runts with zeros, the onboard checksum comes out correct.
3793 */
3794static __inline int
3795bge_cksum_pad(struct mbuf *m)
3796{
3797 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3798 struct mbuf *last;
3799
3800 /* If there's only the packet-header and we can pad there, use it. */
3801 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3802 M_TRAILINGSPACE(m) >= padlen) {
3803 last = m;
3804 } else {
3805 /*
3806 * Walk packet chain to find last mbuf. We will either
3807 * pad there, or append a new mbuf and pad it.
3808 */
3809 for (last = m; last->m_next != NULL; last = last->m_next);
3810 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3811 /* Allocate new empty mbuf, pad it. Compact later. */
3812 struct mbuf *n;
3813
3814 MGET(n, M_DONTWAIT, MT_DATA);
3815 if (n == NULL)
3816 return (ENOBUFS);
3817 n->m_len = 0;
3818 last->m_next = n;
3819 last = n;
3820 }
3821 }
3822
3823 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3824 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3825 last->m_len += padlen;
3826 m->m_pkthdr.len += padlen;
3827
3828 return (0);
3829}
3830
3831static struct mbuf *
3832bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
3833{
3834 struct ether_header *eh;
3835 struct ip *ip;
3836 struct tcphdr *tcp;
3837 struct mbuf *n;
3838 uint16_t hlen;
3839 uint32_t ip_off, poff;
3840
3841 if (M_WRITABLE(m) == 0) {
3842 /* Get a writable copy. */
3843 n = m_dup(m, M_DONTWAIT);
3844 m_freem(m);
3845 if (n == NULL)
3846 return (NULL);
3847 m = n;
3848 }
3849 ip_off = sizeof(struct ether_header);
3850 m = m_pullup(m, ip_off);
3851 if (m == NULL)
3852 return (NULL);
3853 eh = mtod(m, struct ether_header *);
3854	/* Check for the presence of a VLAN tag. */
3855 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
3856 ip_off = sizeof(struct ether_vlan_header);
3857 m = m_pullup(m, ip_off);
3858 if (m == NULL)
3859 return (NULL);
3860 }
3861 m = m_pullup(m, ip_off + sizeof(struct ip));
3862 if (m == NULL)
3863 return (NULL);
3864 ip = (struct ip *)(mtod(m, char *) + ip_off);
3865 poff = ip_off + (ip->ip_hl << 2);
3866 m = m_pullup(m, poff + sizeof(struct tcphdr));
3867 if (m == NULL)
3868 return (NULL);
3869 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
3870	m = m_pullup(m, poff + (tcp->th_off << 2));
3871 if (m == NULL)
3872 return (NULL);
3873 /*
3874	 * The controller doesn't rewrite the IP length or the TCP pseudo
3875	 * checksum, so preload the per-segment length and clear both here.
3876 */
3877 *mss = m->m_pkthdr.tso_segsz;
3878 ip->ip_sum = 0;
3879 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
3880 /* Clear pseudo checksum computed by TCP stack. */
3881 tcp->th_sum = 0;
3882 /*
3883	 * Broadcom controllers use different descriptor formats for
3884	 * TSO depending on the ASIC revision. Due to TSO-capable
3885	 * firmware licensing issues and the lower performance of
3886	 * firmware-based TSO, we only support hardware-based TSO,
3887	 * which is available on BCM5755 or newer controllers.
3888	 * Hardware-based TSO uses the low 11 bits to store the MSS
3889	 * and the upper 5 bits to store the IP/TCP header length
3890	 * (including IP/TCP options), expressed in 32-bit units.
3891 */
3892 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
3893 *mss |= (hlen << 11);
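	/*
	 * For example, with no options (20-byte IP and TCP headers)
	 * hlen is (20 + 20) >> 2 = 10, so the encoded value becomes
	 * mss | (10 << 11).
	 */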
3894 return (m);
3895}
3896
3897/*
3898 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3899 * pointers to descriptors.
3900 */
3901static int
3902bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3903{
3904 bus_dma_segment_t segs[BGE_NSEG_NEW];
3905 bus_dmamap_t map;
3906 struct bge_tx_bd *d;
3907 struct mbuf *m = *m_head;
3908 uint32_t idx = *txidx;
3909 uint16_t csum_flags, mss, vlan_tag;
3910 int nsegs, i, error;
3911
3912 csum_flags = 0;
3913 mss = 0;
3914 vlan_tag = 0;
3915 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
3916 *m_head = m = bge_setup_tso(sc, m, &mss);
3917 if (*m_head == NULL)
3918 return (ENOBUFS);
3919 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
3920 BGE_TXBDFLAG_CPU_POST_DMA;
3921 } else if ((m->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) != 0) {
3922 if (m->m_pkthdr.csum_flags & CSUM_IP)
3923 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3924 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3925 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3926 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3927 (error = bge_cksum_pad(m)) != 0) {
3928 m_freem(m);
3929 *m_head = NULL;
3930 return (error);
3931 }
3932 }
3933 if (m->m_flags & M_LASTFRAG)
3934 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3935 else if (m->m_flags & M_FRAG)
3936 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3937 }
3938
3939 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
492
493#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
494#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
495#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
496#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
497#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
498
499static int
500bge_has_eaddr(struct bge_softc *sc)
501{
502#ifdef __sparc64__
503 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
504 device_t dev;
505 uint32_t subvendor;
506
507 dev = sc->bge_dev;
508
509 /*
510 * The on-board BGEs found in sun4u machines aren't fitted with
511	 * an EEPROM, which means that we have to obtain the MAC address
512 * via OFW and that some tests will always fail. We distinguish
513 * such BGEs by the subvendor ID, which also has to be obtained
514 * from OFW instead of the PCI configuration space as the latter
515 * indicates Broadcom as the subvendor of the netboot interface.
516 * For early Blade 1500 and 2500 we even have to check the OFW
517 * device path as the subvendor ID always defaults to Broadcom
518 * there.
519 */
520 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
521 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
522 subvendor == SUN_VENDORID)
523 return (0);
524 memset(buf, 0, sizeof(buf));
525 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
526 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
527 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
528 return (0);
529 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
530 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
531 return (0);
532 }
533#endif
534 return (1);
535}
536
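/*
 * Indirect NIC memory access: point the PCI memory window at the
 * target offset via config space, access the window data register,
 * then reset the window.
 */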
537static uint32_t
538bge_readmem_ind(struct bge_softc *sc, int off)
539{
540 device_t dev;
541 uint32_t val;
542
543 dev = sc->bge_dev;
544
545 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
546 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
547 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
548 return (val);
549}
550
551static void
552bge_writemem_ind(struct bge_softc *sc, int off, int val)
553{
554 device_t dev;
555
556 dev = sc->bge_dev;
557
558 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
559 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
560 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
561}
562
563/*
564 * PCI Express only
565 */
566static void
567bge_set_max_readrq(struct bge_softc *sc)
568{
569 device_t dev;
570 uint16_t val;
571
572 dev = sc->bge_dev;
573
574 val = pci_read_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
575 if ((val & PCIM_EXP_CTL_MAX_READ_REQUEST) !=
576 BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
577 if (bootverbose)
578 device_printf(dev, "adjust device control 0x%04x ",
579 val);
580 val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
581 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
582 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
583 val, 2);
584 if (bootverbose)
585 printf("-> 0x%04x\n", val);
586 }
587}
588
589#ifdef notdef
590static uint32_t
591bge_readreg_ind(struct bge_softc *sc, int off)
592{
593 device_t dev;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
598 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599}
600#endif
601
602static void
603bge_writereg_ind(struct bge_softc *sc, int off, int val)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
611}
612
613static void
614bge_writemem_direct(struct bge_softc *sc, int off, int val)
615{
616 CSR_WRITE_4(sc, off, val);
617}
618
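/*
 * Write to a mailbox register.  The BCM5906 implements its mailboxes
 * only in the low-priority aperture, so rebase the offset there.
 */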
619static void
620bge_writembx(struct bge_softc *sc, int off, int val)
621{
622 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
623 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625 CSR_WRITE_4(sc, off, val);
626}
627
628/*
629 * Map a single buffer address.
630 */
631
632static void
633bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
634{
635 struct bge_dmamap_arg *ctx;
636
637 if (error)
638 return;
639
640 ctx = arg;
641
642 if (nseg > ctx->bge_maxsegs) {
643 ctx->bge_maxsegs = 0;
644 return;
645 }
646
647 ctx->bge_busaddr = segs->ds_addr;
648}
649
650static uint8_t
651bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
652{
653 uint32_t access, byte = 0;
654 int i;
655
656 /* Lock. */
657 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
658 for (i = 0; i < 8000; i++) {
659 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
660 break;
661 DELAY(20);
662 }
663 if (i == 8000)
664 return (1);
665
666 /* Enable access. */
667 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
668 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
669
670 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
671 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
672 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
673 DELAY(10);
674 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
675 DELAY(10);
676 break;
677 }
678 }
679
680 if (i == BGE_TIMEOUT * 10) {
681 if_printf(sc->bge_ifp, "nvram read timed out\n");
682 return (1);
683 }
684
685 /* Get result. */
686 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
687
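	/*
	 * The NVRAM data register returns a 32-bit word; byte-swap it
	 * and shift out the byte the caller asked for.
	 */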
688 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
689
690 /* Disable access. */
691 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
692
693 /* Unlock. */
694 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
695 CSR_READ_4(sc, BGE_NVRAM_SWARB);
696
697 return (0);
698}
699
700/*
701 * Read a sequence of bytes from NVRAM.
702 */
703static int
704bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
705{
706 int err = 0, i;
707 uint8_t byte = 0;
708
709 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
710 return (1);
711
712 for (i = 0; i < cnt; i++) {
713 err = bge_nvram_getbyte(sc, off + i, &byte);
714 if (err)
715 break;
716 *(dest + i) = byte;
717 }
718
719 return (err ? 1 : 0);
720}
721
722/*
723 * Read a byte of data stored in the EEPROM at address 'addr.' The
724 * BCM570x supports both the traditional bitbang interface and an
725 * auto access interface for reading the EEPROM. We use the auto
726 * access method.
727 */
728static uint8_t
729bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
730{
731 int i;
732 uint32_t byte = 0;
733
734 /*
735 * Enable use of auto EEPROM access so we can avoid
736 * having to use the bitbang method.
737 */
738 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
739
740 /* Reset the EEPROM, load the clock period. */
741 CSR_WRITE_4(sc, BGE_EE_ADDR,
742 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
743 DELAY(20);
744
745 /* Issue the read EEPROM command. */
746 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
747
748 /* Wait for completion */
749	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
750 DELAY(10);
751 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
752 break;
753 }
754
755 if (i == BGE_TIMEOUT * 10) {
756 device_printf(sc->bge_dev, "EEPROM read timed out\n");
757 return (1);
758 }
759
760 /* Get result. */
761 byte = CSR_READ_4(sc, BGE_EE_DATA);
762
763 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
764
765 return (0);
766}
767
768/*
769 * Read a sequence of bytes from the EEPROM.
770 */
771static int
772bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
773{
774 int i, error = 0;
775 uint8_t byte = 0;
776
777 for (i = 0; i < cnt; i++) {
778 error = bge_eeprom_getbyte(sc, off + i, &byte);
779 if (error)
780 break;
781 *(dest + i) = byte;
782 }
783
784 return (error ? 1 : 0);
785}
786
787static int
788bge_miibus_readreg(device_t dev, int phy, int reg)
789{
790 struct bge_softc *sc;
791 uint32_t val, autopoll;
792 int i;
793
794 sc = device_get_softc(dev);
795
796 /*
797 * Broadcom's own driver always assumes the internal
798 * PHY is at GMII address 1. On some chips, the PHY responds
799 * to accesses at all addresses, which could cause us to
800	 * bogusly attach the PHY 32 times at probe time. Always
801	 * restricting the lookup to address 1 is simpler than
802	 * trying to figure out which chip revisions should be
803 * special-cased.
804 */
805 if (phy != 1)
806 return (0);
807
808 /* Reading with autopolling on may trigger PCI errors */
809 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
810 if (autopoll & BGE_MIMODE_AUTOPOLL) {
811 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
812 DELAY(40);
813 }
814
815 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
816 BGE_MIPHY(phy) | BGE_MIREG(reg));
817
818 for (i = 0; i < BGE_TIMEOUT; i++) {
819 DELAY(10);
820 val = CSR_READ_4(sc, BGE_MI_COMM);
821 if (!(val & BGE_MICOMM_BUSY))
822 break;
823 }
824
825 if (i == BGE_TIMEOUT) {
826 device_printf(sc->bge_dev,
827 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
828 phy, reg, val);
829 val = 0;
830 goto done;
831 }
832
833 DELAY(5);
834 val = CSR_READ_4(sc, BGE_MI_COMM);
835
836done:
837 if (autopoll & BGE_MIMODE_AUTOPOLL) {
838 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
839 DELAY(40);
840 }
841
842 if (val & BGE_MICOMM_READFAIL)
843 return (0);
844
845 return (val & 0xFFFF);
846}
847
848static int
849bge_miibus_writereg(device_t dev, int phy, int reg, int val)
850{
851 struct bge_softc *sc;
852 uint32_t autopoll;
853 int i;
854
855 sc = device_get_softc(dev);
856
857 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
858 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
859		return (0);
860
861 /* Reading with autopolling on may trigger PCI errors */
862 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
863 if (autopoll & BGE_MIMODE_AUTOPOLL) {
864 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
865 DELAY(40);
866 }
867
868 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
869 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
870
871 for (i = 0; i < BGE_TIMEOUT; i++) {
872 DELAY(10);
873 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
874 DELAY(5);
875 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
876 break;
877 }
878 }
879
880 if (i == BGE_TIMEOUT) {
881 device_printf(sc->bge_dev,
882 "PHY write timed out (phy %d, reg %d, val %d)\n",
883 phy, reg, val);
884 return (0);
885 }
886
887 if (autopoll & BGE_MIMODE_AUTOPOLL) {
888 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
889 DELAY(40);
890 }
891
892 return (0);
893}
894
895static void
896bge_miibus_statchg(device_t dev)
897{
898 struct bge_softc *sc;
899 struct mii_data *mii;
900 sc = device_get_softc(dev);
901 mii = device_get_softc(sc->bge_miibus);
902
903 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
904 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
905 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
906 else
907 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
908
909 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
910 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
911 else
912 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
913}
914
915/*
916 * Initialize a standard receive ring descriptor.
917 */
918static int
919bge_newbuf_std(struct bge_softc *sc, int i)
920{
921 struct mbuf *m;
922 struct bge_rx_bd *r;
923 bus_dma_segment_t segs[1];
924 bus_dmamap_t map;
925 int error, nsegs;
926
927 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
928 if (m == NULL)
929 return (ENOBUFS);
930 m->m_len = m->m_pkthdr.len = MCLBYTES;
931 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
932 m_adj(m, ETHER_ALIGN);
933
934 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
935 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
936 if (error != 0) {
937 m_freem(m);
938 return (error);
939 }
940 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
941 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
942 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
943 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
944 sc->bge_cdata.bge_rx_std_dmamap[i]);
945 }
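	/*
	 * The new mbuf was loaded into the spare map above; swap it
	 * with this slot's map so the old map becomes the next spare.
	 */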
946 map = sc->bge_cdata.bge_rx_std_dmamap[i];
947 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
948 sc->bge_cdata.bge_rx_std_sparemap = map;
949 sc->bge_cdata.bge_rx_std_chain[i] = m;
950 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
951 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
952 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
953 r->bge_flags = BGE_RXBDFLAG_END;
954 r->bge_len = segs[0].ds_len;
955 r->bge_idx = i;
956
957 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
958 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
959
960 return (0);
961}
962
963/*
964 * Initialize a jumbo receive ring descriptor. This allocates
965 * a jumbo buffer from the pool managed internally by the driver.
966 */
967static int
968bge_newbuf_jumbo(struct bge_softc *sc, int i)
969{
970 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
971 bus_dmamap_t map;
972 struct bge_extrx_bd *r;
973 struct mbuf *m;
974 int error, nsegs;
975
976 MGETHDR(m, M_DONTWAIT, MT_DATA);
977 if (m == NULL)
978 return (ENOBUFS);
979
980 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
981 if (!(m->m_flags & M_EXT)) {
982 m_freem(m);
983 return (ENOBUFS);
984 }
985 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
986 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
987 m_adj(m, ETHER_ALIGN);
988
989 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
990 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
991 if (error != 0) {
992 m_freem(m);
993 return (error);
994 }
995
996	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
997 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
998 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
999 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1000 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1001 }
1002 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1003 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1004 sc->bge_cdata.bge_rx_jumbo_sparemap;
1005 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1006 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1007 /*
1008 * Fill in the extended RX buffer descriptor.
1009 */
1010 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1011 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1012 r->bge_idx = i;
1013 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
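	/*
	 * Note the deliberate fall-through below: each case fills in
	 * one DMA segment and drops into the next lower case.
	 */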
1014 switch (nsegs) {
1015 case 4:
1016 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1017 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1018 r->bge_len3 = segs[3].ds_len;
1019 case 3:
1020 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1021 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1022 r->bge_len2 = segs[2].ds_len;
1023 case 2:
1024 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1025 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1026 r->bge_len1 = segs[1].ds_len;
1027 case 1:
1028 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1029 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1030 r->bge_len0 = segs[0].ds_len;
1031 break;
1032 default:
1033 panic("%s: %d segments\n", __func__, nsegs);
1034 }
1035
1036 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1037 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1038
1039 return (0);
1040}
1041
1042/*
1043 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1044 * that's 1MB of memory, which is a lot. For now, we fill only the first
1045 * 256 ring entries and hope that our CPU is fast enough to keep up with
1046 * the NIC.
1047 */
1048static int
1049bge_init_rx_ring_std(struct bge_softc *sc)
1050{
1051 int error, i;
1052
1053 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1054 sc->bge_std = 0;
1055 for (i = 0; i < BGE_SSLOTS; i++) {
1056 if ((error = bge_newbuf_std(sc, i)) != 0)
1057 return (error);
1058 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1059	}
1060
1061 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1062 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1063
1064 sc->bge_std = i - 1;
1065 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1066
1067 return (0);
1068}
1069
1070static void
1071bge_free_rx_ring_std(struct bge_softc *sc)
1072{
1073 int i;
1074
1075 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1076 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1077 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1078 sc->bge_cdata.bge_rx_std_dmamap[i],
1079 BUS_DMASYNC_POSTREAD);
1080 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1081 sc->bge_cdata.bge_rx_std_dmamap[i]);
1082 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1083 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1084 }
1085 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1086 sizeof(struct bge_rx_bd));
1087 }
1088}
1089
1090static int
1091bge_init_rx_ring_jumbo(struct bge_softc *sc)
1092{
1093 struct bge_rcb *rcb;
1094 int error, i;
1095
1096 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1097 sc->bge_jumbo = 0;
1098 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1099 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1100 return (error);
1101 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1102	}
1103
1104 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1105 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1106
1107 sc->bge_jumbo = i - 1;
1108
1109 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1110 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1111 BGE_RCB_FLAG_USE_EXT_RX_BD);
1112 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1113
1114 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1115
1116 return (0);
1117}
1118
1119static void
1120bge_free_rx_ring_jumbo(struct bge_softc *sc)
1121{
1122 int i;
1123
1124 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1125 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1126 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1127 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1128 BUS_DMASYNC_POSTREAD);
1129 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1130 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1131 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1132 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1133 }
1134 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1135 sizeof(struct bge_extrx_bd));
1136 }
1137}
1138
1139static void
1140bge_free_tx_ring(struct bge_softc *sc)
1141{
1142 int i;
1143
1144 if (sc->bge_ldata.bge_tx_ring == NULL)
1145 return;
1146
1147 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1148 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1149 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1150 sc->bge_cdata.bge_tx_dmamap[i],
1151 BUS_DMASYNC_POSTWRITE);
1152 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1153 sc->bge_cdata.bge_tx_dmamap[i]);
1154 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1155 sc->bge_cdata.bge_tx_chain[i] = NULL;
1156 }
1157 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1158 sizeof(struct bge_tx_bd));
1159 }
1160}
1161
1162static int
1163bge_init_tx_ring(struct bge_softc *sc)
1164{
1165 sc->bge_txcnt = 0;
1166 sc->bge_tx_saved_considx = 0;
1167
1168 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1169 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1170 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1171
1172 /* Initialize transmit producer index for host-memory send ring. */
1173 sc->bge_tx_prodidx = 0;
1174 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1175
1176 /* 5700 b2 errata */
1177 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1178 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1179
1180 /* NIC-memory send ring not used; initialize to zero. */
1181 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1182 /* 5700 b2 errata */
1183 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1184 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1185
1186 return (0);
1187}
1188
1189static void
1190bge_setpromisc(struct bge_softc *sc)
1191{
1192 struct ifnet *ifp;
1193
1194 BGE_LOCK_ASSERT(sc);
1195
1196 ifp = sc->bge_ifp;
1197
1198 /* Enable or disable promiscuous mode as needed. */
1199 if (ifp->if_flags & IFF_PROMISC)
1200 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1201 else
1202 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1203}
1204
1205static void
1206bge_setmulti(struct bge_softc *sc)
1207{
1208 struct ifnet *ifp;
1209 struct ifmultiaddr *ifma;
1210 uint32_t hashes[4] = { 0, 0, 0, 0 };
1211 int h, i;
1212
1213 BGE_LOCK_ASSERT(sc);
1214
1215 ifp = sc->bge_ifp;
1216
1217 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1218 for (i = 0; i < 4; i++)
1219 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1220 return;
1221 }
1222
1223 /* First, zot all the existing filters. */
1224 for (i = 0; i < 4; i++)
1225 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1226
1227 /* Now program new ones. */
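	/*
	 * The chip's multicast filter is a 128-bit hash table: the low
	 * 7 bits of the little-endian CRC32 of each address select one
	 * bit spread across the four 32-bit BGE_MAR registers.
	 */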
1228 if_maddr_rlock(ifp);
1229 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1230 if (ifma->ifma_addr->sa_family != AF_LINK)
1231 continue;
1232 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1233 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1234 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1235 }
1236 if_maddr_runlock(ifp);
1237
1238 for (i = 0; i < 4; i++)
1239 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1240}
1241
1242static void
1243bge_setvlan(struct bge_softc *sc)
1244{
1245 struct ifnet *ifp;
1246
1247 BGE_LOCK_ASSERT(sc);
1248
1249 ifp = sc->bge_ifp;
1250
1251 /* Enable or disable VLAN tag stripping as needed. */
1252 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1253 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1254 else
1255 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1256}
1257
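/*
 * ASF reset handshake: signal the management firmware through the SDI
 * status scratchpad before and after resets so it can quiesce and
 * later resume its use of the hardware.
 */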
1258static void
1259 bge_sig_pre_reset(struct bge_softc *sc, int type)
1260 {
1263 /*
1264	 * Some chips don't like this, so only do it if ASF is enabled.
1265 */
1266 if (sc->bge_asf_mode)
1267 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1268
1269 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1270 switch (type) {
1271 case BGE_RESET_START:
1272 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1273 break;
1274 case BGE_RESET_STOP:
1275 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1276 break;
1277 }
1278 }
1279}
1280
1281static void
1282 bge_sig_post_reset(struct bge_softc *sc, int type)
1283 {
1286 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1287 switch (type) {
1288 case BGE_RESET_START:
1289 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1290 /* START DONE */
1291 break;
1292 case BGE_RESET_STOP:
1293 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
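			/* UNLOAD DONE */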
1294 break;
1295 }
1296 }
1297}
1298
1299static void
1300bge_sig_legacy(struct bge_softc *sc, int type)
1301{
1304 if (sc->bge_asf_mode) {
1305 switch (type) {
1306 case BGE_RESET_START:
1307 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1308 break;
1309 case BGE_RESET_STOP:
1310 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1311 break;
1312 }
1313 }
1314}
1315
1316static void
1317bge_stop_fw(struct bge_softc *sc)
1318{
1321 int i;
1322
1323 if (sc->bge_asf_mode) {
1324 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1325 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1326 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1327
1328		for (i = 0; i < 100; i++) {
1329 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1330 break;
1331 DELAY(10);
1332 }
1333 }
1334}
1335
1336/*
1337 * Do endian, PCI and DMA initialization.
1338 */
1339static int
1340bge_chipinit(struct bge_softc *sc)
1341{
1342 uint32_t dma_rw_ctl;
1343 int i;
1344
1345 /* Set endianness before we access any non-PCI registers. */
1346 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1347
1348 /* Clear the MAC control register */
1349 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1350
1351 /*
1352 * Clear the MAC statistics block in the NIC's
1353 * internal memory.
1354 */
1355 for (i = BGE_STATS_BLOCK;
1356 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1357 BGE_MEMWIN_WRITE(sc, i, 0);
1358
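	/* Likewise clear the status block region in NIC memory. */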
1359 for (i = BGE_STATUS_BLOCK;
1360 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1361 BGE_MEMWIN_WRITE(sc, i, 0);
1362
1363 /*
1364 * Set up the PCI DMA control register.
1365 */
1366 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1367 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1368 if (sc->bge_flags & BGE_FLAG_PCIE) {
1369 /* Read watermark not used, 128 bytes for write. */
1370 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1371 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1372 if (BGE_IS_5714_FAMILY(sc)) {
1373 /* 256 bytes for read and write. */
1374 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1375 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1376 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1377 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1378 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1379 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1380 /* 1536 bytes for read, 384 bytes for write. */
1381 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1382 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1383 } else {
1384 /* 384 bytes for read and write. */
1385 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1386 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1387 0x0F;
1388 }
1389 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1390 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1391 uint32_t tmp;
1392
1393 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1394 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1395 if (tmp == 6 || tmp == 7)
1396 dma_rw_ctl |=
1397 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1398
1399 /* Set PCI-X DMA write workaround. */
1400 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1401 }
1402 } else {
1403 /* Conventional PCI bus: 256 bytes for read and write. */
1404 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1405 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1406
1407 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1408 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1409 dma_rw_ctl |= 0x0F;
1410 }
1411 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1412 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1413 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1414 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1415 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1416 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1417 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1418 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1419
1420 /*
1421 * Set up general mode register.
1422 */
1423 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1424 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1425 BGE_MODECTL_TX_NO_PHDR_CSUM);
1426
1427 /*
1428	 * BCM5701 B5 has a bug causing data corruption when using
1429 * 64-bit DMA reads, which can be terminated early and then
1430 * completed later as 32-bit accesses, in combination with
1431 * certain bridges.
1432 */
1433 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1434 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1435 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1436
1437 /*
1438 * Tell the firmware the driver is running
1439 */
1440 if (sc->bge_asf_mode & ASF_STACKUP)
1441 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1442
1443 /*
1444 * Disable memory write invalidate. Apparently it is not supported
1445 * properly by these devices. Also ensure that INTx isn't disabled,
1446 * as these chips need it even when using MSI.
1447 */
1448 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1449 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1450
1451	/* Set the timer prescaler (always 66MHz) */
1452 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1453
1454 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1455 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1456 DELAY(40); /* XXX */
1457
1458 /* Put PHY into ready state */
1459 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1460 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1461 DELAY(40);
1462 }
1463
1464 return (0);
1465}
1466
1467static int
1468bge_blockinit(struct bge_softc *sc)
1469{
1470 struct bge_rcb *rcb;
1471 bus_size_t vrcb;
1472 bge_hostaddr taddr;
1473 uint32_t val;
1474 int i;
1475
1476 /*
1477 * Initialize the memory window pointer register so that
1478 * we can access the first 32K of internal NIC RAM. This will
1479 * allow us to set up the TX send ring RCBs and the RX return
1480 * ring RCBs, plus other things which live in NIC memory.
1481 */
1482 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1483
1484 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1485
1486 if (!(BGE_IS_5705_PLUS(sc))) {
1487 /* Configure mbuf memory pool */
1488 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1489 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1490 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1491 else
1492 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1493
1494 /* Configure DMA resource pool */
1495 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1496 BGE_DMA_DESCRIPTORS);
1497 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1498 }
1499
1500 /* Configure mbuf pool watermarks */
1501 if (!BGE_IS_5705_PLUS(sc)) {
1502 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1503 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1504 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1505 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1506 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1507 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1508 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1509 } else {
1510 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1511 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1512 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1513 }
1514
1515 /* Configure DMA resource watermarks */
1516 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1517 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1518
1519 /* Enable buffer manager */
1520 if (!(BGE_IS_5705_PLUS(sc))) {
1521 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1522 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1523
1524 /* Poll for buffer manager start indication */
1525 for (i = 0; i < BGE_TIMEOUT; i++) {
1526 DELAY(10);
1527 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1528 break;
1529 }
1530
1531 if (i == BGE_TIMEOUT) {
1532 device_printf(sc->bge_dev,
1533 "buffer manager failed to start\n");
1534 return (ENXIO);
1535 }
1536 }
1537
1538 /* Enable flow-through queues */
1539 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1540 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1541
1542 /* Wait until queue initialization is complete */
1543 for (i = 0; i < BGE_TIMEOUT; i++) {
1544 DELAY(10);
1545 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1546 break;
1547 }
1548
1549 if (i == BGE_TIMEOUT) {
1550 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1551 return (ENXIO);
1552 }
1553
1554 /* Initialize the standard RX ring control block */
1555 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1556 rcb->bge_hostaddr.bge_addr_lo =
1557 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1558 rcb->bge_hostaddr.bge_addr_hi =
1559 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1560 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1561 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1562 if (BGE_IS_5705_PLUS(sc))
1563 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1564 else
1565 rcb->bge_maxlen_flags =
1566 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1567 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1568 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1569 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1570
1571 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1572 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1573
1574 /*
1575 * Initialize the jumbo RX ring control block
1576 * We set the 'ring disabled' bit in the flags
1577 * field until we're actually ready to start
1578 * using this ring (i.e. once we set the MTU
1579 * high enough to require it).
1580 */
1581 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1582 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1583
1584 rcb->bge_hostaddr.bge_addr_lo =
1585 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1586 rcb->bge_hostaddr.bge_addr_hi =
1587 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1588 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1589 sc->bge_cdata.bge_rx_jumbo_ring_map,
1590 BUS_DMASYNC_PREREAD);
1591 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1592 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1593 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1594 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1595 rcb->bge_hostaddr.bge_addr_hi);
1596 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1597 rcb->bge_hostaddr.bge_addr_lo);
1598
1599 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1600 rcb->bge_maxlen_flags);
1601 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1602
1603 /* Set up dummy disabled mini ring RCB */
1604 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1605 rcb->bge_maxlen_flags =
1606 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1607 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1608 rcb->bge_maxlen_flags);
1609 }
1610
1611 /*
1612	 * Set the BD ring replenish thresholds. The recommended
1613 * values are 1/8th the number of descriptors allocated to
1614 * each ring.
1615 * XXX The 5754 requires a lower threshold, so it might be a
1616 * requirement of all 575x family chips. The Linux driver sets
1617 * the lower threshold for all 5705 family chips as well, but there
1618 * are reports that it might not need to be so strict.
1619 *
1620 * XXX Linux does some extra fiddling here for the 5906 parts as
1621 * well.
1622 */
1623 if (BGE_IS_5705_PLUS(sc))
1624 val = 8;
1625 else
1626 val = BGE_STD_RX_RING_CNT / 8;
1627 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1628 if (BGE_IS_JUMBO_CAPABLE(sc))
1629 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1630 BGE_JUMBO_RX_RING_CNT/8);
1631
1632 /*
1633 * Disable all unused send rings by setting the 'ring disabled'
1634 * bit in the flags field of all the TX send ring control blocks.
1635 * These are located in NIC memory.
1636 */
1637 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1638 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1639 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1640 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1641 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1642 vrcb += sizeof(struct bge_rcb);
1643 }
1644
1645 /* Configure TX RCB 0 (we use only the first ring) */
1646 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1647 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1648 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1649 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1650 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1651 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1652 if (!(BGE_IS_5705_PLUS(sc)))
1653 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1654 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1655
1656 /* Disable all unused RX return rings */
1657 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1658 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1659 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1660 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1661 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1662 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1663 BGE_RCB_FLAG_RING_DISABLED));
1664 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1665 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1666 (i * (sizeof(uint64_t))), 0);
1667 vrcb += sizeof(struct bge_rcb);
1668 }
1669
1670 /* Initialize RX ring indexes */
1671 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1672 if (BGE_IS_JUMBO_CAPABLE(sc))
1673 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1674 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1675 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1676
1677 /*
1678 * Set up RX return ring 0
1679 * Note that the NIC address for RX return rings is 0x00000000.
1680 * The return rings live entirely within the host, so the
1681 * nicaddr field in the RCB isn't used.
1682 */
1683 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1684 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1685 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1686 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1687 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1688 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1689 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1690
1691 /* Set random backoff seed for TX */
1692 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1693 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1694 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1695 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1696 BGE_TX_BACKOFF_SEED_MASK);
1697
1698 /* Set inter-packet gap */
1699 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1700
1701 /*
1702 * Specify which ring to use for packets that don't match
1703 * any RX rules.
1704 */
1705 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1706
1707 /*
1708 * Configure number of RX lists. One interrupt distribution
1709 * list, sixteen active lists, one bad frames class.
1710 */
1711 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1712
1713	/* Initialize RX list placement stats mask. */
1714 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1715 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1716
1717 /* Disable host coalescing until we get it set up */
1718 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1719
1720 /* Poll to make sure it's shut down. */
1721 for (i = 0; i < BGE_TIMEOUT; i++) {
1722 DELAY(10);
1723 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1724 break;
1725 }
1726
1727 if (i == BGE_TIMEOUT) {
1728 device_printf(sc->bge_dev,
1729 "host coalescing engine failed to idle\n");
1730 return (ENXIO);
1731 }
1732
1733 /* Set up host coalescing defaults */
1734 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1735 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1736 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1737 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1738 if (!(BGE_IS_5705_PLUS(sc))) {
1739 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1740 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1741 }
1742 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1743 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1744
1745 /* Set up address of statistics block */
1746 if (!(BGE_IS_5705_PLUS(sc))) {
1747 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1748 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1749 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1750 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1751 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1752 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1753 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1754 }
1755
1756 /* Set up address of status block */
1757 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1758 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1759 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1760 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1761 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1762 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1763
1764 /* Set up status block size. */
1765 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1766 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
1767 val = BGE_STATBLKSZ_FULL;
1768 else
1769 val = BGE_STATBLKSZ_32BYTE;
1770
1771 /* Turn on host coalescing state machine */
1772 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1773
1774 /* Turn on RX BD completion state machine and enable attentions */
1775 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1776 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1777
1778 /* Turn on RX list placement state machine */
1779 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1780
1781 /* Turn on RX list selector state machine. */
1782 if (!(BGE_IS_5705_PLUS(sc)))
1783 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1784
1785 /* Turn on DMA, clear stats */
1786 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
1787 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
1788 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
1789 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
1790 ((sc->bge_flags & BGE_FLAG_TBI) ?
1791 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1792
1793 /* Set misc. local control, enable interrupts on attentions */
1794 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1795
1796#ifdef notdef
1797 /* Assert GPIO pins for PHY reset */
1798 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1799 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1800 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1801 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1802#endif
1803
1804 /* Turn on DMA completion state machine */
1805 if (!(BGE_IS_5705_PLUS(sc)))
1806 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1807
1808 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1809
1810 /* Enable host coalescing bug fix. */
1811 if (BGE_IS_5755_PLUS(sc))
1812 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1813
1814 /* Turn on write DMA state machine */
1815 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1816 DELAY(40);
1817
1818 /* Turn on read DMA state machine */
1819 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1820 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1821 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1822 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1823 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1824 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1825 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1826 if (sc->bge_flags & BGE_FLAG_PCIE)
1827 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1828 if (sc->bge_flags & BGE_FLAG_TSO)
1829 val |= BGE_RDMAMODE_TSO4_ENABLE;
1830 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1831 DELAY(40);
1832
1833 /* Turn on RX data completion state machine */
1834 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1835
1836 /* Turn on RX BD initiator state machine */
1837 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1838
1839 /* Turn on RX data and RX BD initiator state machine */
1840 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1841
1842 /* Turn on Mbuf cluster free state machine */
1843 if (!(BGE_IS_5705_PLUS(sc)))
1844 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1845
1846 /* Turn on send BD completion state machine */
1847 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1848
1849 /* Turn on send data completion state machine */
1850 val = BGE_SDCMODE_ENABLE;
1851 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1852 val |= BGE_SDCMODE_CDELAY;
1853 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1854
1855 /* Turn on send data initiator state machine */
1856 if (sc->bge_flags & BGE_FLAG_TSO)
1857 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1858 else
1859 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1860
1861 /* Turn on send BD initiator state machine */
1862 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1863
1864 /* Turn on send BD selector state machine */
1865 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1866
1867 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1868 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1869 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1870
1871 /* ack/clear link change events */
1872 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1873 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1874 BGE_MACSTAT_LINK_CHANGED);
1875 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1876
1877 /* Enable PHY auto polling (for MII/GMII only) */
1878 if (sc->bge_flags & BGE_FLAG_TBI) {
1879 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1880 } else {
1881 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1882 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1883 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1884 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1885 BGE_EVTENB_MI_INTERRUPT);
1886 }
1887
1888 /*
1889 * Clear any pending link state attention.
1890 * Otherwise some link state change events may be lost until attention
1891 * is cleared by bge_intr() -> bge_link_upd() sequence.
1892	 * It's not necessary on newer BCM chips; perhaps enabling link
1893 * state change attentions implies clearing pending attention.
1894 */
1895 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1896 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1897 BGE_MACSTAT_LINK_CHANGED);
1898
1899 /* Enable link state change attentions. */
1900 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1901
1902 return (0);
1903}
1904
1905const struct bge_revision *
1906bge_lookup_rev(uint32_t chipid)
1907{
1908 const struct bge_revision *br;
1909
1910 for (br = bge_revisions; br->br_name != NULL; br++) {
1911 if (br->br_chipid == chipid)
1912 return (br);
1913 }
1914
1915 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1916 if (br->br_chipid == BGE_ASICREV(chipid))
1917 return (br);
1918 }
1919
1920 return (NULL);
1921}
1922
1923const struct bge_vendor *
1924bge_lookup_vendor(uint16_t vid)
1925{
1926 const struct bge_vendor *v;
1927
1928 for (v = bge_vendors; v->v_name != NULL; v++)
1929 if (v->v_id == vid)
1930 return (v);
1931
1932 panic("%s: unknown vendor %d", __func__, vid);
1933 return (NULL);
1934}
1935
1936/*
1937 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1938 * against our list and return its name if we find a match.
1939 *
1940 * Note that since the Broadcom controller contains VPD support, we
1941 * try to get the device name string from the controller itself instead
1942 * of the compiled-in string. It guarantees we'll always announce the
1943 * right product name. We fall back to the compiled-in string when
1944 * VPD is unavailable or corrupt.
1945 */
1946static int
1947bge_probe(device_t dev)
1948{
1949 const struct bge_type *t = bge_devs;
1950 struct bge_softc *sc = device_get_softc(dev);
1951 uint16_t vid, did;
1952
1953 sc->bge_dev = dev;
1954 vid = pci_get_vendor(dev);
1955 did = pci_get_device(dev);
1956	while (t->bge_vid != 0) {
1957 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1958 char model[64], buf[96];
1959 const struct bge_revision *br;
1960 const struct bge_vendor *v;
1961 uint32_t id;
1962
1963 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1964 BGE_PCIMISCCTL_ASICREV_SHIFT;
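			/*
			 * Newer chips report the ASIC revision through a
			 * dedicated product ID register rather than the
			 * miscellaneous control register.
			 */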
1965 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
1966 id = pci_read_config(dev,
1967 BGE_PCI_PRODID_ASICREV, 4);
1968 br = bge_lookup_rev(id);
1969 v = bge_lookup_vendor(vid);
1970 {
1971#if __FreeBSD_version > 700024
1972 const char *pname;
1973
1974 if (bge_has_eaddr(sc) &&
1975 pci_get_vpd_ident(dev, &pname) == 0)
1976 snprintf(model, 64, "%s", pname);
1977 else
1978#endif
1979 snprintf(model, 64, "%s %s",
1980 v->v_name,
1981 br != NULL ? br->br_name :
1982 "NetXtreme Ethernet Controller");
1983 }
1984 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
1985 br != NULL ? "" : "unknown ", id);
1986 device_set_desc_copy(dev, buf);
1987 if (pci_get_subvendor(dev) == DELL_VENDORID)
1988 sc->bge_flags |= BGE_FLAG_NO_3LED;
1989 if (did == BCOM_DEVICEID_BCM5755M)
1990 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1991 return (0);
1992 }
1993 t++;
1994 }
1995
1996 return (ENXIO);
1997}
1998
1999static void
2000bge_dma_free(struct bge_softc *sc)
2001{
2002 int i;
2003
2004 /* Destroy DMA maps for RX buffers. */
2005 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2006 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2007 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2008 sc->bge_cdata.bge_rx_std_dmamap[i]);
2009 }
2010 if (sc->bge_cdata.bge_rx_std_sparemap)
2011 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2012 sc->bge_cdata.bge_rx_std_sparemap);
2013
2014 /* Destroy DMA maps for jumbo RX buffers. */
2015 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2016 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2017 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2018 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2019 }
2020 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2021 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2022 sc->bge_cdata.bge_rx_jumbo_sparemap);
2023
2024 /* Destroy DMA maps for TX buffers. */
2025 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2026 if (sc->bge_cdata.bge_tx_dmamap[i])
2027 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2028 sc->bge_cdata.bge_tx_dmamap[i]);
2029 }
2030
2031 if (sc->bge_cdata.bge_rx_mtag)
2032 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2033 if (sc->bge_cdata.bge_tx_mtag)
2034 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2035
2037 /* Destroy standard RX ring. */
2038 if (sc->bge_cdata.bge_rx_std_ring_map)
2039 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2040 sc->bge_cdata.bge_rx_std_ring_map);
2041 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2042 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2043 sc->bge_ldata.bge_rx_std_ring,
2044 sc->bge_cdata.bge_rx_std_ring_map);
2045
2046 if (sc->bge_cdata.bge_rx_std_ring_tag)
2047 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2048
2049 /* Destroy jumbo RX ring. */
2050 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2051 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2052 sc->bge_cdata.bge_rx_jumbo_ring_map);
2053
2054 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2055 sc->bge_ldata.bge_rx_jumbo_ring)
2056 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2057 sc->bge_ldata.bge_rx_jumbo_ring,
2058 sc->bge_cdata.bge_rx_jumbo_ring_map);
2059
2060 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2061 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2062
2063 /* Destroy RX return ring. */
2064 if (sc->bge_cdata.bge_rx_return_ring_map)
2065 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2066 sc->bge_cdata.bge_rx_return_ring_map);
2067
2068 if (sc->bge_cdata.bge_rx_return_ring_map &&
2069 sc->bge_ldata.bge_rx_return_ring)
2070 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2071 sc->bge_ldata.bge_rx_return_ring,
2072 sc->bge_cdata.bge_rx_return_ring_map);
2073
2074 if (sc->bge_cdata.bge_rx_return_ring_tag)
2075 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2076
2077 /* Destroy TX ring. */
2078 if (sc->bge_cdata.bge_tx_ring_map)
2079 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2080 sc->bge_cdata.bge_tx_ring_map);
2081
2082 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2083 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2084 sc->bge_ldata.bge_tx_ring,
2085 sc->bge_cdata.bge_tx_ring_map);
2086
2087 if (sc->bge_cdata.bge_tx_ring_tag)
2088 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2089
2090 /* Destroy status block. */
2091 if (sc->bge_cdata.bge_status_map)
2092 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2093 sc->bge_cdata.bge_status_map);
2094
2095 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2096 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2097 sc->bge_ldata.bge_status_block,
2098 sc->bge_cdata.bge_status_map);
2099
2100 if (sc->bge_cdata.bge_status_tag)
2101 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2102
2103 /* Destroy statistics block. */
2104 if (sc->bge_cdata.bge_stats_map)
2105 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2106 sc->bge_cdata.bge_stats_map);
2107
2108 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2109 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2110 sc->bge_ldata.bge_stats,
2111 sc->bge_cdata.bge_stats_map);
2112
2113 if (sc->bge_cdata.bge_stats_tag)
2114 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2115
2116 /* Destroy the parent tag. */
2117 if (sc->bge_cdata.bge_parent_tag)
2118 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2119}
2120
2121static int
2122bge_dma_alloc(device_t dev)
2123{
2124 struct bge_dmamap_arg ctx;
2125 struct bge_softc *sc;
2126 bus_addr_t lowaddr;
2127 bus_size_t sbsz, txsegsz, txmaxsegsz;
2128 int i, error;
2129
2130 sc = device_get_softc(dev);
2131
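	/*
	 * Restrict the DMA address range for chips with known DMA bugs:
	 * 40-bit addressing for the 5714/5715 bridge bug, and 32-bit
	 * addressing for chips that hang when a DMA transfer crosses a
	 * 4GB boundary.
	 */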
2132 lowaddr = BUS_SPACE_MAXADDR;
2133 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2134 lowaddr = BGE_DMA_MAXADDR;
2135 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0)
2136 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2137 /*
2138 * Allocate the parent bus DMA tag appropriate for PCI.
2139 */
2140 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2141 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2142 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2143 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2144
2145 if (error != 0) {
2146 device_printf(sc->bge_dev,
2147 "could not allocate parent dma tag\n");
2148 return (ENOMEM);
2149 }
2150
2151 /*
2152 * Create tag for Tx mbufs.
2153 */
2154 if (sc->bge_flags & BGE_FLAG_TSO) {
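		/*
		 * A TSO frame carries at most 64KB of TCP/IP data (the
		 * 16-bit IP total length limit) plus the link-level header.
		 */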
2155 txsegsz = BGE_TSOSEG_SZ;
2156 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2157 } else {
2158 txsegsz = MCLBYTES;
2159 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2160 }
2161 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
2162 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2163 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2164 &sc->bge_cdata.bge_tx_mtag);
2165
2166 if (error) {
2167 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2168 return (ENOMEM);
2169 }
2170
2171 /*
2172 * Create tag for Rx mbufs.
2173 */
2174 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
2175 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2176 MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2177
2178 if (error) {
2179 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2180 return (ENOMEM);
2181 }
2182
2183 /* Create DMA maps for RX buffers. */
2184 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2185 &sc->bge_cdata.bge_rx_std_sparemap);
2186 if (error) {
2187 device_printf(sc->bge_dev,
2188 "can't create spare DMA map for RX\n");
2189 return (ENOMEM);
2190 }
2191 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2192 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2193 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2194 if (error) {
2195 device_printf(sc->bge_dev,
2196 "can't create DMA map for RX\n");
2197 return (ENOMEM);
2198 }
2199 }
2200
2201 /* Create DMA maps for TX buffers. */
2202 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2203 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2204 &sc->bge_cdata.bge_tx_dmamap[i]);
2205 if (error) {
2206 device_printf(sc->bge_dev,
2207 "can't create DMA map for TX\n");
2208 return (ENOMEM);
2209 }
2210 }
2211
2212 /* Create tag for standard RX ring. */
2213 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2214 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2215 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
2216 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
2217
2218 if (error) {
2219 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2220 return (ENOMEM);
2221 }
2222
2223 /* Allocate DMA'able memory for standard RX ring. */
2224 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2225 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2226 &sc->bge_cdata.bge_rx_std_ring_map);
2227 if (error)
2228 return (ENOMEM);
2229
2230 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2231
2232 /* Load the address of the standard RX ring. */
2233 ctx.bge_maxsegs = 1;
2234 ctx.sc = sc;
2235
2236 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2237 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2238 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2239
2240 if (error)
2241 return (ENOMEM);
2242
2243 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
2244
2245 /* Create tags for jumbo mbufs. */
2246 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2247 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2248 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2249 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2250 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2251 if (error) {
2252 device_printf(sc->bge_dev,
2253 "could not allocate jumbo dma tag\n");
2254 return (ENOMEM);
2255 }
2256
2257 /* Create tag for jumbo RX ring. */
2258 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2259 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2260 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2261 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2262
2263 if (error) {
2264 device_printf(sc->bge_dev,
2265 "could not allocate jumbo ring dma tag\n");
2266 return (ENOMEM);
2267 }
2268
2269 /* Allocate DMA'able memory for jumbo RX ring. */
2270 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2271 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
2272 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2273 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2274 if (error)
2275 return (ENOMEM);
2276
2277 /* Load the address of the jumbo RX ring. */
2278 ctx.bge_maxsegs = 1;
2279 ctx.sc = sc;
2280
2281 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2282 sc->bge_cdata.bge_rx_jumbo_ring_map,
2283 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2284 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2285
2286 if (error)
2287 return (ENOMEM);
2288
2289 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2290
2291 /* Create DMA maps for jumbo RX buffers. */
2292 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2293 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2294 if (error) {
2295 device_printf(sc->bge_dev,
2296 "can't create spare DMA map for jumbo RX\n");
2297 return (ENOMEM);
2298 }
2299 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2300 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2301 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2302 if (error) {
2303 device_printf(sc->bge_dev,
2304 "can't create DMA map for jumbo RX\n");
2305 return (ENOMEM);
2306 }
2307 }
2309 }
2310
2311 /* Create tag for RX return ring. */
2312 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2313 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2314 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2315 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2316
2317 if (error) {
2318 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2319 return (ENOMEM);
2320 }
2321
2322 /* Allocate DMA'able memory for RX return ring. */
2323 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2324 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2325 &sc->bge_cdata.bge_rx_return_ring_map);
2326 if (error)
2327 return (ENOMEM);
2328
2329 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2330 BGE_RX_RTN_RING_SZ(sc));
2331
2332 /* Load the address of the RX return ring. */
2333 ctx.bge_maxsegs = 1;
2334 ctx.sc = sc;
2335
2336 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2337 sc->bge_cdata.bge_rx_return_ring_map,
2338 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2339 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2340
2341 if (error)
2342 return (ENOMEM);
2343
2344 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2345
2346 /* Create tag for TX ring. */
2347 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2348 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2349 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2350 &sc->bge_cdata.bge_tx_ring_tag);
2351
2352 if (error) {
2353 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2354 return (ENOMEM);
2355 }
2356
2357 /* Allocate DMA'able memory for TX ring. */
2358 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2359 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2360 &sc->bge_cdata.bge_tx_ring_map);
2361 if (error)
2362 return (ENOMEM);
2363
2364 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2365
2366 /* Load the address of the TX ring. */
2367 ctx.bge_maxsegs = 1;
2368 ctx.sc = sc;
2369
2370 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2371 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2372 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2373
2374 if (error)
2375 return (ENOMEM);
2376
2377 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2378
2379 /*
2380 * Create tag for status block.
2381	 * Because we use only a single Tx/Rx/Rx return ring, use the
2382	 * minimum status block size, except on BCM5700 AX/BX, which
2383	 * seems to want to see the full status block size regardless
2384	 * of the configured number of rings.
2385 */
2386 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2387 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2388 sbsz = BGE_STATUS_BLK_SZ;
2389 else
2390 sbsz = 32;
2391 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2392 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2393 NULL, sbsz, 1, sbsz, 0, NULL, NULL, &sc->bge_cdata.bge_status_tag);
2394
2395 if (error) {
2396 device_printf(sc->bge_dev,
2397 "could not allocate status dma tag\n");
2398 return (ENOMEM);
2399 }
2400
2401 /* Allocate DMA'able memory for status block. */
2402 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2403 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2404 &sc->bge_cdata.bge_status_map);
2405 if (error)
2406 return (ENOMEM);
2407
2408 bzero((char *)sc->bge_ldata.bge_status_block, sbsz);
2409
2410 /* Load the address of the status block. */
2411 ctx.sc = sc;
2412 ctx.bge_maxsegs = 1;
2413
2414 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2415 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2416 sbsz, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2417
2418 if (error)
2419 return (ENOMEM);
2420
2421 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2422
2423 /* Create tag for statistics block. */
2424 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2425 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2426 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2427 &sc->bge_cdata.bge_stats_tag);
2428
2429 if (error) {
2430 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2431 return (ENOMEM);
2432 }
2433
2434 /* Allocate DMA'able memory for statistics block. */
2435 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2436 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2437 &sc->bge_cdata.bge_stats_map);
2438 if (error)
2439 return (ENOMEM);
2440
2441 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2442
2443	/* Load the address of the statistics block. */
2444 ctx.sc = sc;
2445 ctx.bge_maxsegs = 1;
2446
2447 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2448 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2449 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2450
2451 if (error)
2452 return (ENOMEM);
2453
2454 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2455
2456 return (0);
2457}
2458
2459/*
2460 * Return true if this device has more than one port.
2461 */
2462static int
2463bge_has_multiple_ports(struct bge_softc *sc)
2464{
2465 device_t dev = sc->bge_dev;
2466 u_int b, d, f, fscan, s;
2467
2468 d = pci_get_domain(dev);
2469 b = pci_get_bus(dev);
2470 s = pci_get_slot(dev);
2471 f = pci_get_function(dev);
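	/* A sibling PCI function in the same slot implies a second port. */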
2472 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2473 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2474 return (1);
2475 return (0);
2476}
2477
2478/*
2479 * Return true if MSI can be used with this device.
2480 */
2481static int
2482bge_can_use_msi(struct bge_softc *sc)
2483{
2484 int can_use_msi = 0;
2485
2486 switch (sc->bge_asicrev) {
2487 case BGE_ASICREV_BCM5714_A0:
2488 case BGE_ASICREV_BCM5714:
2489 /*
2490 * Apparently, MSI doesn't work when these chips are
2491 * configured in single-port mode.
2492 */
2493 if (bge_has_multiple_ports(sc))
2494 can_use_msi = 1;
2495 break;
2496 case BGE_ASICREV_BCM5750:
2497 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2498 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2499 can_use_msi = 1;
2500 break;
2501 default:
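		/* Other 575X-and-newer chips are expected to support MSI. */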
2502 if (BGE_IS_575X_PLUS(sc))
2503 can_use_msi = 1;
2504 }
2505 return (can_use_msi);
2506}
2507
2508static int
2509bge_attach(device_t dev)
2510{
2511 struct ifnet *ifp;
2512 struct bge_softc *sc;
2513 uint32_t hwcfg = 0, misccfg;
2514 u_char eaddr[ETHER_ADDR_LEN];
2515 int error, msicount, reg, rid, trys;
2516
2517 sc = device_get_softc(dev);
2518 sc->bge_dev = dev;
2519
2520 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2521
2522 /*
2523 * Map control/status registers.
2524 */
2525 pci_enable_busmaster(dev);
2526
2527 rid = BGE_PCI_BAR0;
2528 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2529 RF_ACTIVE);
2530
2531 if (sc->bge_res == NULL) {
2532		device_printf(sc->bge_dev, "couldn't map memory\n");
2533 error = ENXIO;
2534 goto fail;
2535 }
2536
2537 /* Save various chip information. */
2538 sc->bge_chipid =
2539 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2540 BGE_PCIMISCCTL_ASICREV_SHIFT;
2541 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2542 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2543 4);
2544 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2545 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2546
2547 /*
2548 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2549 * 5705 A0 and A1 chips.
2550 */
2551 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2552 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2553 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2554 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2555 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2556
2557 if (bge_has_eaddr(sc))
2558 sc->bge_flags |= BGE_FLAG_EADDR;
2559
2560 /* Save chipset family. */
2561 switch (sc->bge_asicrev) {
2562 case BGE_ASICREV_BCM5755:
2563 case BGE_ASICREV_BCM5761:
2564 case BGE_ASICREV_BCM5784:
2565 case BGE_ASICREV_BCM5785:
2566 case BGE_ASICREV_BCM5787:
2567 case BGE_ASICREV_BCM57780:
2568 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2569 BGE_FLAG_5705_PLUS;
2570 break;
2571 case BGE_ASICREV_BCM5700:
2572 case BGE_ASICREV_BCM5701:
2573 case BGE_ASICREV_BCM5703:
2574 case BGE_ASICREV_BCM5704:
2575 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2576 break;
2577 case BGE_ASICREV_BCM5714_A0:
2578 case BGE_ASICREV_BCM5780:
2579 case BGE_ASICREV_BCM5714:
2580 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2581 /* FALLTHROUGH */
2582 case BGE_ASICREV_BCM5750:
2583 case BGE_ASICREV_BCM5752:
2584 case BGE_ASICREV_BCM5906:
2585 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2586 /* FALLTHROUGH */
2587 case BGE_ASICREV_BCM5705:
2588 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2589 break;
2590 }
2591
2592 /* Set various bug flags. */
2593 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2594 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2595 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2596 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2597 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2598 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2599 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2600 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2601 if (BGE_IS_5705_PLUS(sc) &&
2602 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2603 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2604 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2605 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2606 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2607 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2608 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2609 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2610 sc->bge_flags |= BGE_FLAG_BER_BUG;
2611 }
2612
2613 /*
2614	 * All controllers that are not 5755 or higher have a 4GB
2615	 * boundary DMA bug.
2616	 * Whenever an address crosses a multiple of the 4GB boundary
2617	 * (4GB, 8GB, 12GB, etc.) and makes the transition
2618	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2619	 * state machine will lock up and cause the device to hang.
2620 */
2621 if (BGE_IS_5755_PLUS(sc) == 0)
2622 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2623
2624 /*
2625 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2626 * but I do not know the DEVICEID for the 5788M.
2627 */
2628 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2629 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2630 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2631 sc->bge_flags |= BGE_FLAG_5788;
2632
2633 /*
2634	 * Some controllers seem to require special firmware to use
2635	 * TSO. But that firmware is not available to FreeBSD, and Linux
2636	 * claims that TSO performed by the firmware is slower than
2637	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2638	 * known bug: it can't handle TSO if the ethernet header plus
2639	 * the IP/TCP header is greater than 80 bytes. A workaround
2640	 * exists, but it seems more expensive than simply not using
2641	 * TSO. Some chips also have other TSO bugs, so limit TSO to
2642	 * controllers that are not affected by these issues
2643	 * (e.g. 5755 or higher).
2644 */
2645 if (BGE_IS_5755_PLUS(sc))
2646 sc->bge_flags |= BGE_FLAG_TSO;
2647
2648 /*
2649 * Check if this is a PCI-X or PCI Express device.
2650 */
2651 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2652 /*
2653 * Found a PCI Express capabilities register, this
2654 * must be a PCI Express device.
2655 */
2656 sc->bge_flags |= BGE_FLAG_PCIE;
2657 sc->bge_expcap = reg;
2658 bge_set_max_readrq(sc);
2659 } else {
2660 /*
2661 * Check if the device is in PCI-X Mode.
2662 * (This bit is not valid on PCI Express controllers.)
2663 */
2664 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2665 sc->bge_pcixcap = reg;
2666 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2667 BGE_PCISTATE_PCI_BUSMODE) == 0)
2668 sc->bge_flags |= BGE_FLAG_PCIX;
2669 }
2670
2671 /*
2672	 * The 40-bit DMA bug applies to the 5714/5715 controllers and is
2673	 * not actually a MAC controller bug but an issue with the embedded
2674	 * PCIe to PCI-X bridge in the device. Use the 40-bit DMA workaround.
2675 */
2676 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2677 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2678 /*
2679 * Allocate the interrupt, using MSI if possible. These devices
2680 * support 8 MSI messages, but only the first one is used in
2681 * normal operation.
2682 */
2683 rid = 0;
2684 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2685 sc->bge_msicap = reg;
2686 if (bge_can_use_msi(sc)) {
2687 msicount = pci_msi_count(dev);
2688 if (msicount > 1)
2689 msicount = 1;
2690 } else
2691 msicount = 0;
2692 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2693 rid = 1;
2694 sc->bge_flags |= BGE_FLAG_MSI;
2695 }
2696 }
2697
2698 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2699 RF_SHAREABLE | RF_ACTIVE);
2700
2701 if (sc->bge_irq == NULL) {
2702 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2703 error = ENXIO;
2704 goto fail;
2705 }
2706
2707 if (bootverbose)
2708 device_printf(dev,
2709 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2710 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2711 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2712 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2713
2714 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2715
2716 /* Try to reset the chip. */
2717 if (bge_reset(sc)) {
2718 device_printf(sc->bge_dev, "chip reset failed\n");
2719 error = ENXIO;
2720 goto fail;
2721 }
2722
2723 sc->bge_asf_mode = 0;
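	/*
	 * If the firmware left its magic number in NIC shared memory and
	 * the ASF config bit is set, ASF management firmware is running.
	 */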
2724 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2725 == BGE_MAGIC_NUMBER)) {
2726 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2727 & BGE_HWCFG_ASF) {
2728 sc->bge_asf_mode |= ASF_ENABLE;
2729 sc->bge_asf_mode |= ASF_STACKUP;
2730 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2731 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2732 }
2733 }
2734 }
2735
2736	/* Try to reset the chip again, this time the nice way. */
2737 bge_stop_fw(sc);
2738 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2739 if (bge_reset(sc)) {
2740 device_printf(sc->bge_dev, "chip reset failed\n");
2741 error = ENXIO;
2742 goto fail;
2743 }
2744
2745 bge_sig_legacy(sc, BGE_RESET_STOP);
2746 bge_sig_post_reset(sc, BGE_RESET_STOP);
2747
2748 if (bge_chipinit(sc)) {
2749 device_printf(sc->bge_dev, "chip initialization failed\n");
2750 error = ENXIO;
2751 goto fail;
2752 }
2753
2754 error = bge_get_eaddr(sc, eaddr);
2755 if (error) {
2756 device_printf(sc->bge_dev,
2757 "failed to read station address\n");
2758 error = ENXIO;
2759 goto fail;
2760 }
2761
2762 /* 5705 limits RX return ring to 512 entries. */
2763 if (BGE_IS_5705_PLUS(sc))
2764 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2765 else
2766 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2767
2768 if (bge_dma_alloc(dev)) {
2769 device_printf(sc->bge_dev,
2770 "failed to allocate DMA resources\n");
2771 error = ENXIO;
2772 goto fail;
2773 }
2774
2775 /* Set default tuneable values. */
2776 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2777 sc->bge_rx_coal_ticks = 150;
2778 sc->bge_tx_coal_ticks = 150;
2779 sc->bge_rx_max_coal_bds = 10;
2780 sc->bge_tx_max_coal_bds = 10;
2781
2782 /* Set up ifnet structure */
2783 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2784 if (ifp == NULL) {
2785 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2786 error = ENXIO;
2787 goto fail;
2788 }
2789 ifp->if_softc = sc;
2790 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2791 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2792 ifp->if_ioctl = bge_ioctl;
2793 ifp->if_start = bge_start;
2794 ifp->if_init = bge_init;
2795 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2796 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2797 IFQ_SET_READY(&ifp->if_snd);
2798 ifp->if_hwassist = BGE_CSUM_FEATURES;
2799 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2800 IFCAP_VLAN_MTU;
2801 if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
2802 ifp->if_hwassist |= CSUM_TSO;
2803 ifp->if_capabilities |= IFCAP_TSO4;
2804 }
2805#ifdef IFCAP_VLAN_HWCSUM
2806 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2807#endif
2808 ifp->if_capenable = ifp->if_capabilities;
2809#ifdef DEVICE_POLLING
2810 ifp->if_capabilities |= IFCAP_POLLING;
2811#endif
2812
2813 /*
2814 * 5700 B0 chips do not support checksumming correctly due
2815 * to hardware bugs.
2816 */
2817 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2818 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2819 ifp->if_capenable &= ~IFCAP_HWCSUM;
2820 ifp->if_hwassist = 0;
2821 }
2822
2823 /*
2824 * Figure out what sort of media we have by checking the
2825 * hardware config word in the first 32k of NIC internal memory,
2826 * or fall back to examining the EEPROM if necessary.
2827 * Note: on some BCM5700 cards, this value appears to be unset.
2828 * If that's the case, we have to rely on identifying the NIC
2829 * by its PCI subsystem ID, as we do below for the SysKonnect
2830 * SK-9D41.
2831 */
2832 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2833 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2834 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2835 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2836 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2837 sizeof(hwcfg))) {
2838 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2839 error = ENXIO;
2840 goto fail;
2841 }
2842 hwcfg = ntohl(hwcfg);
2843 }
2844
2845 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2846 sc->bge_flags |= BGE_FLAG_TBI;
2847
2848 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2849 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2850 sc->bge_flags |= BGE_FLAG_TBI;
2851
2852 if (sc->bge_flags & BGE_FLAG_TBI) {
2853 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2854 bge_ifmedia_sts);
2855 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2856 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2857 0, NULL);
2858 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2859 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2860 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2861 } else {
2862 /*
2863		 * driver is down so we can try to get access to
2864		 * probe the PHY if ASF is running. Retry a couple of
2865		 * times if we get a conflict with the ASF firmware
2866		 * accessing the PHY.
2867 * the PHY.
2868 */
2869 trys = 0;
2870 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2871again:
2872 bge_asf_driver_up(sc);
2873
2874 if (mii_phy_probe(dev, &sc->bge_miibus,
2875 bge_ifmedia_upd, bge_ifmedia_sts)) {
2876 if (trys++ < 4) {
2877 device_printf(sc->bge_dev, "Try again\n");
2878 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2879 BMCR_RESET);
2880 goto again;
2881 }
2882
2883 device_printf(sc->bge_dev, "MII without any PHY!\n");
2884 error = ENXIO;
2885 goto fail;
2886 }
2887
2888 /*
2889 * Now tell the firmware we are going up after probing the PHY
2890 */
2891 if (sc->bge_asf_mode & ASF_STACKUP)
2892 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2893 }
2894
2895 /*
2896 * When using the BCM5701 in PCI-X mode, data corruption has
2897 * been observed in the first few bytes of some received packets.
2898 * Aligning the packet buffer in memory eliminates the corruption.
2899 * Unfortunately, this misaligns the packet payloads. On platforms
2900 * which do not support unaligned accesses, we will realign the
2901 * payloads by copying the received packets.
2902 */
2903 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2904 sc->bge_flags & BGE_FLAG_PCIX)
2905 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2906
2907 /*
2908 * Call MI attach routine.
2909 */
2910 ether_ifattach(ifp, eaddr);
2911 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2912
2913 /* Tell upper layer we support long frames. */
2914 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2915
2916 /*
2917 * Hookup IRQ last.
2918 */
2919#if __FreeBSD_version > 700030
2920 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
2921 /* Take advantage of single-shot MSI. */
2922 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
2923 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
2924 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
2925 taskqueue_thread_enqueue, &sc->bge_tq);
2926 if (sc->bge_tq == NULL) {
2927 device_printf(dev, "could not create taskqueue.\n");
2928 ether_ifdetach(ifp);
2929 error = ENXIO;
2930 goto fail;
2931 }
2932 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
2933 device_get_nameunit(sc->bge_dev));
2934 error = bus_setup_intr(dev, sc->bge_irq,
2935 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
2936 &sc->bge_intrhand);
2937 if (error)
2938 ether_ifdetach(ifp);
2939 } else
2940 error = bus_setup_intr(dev, sc->bge_irq,
2941 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
2942 &sc->bge_intrhand);
2943#else
2944 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2945 bge_intr, sc, &sc->bge_intrhand);
2946#endif
2947
2948 if (error) {
2949 bge_detach(dev);
2950 device_printf(sc->bge_dev, "couldn't set up irq\n");
2951 }
2952
2953 bge_add_sysctls(sc);
2954
2955 return (0);
2956
2957fail:
2958 bge_release_resources(sc);
2959
2960 return (error);
2961}
2962
2963static int
2964bge_detach(device_t dev)
2965{
2966 struct bge_softc *sc;
2967 struct ifnet *ifp;
2968
2969 sc = device_get_softc(dev);
2970 ifp = sc->bge_ifp;
2971
2972#ifdef DEVICE_POLLING
2973 if (ifp->if_capenable & IFCAP_POLLING)
2974 ether_poll_deregister(ifp);
2975#endif
2976
2977 BGE_LOCK(sc);
2978 bge_stop(sc);
2979 bge_reset(sc);
2980 BGE_UNLOCK(sc);
2981
2982 callout_drain(&sc->bge_stat_ch);
2983
2984 if (sc->bge_tq)
2985 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
2986 ether_ifdetach(ifp);
2987
2988 if (sc->bge_flags & BGE_FLAG_TBI) {
2989 ifmedia_removeall(&sc->bge_ifmedia);
2990 } else {
2991 bus_generic_detach(dev);
2992 device_delete_child(dev, sc->bge_miibus);
2993 }
2994
2995 bge_release_resources(sc);
2996
2997 return (0);
2998}
2999
3000static void
3001bge_release_resources(struct bge_softc *sc)
3002{
3003 device_t dev;
3004
3005 dev = sc->bge_dev;
3006
3007 if (sc->bge_tq != NULL)
3008 taskqueue_free(sc->bge_tq);
3009
3010 if (sc->bge_intrhand != NULL)
3011 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3012
3013 if (sc->bge_irq != NULL)
3014 bus_release_resource(dev, SYS_RES_IRQ,
3015 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3016
3017 if (sc->bge_flags & BGE_FLAG_MSI)
3018 pci_release_msi(dev);
3019
3020 if (sc->bge_res != NULL)
3021 bus_release_resource(dev, SYS_RES_MEMORY,
3022 BGE_PCI_BAR0, sc->bge_res);
3023
3024 if (sc->bge_ifp != NULL)
3025 if_free(sc->bge_ifp);
3026
3027 bge_dma_free(sc);
3028
3029 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3030 BGE_LOCK_DESTROY(sc);
3031}
3032
3033static int
3034bge_reset(struct bge_softc *sc)
3035{
3036 device_t dev;
3037 uint32_t cachesize, command, pcistate, reset, val;
3038 void (*write_op)(struct bge_softc *, int, int);
3039 uint16_t devctl;
3040 int i;
3041
3042 dev = sc->bge_dev;
3043
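	/*
	 * Choose how the reset register will be written: 575X and newer
	 * parts (other than the 5714 family and the 5906) use memory
	 * writes, direct on PCI Express and indirect otherwise, while
	 * all other chips use indirect register writes.
	 */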
3044 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3045 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3046 if (sc->bge_flags & BGE_FLAG_PCIE)
3047 write_op = bge_writemem_direct;
3048 else
3049 write_op = bge_writemem_ind;
3050 } else
3051 write_op = bge_writereg_ind;
3052
3053 /* Save some important PCI state. */
3054 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3055 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3056 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3057
3058 pci_write_config(dev, BGE_PCI_MISC_CTL,
3059 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3060 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3061
3062 /* Disable fastboot on controllers that support it. */
3063 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3064 BGE_IS_5755_PLUS(sc)) {
3065 if (bootverbose)
3066 device_printf(sc->bge_dev, "Disabling fastboot\n");
3067 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3068 }
3069
3070 /*
3071 * Write the magic number to SRAM at offset 0xB50.
3072 * When firmware finishes its initialization it will
3073 * write ~BGE_MAGIC_NUMBER to the same location.
3074 */
3075 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3076
3077 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3078
3079 /* XXX: Broadcom Linux driver. */
3080 if (sc->bge_flags & BGE_FLAG_PCIE) {
3081 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3082 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3083 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3084 /* Prevent PCIE link training during global reset */
3085 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3086 reset |= 1 << 29;
3087 }
3088 }
3089
3090 /*
3091 * Set GPHY Power Down Override to leave GPHY
3092 * powered up in D0 uninitialized.
3093 */
3094 if (BGE_IS_5705_PLUS(sc))
3095 reset |= 0x04000000;
3096
3097 /* Issue global reset */
3098 write_op(sc, BGE_MISC_CFG, reset);
3099
3100 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3101 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3102 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3103 val | BGE_VCPU_STATUS_DRV_RESET);
3104 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3105 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3106 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3107 }
3108
3109 DELAY(1000);
3110
3111 /* XXX: Broadcom Linux driver. */
3112 if (sc->bge_flags & BGE_FLAG_PCIE) {
3113 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3114 DELAY(500000); /* wait for link training to complete */
3115 val = pci_read_config(dev, 0xC4, 4);
3116 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3117 }
3118 devctl = pci_read_config(dev,
3119 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3120 /* Clear enable no snoop and disable relaxed ordering. */
3121 devctl &= ~(0x0010 | 0x0800);
3122 /* Set PCIE max payload size to 128. */
3123 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3124 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3125 devctl, 2);
3126 /* Clear error status. */
3127 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3128 0, 2);
3129 }
3130
3131 /* Reset some of the PCI state that got zapped by reset. */
3132 pci_write_config(dev, BGE_PCI_MISC_CTL,
3133 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3134 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3135 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3136 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3137 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3138
3139	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
3140 if (BGE_IS_5714_FAMILY(sc)) {
3141 /* This chip disables MSI on reset. */
3142 if (sc->bge_flags & BGE_FLAG_MSI) {
3143 val = pci_read_config(dev,
3144 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3145 pci_write_config(dev,
3146 sc->bge_msicap + PCIR_MSI_CTRL,
3147 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3148 val = CSR_READ_4(sc, BGE_MSI_MODE);
3149 CSR_WRITE_4(sc, BGE_MSI_MODE,
3150 val | BGE_MSIMODE_ENABLE);
3151 }
3152 val = CSR_READ_4(sc, BGE_MARB_MODE);
3153 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3154 } else
3155 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3156
3157 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3158 for (i = 0; i < BGE_TIMEOUT; i++) {
3159 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3160 if (val & BGE_VCPU_STATUS_INIT_DONE)
3161 break;
3162 DELAY(100);
3163 }
3164 if (i == BGE_TIMEOUT) {
3165 device_printf(sc->bge_dev, "reset timed out\n");
3166 return (1);
3167 }
3168 } else {
3169 /*
3170 * Poll until we see the 1's complement of the magic number.
3171 * This indicates that the firmware initialization is complete.
3172 * We expect this to fail if no chip containing the Ethernet
3173	 * address is fitted, though.
3174 */
3175 for (i = 0; i < BGE_TIMEOUT; i++) {
3176 DELAY(10);
3177 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3178 if (val == ~BGE_MAGIC_NUMBER)
3179 break;
3180 }
3181
3182 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3183 device_printf(sc->bge_dev, "firmware handshake timed out, "
3184 "found 0x%08x\n", val);
3185 }
3186
3187 /*
3188 * XXX Wait for the value of the PCISTATE register to
3189 * return to its original pre-reset state. This is a
3190 * fairly good indicator of reset completion. If we don't
3191 * wait for the reset to fully complete, trying to read
3192 * from the device's non-PCI registers may yield garbage
3193 * results.
3194 */
3195 for (i = 0; i < BGE_TIMEOUT; i++) {
3196 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3197 break;
3198 DELAY(10);
3199 }
3200
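	/* XXX: From the Broadcom Linux driver; sets a bit in an undocumented location. */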
3201 if (sc->bge_flags & BGE_FLAG_PCIE) {
3202 reset = bge_readmem_ind(sc, 0x7C00);
3203 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
3204 }
3205
3206 /* Fix up byte swapping. */
3207 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3208 BGE_MODECTL_BYTESWAP_DATA);
3209
3210 /* Tell the ASF firmware we are up */
3211 if (sc->bge_asf_mode & ASF_STACKUP)
3212 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3213
3214 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3215
3216 /*
3217 * The 5704 in TBI mode apparently needs some special
3218	 * adjustment to ensure the SERDES drive level is set
3219 * to 1.2V.
3220 */
3221 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3222 sc->bge_flags & BGE_FLAG_TBI) {
3223 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3224 val = (val & ~0xFFF) | 0x880;
3225 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3226 }
3227
3228 /* XXX: Broadcom Linux driver. */
3229 if (sc->bge_flags & BGE_FLAG_PCIE &&
3230 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3231 val = CSR_READ_4(sc, 0x7C00);
3232 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3233 }
3234 DELAY(10000);
3235
3236	return (0);
3237}
3238
3239/*
3240 * Frame reception handling. This is called if there's a frame
3241 * on the receive return list.
3242 *
3243 * Note: we have to be able to handle two possibilities here:
3244 * 1) the frame is from the jumbo receive ring
3245 * 2) the frame is from the standard receive ring
3246 */
3247
3248static int
3249bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3250{
3251 struct ifnet *ifp;
3252 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3253 uint16_t rx_cons;
3254
3255 rx_cons = sc->bge_rx_saved_considx;
3256
3257 /* Nothing to do. */
3258 if (rx_cons == rx_prod)
3259 return (rx_npkts);
3260
3261 ifp = sc->bge_ifp;
3262
3263 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3264 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3265 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3266 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
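	/*
	 * The jumbo ring is only in use when the configured MTU does
	 * not fit in a standard mbuf cluster.
	 */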
3267 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3268 (MCLBYTES - ETHER_ALIGN))
3269 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3270 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3271
3272 while (rx_cons != rx_prod) {
3273 struct bge_rx_bd *cur_rx;
3274 uint32_t rxidx;
3275 struct mbuf *m = NULL;
3276 uint16_t vlan_tag = 0;
3277 int have_tag = 0;
3278
3279#ifdef DEVICE_POLLING
3280 if (ifp->if_capenable & IFCAP_POLLING) {
3281 if (sc->rxcycles <= 0)
3282 break;
3283 sc->rxcycles--;
3284 }
3285#endif
3286
3287 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3288
3289 rxidx = cur_rx->bge_idx;
3290 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3291
3292 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3293 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3294 have_tag = 1;
3295 vlan_tag = cur_rx->bge_vlan_tag;
3296 }
3297
3298 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3299 jumbocnt++;
3300 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3301 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3302 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3303 continue;
3304 }
3305 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3306 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3307 ifp->if_iqdrops++;
3308 continue;
3309 }
3310 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3311 } else {
3312 stdcnt++;
3313 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3314 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3315 continue;
3316 }
3317 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3318 if (bge_newbuf_std(sc, rxidx) != 0) {
3319 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3320 ifp->if_iqdrops++;
3321 continue;
3322 }
3323 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3324 }
3325
3326 ifp->if_ipackets++;
3327#ifndef __NO_STRICT_ALIGNMENT
3328 /*
3329 * For architectures with strict alignment we must make sure
3330 * the payload is aligned.
3331 */
3332 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3333 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3334 cur_rx->bge_len);
3335 m->m_data += ETHER_ALIGN;
3336 }
3337#endif
3338 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3339 m->m_pkthdr.rcvif = ifp;
3340
3341 if (ifp->if_capenable & IFCAP_RXCSUM) {
3342 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3343 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3344 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3345 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3346 }
3347 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3348 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3349 m->m_pkthdr.csum_data =
3350 cur_rx->bge_tcp_udp_csum;
3351 m->m_pkthdr.csum_flags |=
3352 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3353 }
3354 }
3355
3356 /*
3357 * If we received a packet with a vlan tag,
3358 * attach that information to the packet.
3359 */
3360 if (have_tag) {
3361#if __FreeBSD_version > 700022
3362 m->m_pkthdr.ether_vtag = vlan_tag;
3363 m->m_flags |= M_VLANTAG;
3364#else
3365 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3366 if (m == NULL)
3367 continue;
3368#endif
3369 }
3370
3371 if (holdlck != 0) {
3372 BGE_UNLOCK(sc);
3373 (*ifp->if_input)(ifp, m);
3374 BGE_LOCK(sc);
3375 } else
3376 (*ifp->if_input)(ifp, m);
3377 rx_npkts++;
3378
3379 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3380 return (rx_npkts);
3381 }
3382
3383 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3384 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3385 if (stdcnt > 0)
3386 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3387 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3388
3389 if (jumbocnt > 0)
3390 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3391 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3392
3393 sc->bge_rx_saved_considx = rx_cons;
3394 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3395 if (stdcnt)
3396 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3397 if (jumbocnt)
3398 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3399#ifdef notyet
3400 /*
3401 * This register wraps very quickly under heavy packet drops.
3402 * If you need correct statistics, you can enable this check.
3403 */
3404 if (BGE_IS_5705_PLUS(sc))
3405 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3406#endif
3407 return (rx_npkts);
3408}
3409
3410static void
3411bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3412{
3413 struct bge_tx_bd *cur_tx = NULL;
3414 struct ifnet *ifp;
3415
3416 BGE_LOCK_ASSERT(sc);
3417
3418 /* Nothing to do. */
3419 if (sc->bge_tx_saved_considx == tx_cons)
3420 return;
3421
3422 ifp = sc->bge_ifp;
3423
3424 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3425 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3426 /*
3427 * Go through our tx ring and free mbufs for those
3428 * frames that have been sent.
3429 */
3430 while (sc->bge_tx_saved_considx != tx_cons) {
3431 uint32_t idx = 0;
3432
3433 idx = sc->bge_tx_saved_considx;
3434 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3435 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3436 ifp->if_opackets++;
3437 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3438 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3439 sc->bge_cdata.bge_tx_dmamap[idx],
3440 BUS_DMASYNC_POSTWRITE);
3441 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3442 sc->bge_cdata.bge_tx_dmamap[idx]);
3443 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3444 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3445 }
3446 sc->bge_txcnt--;
3447 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3448 }
3449
3450 if (cur_tx != NULL)
3451 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3452 if (sc->bge_txcnt == 0)
3453 sc->bge_timer = 0;
3454}
3455
3456#ifdef DEVICE_POLLING
3457static int
3458bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3459{
3460 struct bge_softc *sc = ifp->if_softc;
3461 uint16_t rx_prod, tx_cons;
3462 uint32_t statusword;
3463 int rx_npkts = 0;
3464
3465 BGE_LOCK(sc);
3466 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3467 BGE_UNLOCK(sc);
3468 return (rx_npkts);
3469 }
3470
3471 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3472 sc->bge_cdata.bge_status_map,
3473 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3474 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3475 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3476
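	/*
	 * Fetch and zero the status word in a single operation to
	 * narrow the window in which a fresh update from the chip
	 * could be wiped out.
	 */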
3477 statusword = atomic_readandclear_32(
3478 &sc->bge_ldata.bge_status_block->bge_status);
3479
3480 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3481 sc->bge_cdata.bge_status_map,
3482 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3483
3484 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3485 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3486 sc->bge_link_evt++;
3487
3488 if (cmd == POLL_AND_CHECK_STATUS)
3489 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3490 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3491 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3492 bge_link_upd(sc);
3493
3494 sc->rxcycles = count;
3495 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3496 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3497 BGE_UNLOCK(sc);
3498 return (rx_npkts);
3499 }
3500 bge_txeof(sc, tx_cons);
3501 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3502 bge_start_locked(ifp);
3503
3504 BGE_UNLOCK(sc);
3505 return (rx_npkts);
3506}
3507#endif /* DEVICE_POLLING */
3508
3509static int
3510bge_msi_intr(void *arg)
3511{
3512 struct bge_softc *sc;
3513
3514 sc = (struct bge_softc *)arg;
3515 /*
3516	 * This interrupt is not shared and the controller has already
3517	 * disabled further interrupts.
3518 */
3519 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3520 return (FILTER_HANDLED);
3521}
3522
3523static void
3524bge_intr_task(void *arg, int pending)
3525{
3526 struct bge_softc *sc;
3527 struct ifnet *ifp;
3528 uint32_t status;
3529 uint16_t rx_prod, tx_cons;
3530
3531 sc = (struct bge_softc *)arg;
3532 ifp = sc->bge_ifp;
3533
3534 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3535 return;
3536
3537 /* Get updated status block. */
3538 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3539 sc->bge_cdata.bge_status_map,
3540 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3541
3542	/* Save producer/consumer indexes. */
3543 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3544 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3545 status = sc->bge_ldata.bge_status_block->bge_status;
3546 sc->bge_ldata.bge_status_block->bge_status = 0;
3547 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3548 sc->bge_cdata.bge_status_map,
3549 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3550 /* Let controller work. */
3551 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3552
3553 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) {
3554 BGE_LOCK(sc);
3555 bge_link_upd(sc);
3556 BGE_UNLOCK(sc);
3557 }
3558 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3559 /* Check RX return ring producer/consumer. */
3560 bge_rxeof(sc, rx_prod, 0);
3561 }
3562 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3563 BGE_LOCK(sc);
3564 /* Check TX ring producer/consumer. */
3565 bge_txeof(sc, tx_cons);
3566 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3567 bge_start_locked(ifp);
3568 BGE_UNLOCK(sc);
3569 }
3570}
3571
3572static void
3573bge_intr(void *xsc)
3574{
3575 struct bge_softc *sc;
3576 struct ifnet *ifp;
3577 uint32_t statusword;
3578 uint16_t rx_prod, tx_cons;
3579
3580 sc = xsc;
3581
3582 BGE_LOCK(sc);
3583
3584 ifp = sc->bge_ifp;
3585
3586#ifdef DEVICE_POLLING
3587 if (ifp->if_capenable & IFCAP_POLLING) {
3588 BGE_UNLOCK(sc);
3589 return;
3590 }
3591#endif
3592
3593 /*
3594 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3595 * disable interrupts by writing nonzero like we used to, since with
3596 * our current organization this just gives complications and
3597 * pessimizations for re-enabling interrupts. We used to have races
3598 * instead of the necessary complications. Disabling interrupts
3599 * would just reduce the chance of a status update while we are
3600 * running (by switching to the interrupt-mode coalescence
3601 * parameters), but this chance is already very low so it is more
3602 * efficient to get another interrupt than prevent it.
3603 *
3604 * We do the ack first to ensure another interrupt if there is a
3605 * status update after the ack. We don't check for the status
3606 * changing later because it is more efficient to get another
3607 * interrupt than prevent it, not quite as above (not checking is
3608 * a smaller optimization than not toggling the interrupt enable,
3609	 * since checking doesn't involve PCI accesses and toggling requires
3610 * the status check). So toggling would probably be a pessimization
3611 * even with MSI. It would only be needed for using a task queue.
3612 */
3613 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3614
3615 /*
3616 * Do the mandatory PCI flush as well as get the link status.
3617 */
3618 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3619
3620 /* Make sure the descriptor ring indexes are coherent. */
3621 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3622 sc->bge_cdata.bge_status_map,
3623 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3624 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3625 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3626 sc->bge_ldata.bge_status_block->bge_status = 0;
3627 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3628 sc->bge_cdata.bge_status_map,
3629 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3630
3631 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3632 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3633 statusword || sc->bge_link_evt)
3634 bge_link_upd(sc);
3635
3636 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3637 /* Check RX return ring producer/consumer. */
3638 bge_rxeof(sc, rx_prod, 1);
3639 }
3640
3641 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3642 /* Check TX ring producer/consumer. */
3643 bge_txeof(sc, tx_cons);
3644 }
3645
3646 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3647 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3648 bge_start_locked(ifp);
3649
3650 BGE_UNLOCK(sc);
3651}
3652
3653static void
3654bge_asf_driver_up(struct bge_softc *sc)
3655{
3656 if (sc->bge_asf_mode & ASF_STACKUP) {
3657		/* Send ASF heartbeat approx. every 2s */
3658		if (sc->bge_asf_count)
3659			sc->bge_asf_count--;
3660 else {
3661 sc->bge_asf_count = 5;
3662 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3663 BGE_FW_DRV_ALIVE);
3664 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3665 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3666 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3667 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3668 }
3669 }
3670}
3671
3672static void
3673bge_tick(void *xsc)
3674{
3675 struct bge_softc *sc = xsc;
3676 struct mii_data *mii = NULL;
3677
3678 BGE_LOCK_ASSERT(sc);
3679
3680 /* Synchronize with possible callout reset/stop. */
3681 if (callout_pending(&sc->bge_stat_ch) ||
3682 !callout_active(&sc->bge_stat_ch))
3683 return;
3684
3685 if (BGE_IS_5705_PLUS(sc))
3686 bge_stats_update_regs(sc);
3687 else
3688 bge_stats_update(sc);
3689
3690 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3691 mii = device_get_softc(sc->bge_miibus);
3692 /*
3693 * Do not touch PHY if we have link up. This could break
3694 * IPMI/ASF mode or produce extra input errors
3695		 * (extra errors were reported for bcm5701 & bcm5704).
3696 */
3697 if (!sc->bge_link)
3698 mii_tick(mii);
3699 } else {
3700 /*
3701		 * Since auto-polling can't be used in TBI mode, we poll
3702		 * link status manually.  Here we register a pending link
3703		 * event and trigger an interrupt.
3704 */
3705#ifdef DEVICE_POLLING
3706 /* In polling mode we poll link state in bge_poll(). */
3707 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3708#endif
3709 {
3710 sc->bge_link_evt++;
3711 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3712 sc->bge_flags & BGE_FLAG_5788)
3713 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3714 else
3715 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3716 }
3717 }
3718
3719 bge_asf_driver_up(sc);
3720 bge_watchdog(sc);
3721
3722 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3723}
3724
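/*
 * The 5705 and newer chips do not provide the full DMA'ed statistics
 * block used by bge_stats_update(), so the counters we track are read
 * directly from chip registers instead.
 */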
3725static void
3726bge_stats_update_regs(struct bge_softc *sc)
3727{
3728 struct ifnet *ifp;
3729
3730 ifp = sc->bge_ifp;
3731
3732 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3733 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3734
3735 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3736 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3737 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3738}
3739
3740static void
3741bge_stats_update(struct bge_softc *sc)
3742{
3743 struct ifnet *ifp;
3744 bus_size_t stats;
3745 uint32_t cnt; /* current register value */
3746
3747 ifp = sc->bge_ifp;
3748
3749 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3750
3751#define READ_STAT(sc, stats, stat) \
3752 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3753
3754 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3755 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3756 sc->bge_tx_collisions = cnt;
3757
3758 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3759 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3760 sc->bge_rx_discards = cnt;
3761
3762 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3763 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3764 sc->bge_tx_discards = cnt;
3765
3766#undef READ_STAT
3767}
3768
3769/*
3770 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3771 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3772 * but when such padded frames employ the bge IP/TCP checksum offload,
3773 * the hardware checksum assist gives incorrect results (possibly
3774 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3775 * If we pad such runts with zeros, the onboard checksum comes out correct.
3776 */
3777static __inline int
3778bge_cksum_pad(struct mbuf *m)
3779{
3780 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3781 struct mbuf *last;
3782
3783 /* If there's only the packet-header and we can pad there, use it. */
3784 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3785 M_TRAILINGSPACE(m) >= padlen) {
3786 last = m;
3787 } else {
3788 /*
3789 * Walk packet chain to find last mbuf. We will either
3790 * pad there, or append a new mbuf and pad it.
3791 */
3792 for (last = m; last->m_next != NULL; last = last->m_next);
3793 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3794 /* Allocate new empty mbuf, pad it. Compact later. */
3795 struct mbuf *n;
3796
3797 MGET(n, M_DONTWAIT, MT_DATA);
3798 if (n == NULL)
3799 return (ENOBUFS);
3800 n->m_len = 0;
3801 last->m_next = n;
3802 last = n;
3803 }
3804 }
3805
3806 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3807 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3808 last->m_len += padlen;
3809 m->m_pkthdr.len += padlen;
3810
3811 return (0);
3812}
3813
3814static struct mbuf *
3815bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
3816{
3817 struct ether_header *eh;
3818 struct ip *ip;
3819 struct tcphdr *tcp;
3820 struct mbuf *n;
3821 uint16_t hlen;
3822 uint32_t ip_off, poff;
3823
3824 if (M_WRITABLE(m) == 0) {
3825 /* Get a writable copy. */
3826 n = m_dup(m, M_DONTWAIT);
3827 m_freem(m);
3828 if (n == NULL)
3829 return (NULL);
3830 m = n;
3831 }
3832 ip_off = sizeof(struct ether_header);
3833 m = m_pullup(m, ip_off);
3834 if (m == NULL)
3835 return (NULL);
3836 eh = mtod(m, struct ether_header *);
3837 /* Check the existence of VLAN tag. */
3838 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
3839 ip_off = sizeof(struct ether_vlan_header);
3840 m = m_pullup(m, ip_off);
3841 if (m == NULL)
3842 return (NULL);
3843 }
3844 m = m_pullup(m, ip_off + sizeof(struct ip));
3845 if (m == NULL)
3846 return (NULL);
3847 ip = (struct ip *)(mtod(m, char *) + ip_off);
3848 poff = ip_off + (ip->ip_hl << 2);
3849 m = m_pullup(m, poff + sizeof(struct tcphdr));
3850 if (m == NULL)
3851 return (NULL);
3852 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
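	/*
	 * XXX: th_off is in units of 32-bit words, so the pullup below
	 * may be a few bytes short when large TCP options are present.
	 */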
3853 m = m_pullup(m, poff + sizeof(struct tcphdr) + tcp->th_off);
3854 if (m == NULL)
3855 return (NULL);
3856 /*
3857	 * It seems the controller doesn't modify the IP length and TCP
3858	 * pseudo checksum; those computed by the upper stack should be 0.
3859 */
3860 *mss = m->m_pkthdr.tso_segsz;
3861 ip->ip_sum = 0;
3862 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
3863 /* Clear pseudo checksum computed by TCP stack. */
3864 tcp->th_sum = 0;
3865 /*
3866	 * Broadcom controllers use a different descriptor format for
3867	 * TSO depending on ASIC revision.  Due to TSO-capable firmware
3868	 * license issues and the lower performance of firmware-based
3869	 * TSO, we only support hardware-based TSO, which is available
3870	 * on BCM5755 or newer controllers.  Hardware-based TSO uses the
3871	 * low 11 bits to store the MSS and the upper 5 bits to store
3872	 * the IP/TCP header length (including IP/TCP options).  The
3873	 * header length is expressed in 32-bit units.
3874 */
3875 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
3876 *mss |= (hlen << 11);
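	/*
	 * For example, with a standard 20-byte IP header and a 20-byte
	 * TCP header, hlen = (20 + 20) >> 2 = 10, so the value written
	 * to the descriptor is mss | (10 << 11).
	 */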
3877 return (m);
3878}
3879
3880/*
3881 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3882 * pointers to descriptors.
3883 */
3884static int
3885bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3886{
3887 bus_dma_segment_t segs[BGE_NSEG_NEW];
3888 bus_dmamap_t map;
3889 struct bge_tx_bd *d;
3890 struct mbuf *m = *m_head;
3891 uint32_t idx = *txidx;
3892 uint16_t csum_flags, mss, vlan_tag;
3893 int nsegs, i, error;
3894
3895 csum_flags = 0;
3896 mss = 0;
3897 vlan_tag = 0;
3898 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
3899 *m_head = m = bge_setup_tso(sc, m, &mss);
3900 if (*m_head == NULL)
3901 return (ENOBUFS);
3902 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
3903 BGE_TXBDFLAG_CPU_POST_DMA;
3904 } else if ((m->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) != 0) {
3905 if (m->m_pkthdr.csum_flags & CSUM_IP)
3906 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3907 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3908 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3909 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3910 (error = bge_cksum_pad(m)) != 0) {
3911 m_freem(m);
3912 *m_head = NULL;
3913 return (error);
3914 }
3915 }
3916 if (m->m_flags & M_LASTFRAG)
3917 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3918 else if (m->m_flags & M_FRAG)
3919 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3920 }
3921
3922	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3923	    sc->bge_forced_collapse > 0 &&
3924	    (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
3925		/*
3926		 * Forcibly collapse mbuf chains to overcome a hardware
3927		 * limitation which only supports a single outstanding
3928		 * DMA read operation.
3929		 */
3930		if (sc->bge_forced_collapse == 1)
3931			m = m_defrag(m, M_DONTWAIT);
3932		else
3933			m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
3934		if (m == NULL) {
3935			m_freem(*m_head);
3936			*m_head = NULL;
3937			return (ENOBUFS);
3938		}
3939		*m_head = m;
3940	}
3958
3959 map = sc->bge_cdata.bge_tx_dmamap[idx];
3960 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
3961 &nsegs, BUS_DMA_NOWAIT);
3962 if (error == EFBIG) {
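		/*
		 * The chain maps to too many DMA segments; collapse it
		 * to at most BGE_NSEG_NEW mbufs and retry the load once.
		 */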
3963 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
3964 if (m == NULL) {
3965 m_freem(*m_head);
3966 *m_head = NULL;
3967 return (ENOBUFS);
3968 }
3969 *m_head = m;
3970 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
3971 m, segs, &nsegs, BUS_DMA_NOWAIT);
3972 if (error) {
3973 m_freem(m);
3974 *m_head = NULL;
3975 return (error);
3976 }
3977 } else if (error != 0)
3978 return (error);
3979
3980 /* Check if we have enough free send BDs. */
3981 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
3982 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
3983 return (ENOBUFS);
3984 }
3985
3986 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3987
3988#if __FreeBSD_version > 700022
3989 if (m->m_flags & M_VLANTAG) {
3990 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3991 vlan_tag = m->m_pkthdr.ether_vtag;
3992 }
3993#else
3994 {
3995 struct m_tag *mtag;
3996
3997 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
3998 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3999 vlan_tag = VLAN_TAG_VALUE(mtag);
4000 }
4001 }
4002#endif
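	/*
	 * Fill one send BD per DMA segment.  The checksum flags, VLAN
	 * tag and MSS are replicated in every BD of the frame; only the
	 * last BD is marked with the END flag below.
	 */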
4003 for (i = 0; ; i++) {
4004 d = &sc->bge_ldata.bge_tx_ring[idx];
4005 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4006 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4007 d->bge_len = segs[i].ds_len;
4008 d->bge_flags = csum_flags;
4009 d->bge_vlan_tag = vlan_tag;
4010 d->bge_mss = mss;
4011 if (i == nsegs - 1)
4012 break;
4013 BGE_INC(idx, BGE_TX_RING_CNT);
4014 }
4015
4016 /* Mark the last segment as end of packet... */
4017 d->bge_flags |= BGE_TXBDFLAG_END;
4018
4019 /*
4020	 * Ensure that the map for this transmission
4021 * is placed at the array index of the last descriptor
4022 * in this chain.
4023 */
4024 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4025 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4026 sc->bge_cdata.bge_tx_chain[idx] = m;
4027 sc->bge_txcnt += nsegs;
4028
4029 BGE_INC(idx, BGE_TX_RING_CNT);
4030 *txidx = idx;
4031
4032 return (0);
4033}
4034
4035/*
4036 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4037 * to the mbuf data regions directly in the transmit descriptors.
4038 */
4039static void
4040bge_start_locked(struct ifnet *ifp)
4041{
4042 struct bge_softc *sc;
4043 struct mbuf *m_head;
4044 uint32_t prodidx;
4045 int count;
4046
4047 sc = ifp->if_softc;
4048 BGE_LOCK_ASSERT(sc);
4049
4050 if (!sc->bge_link ||
4051 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4052 IFF_DRV_RUNNING)
4053 return;
4054
4055 prodidx = sc->bge_tx_prodidx;
4056
4057 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
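		/* Always keep a cushion of at least 16 free send BDs. */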
4058 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4059 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4060 break;
4061 }
4062 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4063 if (m_head == NULL)
4064 break;
4065
4066 /*
4067 * XXX
4068 * The code inside the if() block is never reached since we
4069 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4070 * requests to checksum TCP/UDP in a fragmented packet.
4071 *
4072 * XXX
4073 * safety overkill. If this is a fragmented packet chain
4074 * with delayed TCP/UDP checksums, then only encapsulate
4075 * it if we have enough descriptors to handle the entire
4076 * chain at once.
4077 * (paranoia -- may not actually be needed)
4078 */
4079 if (m_head->m_flags & M_FIRSTFRAG &&
4080 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4081 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4082 m_head->m_pkthdr.csum_data + 16) {
4083 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4084 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4085 break;
4086 }
4087 }
4088
4089 /*
4090 * Pack the data into the transmit ring. If we
4091 * don't have room, set the OACTIVE flag and wait
4092 * for the NIC to drain the ring.
4093 */
4094 if (bge_encap(sc, &m_head, &prodidx)) {
4095 if (m_head == NULL)
4096 break;
4097 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4098 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4099 break;
4100 }
4101 ++count;
4102
4103 /*
4104 * If there's a BPF listener, bounce a copy of this frame
4105 * to him.
4106 */
4107#ifdef ETHER_BPF_MTAP
4108 ETHER_BPF_MTAP(ifp, m_head);
4109#else
4110 BPF_MTAP(ifp, m_head);
4111#endif
4112 }
4113
4114 if (count > 0) {
4115 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4116 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4117 /* Transmit. */
4118 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4119 /* 5700 b2 errata */
4120 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4121 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4122
4123 sc->bge_tx_prodidx = prodidx;
4124
4125 /*
4126 * Set a timeout in case the chip goes out to lunch.
4127 */
4128 sc->bge_timer = 5;
4129 }
4130}
4131
4132/*
4133 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4134 * to the mbuf data regions directly in the transmit descriptors.
4135 */
4136static void
4137bge_start(struct ifnet *ifp)
4138{
4139 struct bge_softc *sc;
4140
4141 sc = ifp->if_softc;
4142 BGE_LOCK(sc);
4143 bge_start_locked(ifp);
4144 BGE_UNLOCK(sc);
4145}
4146
4147static void
4148bge_init_locked(struct bge_softc *sc)
4149{
4150 struct ifnet *ifp;
4151 uint16_t *m;
4152
4153 BGE_LOCK_ASSERT(sc);
4154
4155 ifp = sc->bge_ifp;
4156
4157 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4158 return;
4159
4160 /* Cancel pending I/O and flush buffers. */
4161 bge_stop(sc);
4162
4163 bge_stop_fw(sc);
4164 bge_sig_pre_reset(sc, BGE_RESET_START);
4165 bge_reset(sc);
4166 bge_sig_legacy(sc, BGE_RESET_START);
4167 bge_sig_post_reset(sc, BGE_RESET_START);
4168
4169 bge_chipinit(sc);
4170
4171 /*
4172 * Init the various state machines, ring
4173 * control blocks and firmware.
4174 */
4175 if (bge_blockinit(sc)) {
4176 device_printf(sc->bge_dev, "initialization failure\n");
4177 return;
4178 }
4179
4180 ifp = sc->bge_ifp;
4181
4182 /* Specify MTU. */
4183 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4184 ETHER_HDR_LEN + ETHER_CRC_LEN +
4185 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4186
4187 /* Load our MAC address. */
4188 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4189 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4190 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4191
4192 /* Program promiscuous mode. */
4193 bge_setpromisc(sc);
4194
4195 /* Program multicast filter. */
4196 bge_setmulti(sc);
4197
4198 /* Program VLAN tag stripping. */
4199 bge_setvlan(sc);
4200
4201 /* Init RX ring. */
4202 if (bge_init_rx_ring_std(sc) != 0) {
4203 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4204 bge_stop(sc);
4205 return;
4206 }
4207
4208 /*
4209 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4210	 * memory to ensure that the chip has in fact read the first
4211 * entry of the ring.
4212 */
4213 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4214 uint32_t v, i;
4215 for (i = 0; i < 10; i++) {
4216 DELAY(20);
4217 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4218 if (v == (MCLBYTES - ETHER_ALIGN))
4219 break;
4220 }
4221 if (i == 10)
4222			device_printf(sc->bge_dev,
4223			    "5705 A0 chip failed to load RX ring\n");
4224 }
4225
4226 /* Init jumbo RX ring. */
4227 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4228 (MCLBYTES - ETHER_ALIGN)) {
4229 if (bge_init_rx_ring_jumbo(sc) != 0) {
4230			device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n");
4231 bge_stop(sc);
4232 return;
4233 }
4234 }
4235
4236 /* Init our RX return ring index. */
4237 sc->bge_rx_saved_considx = 0;
4238
4239 /* Init our RX/TX stat counters. */
4240 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4241
4242 /* Init TX ring. */
4243 bge_init_tx_ring(sc);
4244
4245 /* Turn on transmitter. */
4246 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4247
4248 /* Turn on receiver. */
4249 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4250
4251 /* Tell firmware we're alive. */
4252 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4253
4254#ifdef DEVICE_POLLING
4255 /* Disable interrupts if we are polling. */
4256 if (ifp->if_capenable & IFCAP_POLLING) {
4257 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4258 BGE_PCIMISCCTL_MASK_PCI_INTR);
4259 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4260 } else
4261#endif
4262
4263 /* Enable host interrupts. */
4264 {
4265 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4266 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4267 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4268 }
4269
4270 bge_ifmedia_upd_locked(ifp);
4271
4272 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4273 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4274
4275 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4276}
4277
4278static void
4279bge_init(void *xsc)
4280{
4281 struct bge_softc *sc = xsc;
4282
4283 BGE_LOCK(sc);
4284 bge_init_locked(sc);
4285 BGE_UNLOCK(sc);
4286}
4287
4288/*
4289 * Set media options.
4290 */
4291static int
4292bge_ifmedia_upd(struct ifnet *ifp)
4293{
4294 struct bge_softc *sc = ifp->if_softc;
4295 int res;
4296
4297 BGE_LOCK(sc);
4298 res = bge_ifmedia_upd_locked(ifp);
4299 BGE_UNLOCK(sc);
4300
4301 return (res);
4302}
4303
4304static int
4305bge_ifmedia_upd_locked(struct ifnet *ifp)
4306{
4307 struct bge_softc *sc = ifp->if_softc;
4308 struct mii_data *mii;
4309 struct mii_softc *miisc;
4310 struct ifmedia *ifm;
4311
4312 BGE_LOCK_ASSERT(sc);
4313
4314 ifm = &sc->bge_ifmedia;
4315
4316 /* If this is a 1000baseX NIC, enable the TBI port. */
4317 if (sc->bge_flags & BGE_FLAG_TBI) {
4318 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4319 return (EINVAL);
4320 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4321 case IFM_AUTO:
4322 /*
4323 * The BCM5704 ASIC appears to have a special
4324 * mechanism for programming the autoneg
4325 * advertisement registers in TBI mode.
4326 */
4327 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4328 uint32_t sgdig;
4329 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4330 if (sgdig & BGE_SGDIGSTS_DONE) {
4331 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4332 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4333 sgdig |= BGE_SGDIGCFG_AUTO |
4334 BGE_SGDIGCFG_PAUSE_CAP |
4335 BGE_SGDIGCFG_ASYM_PAUSE;
4336 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4337 sgdig | BGE_SGDIGCFG_SEND);
4338 DELAY(5);
4339 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4340 }
4341 }
4342 break;
4343 case IFM_1000_SX:
4344 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4345 BGE_CLRBIT(sc, BGE_MAC_MODE,
4346 BGE_MACMODE_HALF_DUPLEX);
4347 } else {
4348 BGE_SETBIT(sc, BGE_MAC_MODE,
4349 BGE_MACMODE_HALF_DUPLEX);
4350 }
4351 break;
4352 default:
4353 return (EINVAL);
4354 }
4355 return (0);
4356 }
4357
4358 sc->bge_link_evt++;
4359 mii = device_get_softc(sc->bge_miibus);
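	/* Reset every attached PHY instance before changing media. */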
4360 if (mii->mii_instance)
4361 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4362 mii_phy_reset(miisc);
4363 mii_mediachg(mii);
4364
4365 /*
4366 * Force an interrupt so that we will call bge_link_upd
4367 * if needed and clear any pending link state attention.
4368 * Without this we are not getting any further interrupts
4369 * for link state changes and thus will not UP the link and
4370 * not be able to send in bge_start_locked. The only
4371 * way to get things working was to receive a packet and
4372 * get an RX intr.
4373 * bge_tick should help for fiber cards and we might not
4374	 * need to do this here if BGE_FLAG_TBI is set, but as
4375 * we poll for fiber anyway it should not harm.
4376 */
4377 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4378 sc->bge_flags & BGE_FLAG_5788)
4379 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4380 else
4381 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4382
4383 return (0);
4384}
4385
4386/*
4387 * Report current media status.
4388 */
4389static void
4390bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4391{
4392 struct bge_softc *sc = ifp->if_softc;
4393 struct mii_data *mii;
4394
4395 BGE_LOCK(sc);
4396
4397 if (sc->bge_flags & BGE_FLAG_TBI) {
4398 ifmr->ifm_status = IFM_AVALID;
4399 ifmr->ifm_active = IFM_ETHER;
4400 if (CSR_READ_4(sc, BGE_MAC_STS) &
4401 BGE_MACSTAT_TBI_PCS_SYNCHED)
4402 ifmr->ifm_status |= IFM_ACTIVE;
4403 else {
4404 ifmr->ifm_active |= IFM_NONE;
4405 BGE_UNLOCK(sc);
4406 return;
4407 }
4408 ifmr->ifm_active |= IFM_1000_SX;
4409 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4410 ifmr->ifm_active |= IFM_HDX;
4411 else
4412 ifmr->ifm_active |= IFM_FDX;
4413 BGE_UNLOCK(sc);
4414 return;
4415 }
4416
4417 mii = device_get_softc(sc->bge_miibus);
4418 mii_pollstat(mii);
4419 ifmr->ifm_active = mii->mii_media_active;
4420 ifmr->ifm_status = mii->mii_media_status;
4421
4422 BGE_UNLOCK(sc);
4423}
4424
4425static int
4426bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4427{
4428 struct bge_softc *sc = ifp->if_softc;
4429 struct ifreq *ifr = (struct ifreq *) data;
4430 struct mii_data *mii;
4431 int flags, mask, error = 0;
4432
4433 switch (command) {
4434 case SIOCSIFMTU:
4435 if (ifr->ifr_mtu < ETHERMIN ||
4436 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4437 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4438 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4439 ifr->ifr_mtu > ETHERMTU))
4440 error = EINVAL;
4441 else if (ifp->if_mtu != ifr->ifr_mtu) {
4442 ifp->if_mtu = ifr->ifr_mtu;
4443 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4444 bge_init(sc);
4445 }
4446 break;
4447 case SIOCSIFFLAGS:
4448 BGE_LOCK(sc);
4449 if (ifp->if_flags & IFF_UP) {
4450 /*
4451 * If only the state of the PROMISC flag changed,
4452 * then just use the 'set promisc mode' command
4453 * instead of reinitializing the entire NIC. Doing
4454 * a full re-init means reloading the firmware and
4455 * waiting for it to start up, which may take a
4456 * second or two. Similarly for ALLMULTI.
4457 */
4458 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4459 flags = ifp->if_flags ^ sc->bge_if_flags;
4460 if (flags & IFF_PROMISC)
4461 bge_setpromisc(sc);
4462 if (flags & IFF_ALLMULTI)
4463 bge_setmulti(sc);
4464 } else
4465 bge_init_locked(sc);
4466 } else {
4467 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4468 bge_stop(sc);
4469 }
4470 }
4471 sc->bge_if_flags = ifp->if_flags;
4472 BGE_UNLOCK(sc);
4473 error = 0;
4474 break;
4475 case SIOCADDMULTI:
4476 case SIOCDELMULTI:
4477 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4478 BGE_LOCK(sc);
4479 bge_setmulti(sc);
4480 BGE_UNLOCK(sc);
4481 error = 0;
4482 }
4483 break;
4484 case SIOCSIFMEDIA:
4485 case SIOCGIFMEDIA:
4486 if (sc->bge_flags & BGE_FLAG_TBI) {
4487 error = ifmedia_ioctl(ifp, ifr,
4488 &sc->bge_ifmedia, command);
4489 } else {
4490 mii = device_get_softc(sc->bge_miibus);
4491 error = ifmedia_ioctl(ifp, ifr,
4492 &mii->mii_media, command);
4493 }
4494 break;
4495 case SIOCSIFCAP:
4496 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4497#ifdef DEVICE_POLLING
4498 if (mask & IFCAP_POLLING) {
4499 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4500 error = ether_poll_register(bge_poll, ifp);
4501 if (error)
4502 return (error);
4503 BGE_LOCK(sc);
4504 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4505 BGE_PCIMISCCTL_MASK_PCI_INTR);
4506 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4507 ifp->if_capenable |= IFCAP_POLLING;
4508 BGE_UNLOCK(sc);
4509 } else {
4510 error = ether_poll_deregister(ifp);
4511 /* Enable interrupt even in error case */
4512 BGE_LOCK(sc);
4513 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4514 BGE_PCIMISCCTL_MASK_PCI_INTR);
4515 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4516 ifp->if_capenable &= ~IFCAP_POLLING;
4517 BGE_UNLOCK(sc);
4518 }
4519 }
4520#endif
4521 if (mask & IFCAP_HWCSUM) {
4522 ifp->if_capenable ^= IFCAP_HWCSUM;
4523 if (IFCAP_HWCSUM & ifp->if_capenable &&
4524 IFCAP_HWCSUM & ifp->if_capabilities)
4525 ifp->if_hwassist |= BGE_CSUM_FEATURES;
4526 else
4527 ifp->if_hwassist &= ~BGE_CSUM_FEATURES;
4528#ifdef VLAN_CAPABILITIES
4529 VLAN_CAPABILITIES(ifp);
4530#endif
4531 }
4532
4533 if ((mask & IFCAP_TSO4) != 0 &&
4534 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4535 ifp->if_capenable ^= IFCAP_TSO4;
4536 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4537 ifp->if_hwassist |= CSUM_TSO;
4538 else
4539 ifp->if_hwassist &= ~CSUM_TSO;
4540 }
4541
4542 if (mask & IFCAP_VLAN_MTU) {
4543 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4544 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4545 bge_init(sc);
4546 }
4547
4548 if (mask & IFCAP_VLAN_HWTAGGING) {
4549 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4550 BGE_LOCK(sc);
4551 bge_setvlan(sc);
4552 BGE_UNLOCK(sc);
4553#ifdef VLAN_CAPABILITIES
4554 VLAN_CAPABILITIES(ifp);
4555#endif
4556 }
4557
4558 break;
4559 default:
4560 error = ether_ioctl(ifp, command, data);
4561 break;
4562 }
4563
4564 return (error);
4565}
4566
4567static void
4568bge_watchdog(struct bge_softc *sc)
4569{
4570 struct ifnet *ifp;
4571
4572 BGE_LOCK_ASSERT(sc);
4573
4574 if (sc->bge_timer == 0 || --sc->bge_timer)
4575 return;
4576
4577 ifp = sc->bge_ifp;
4578
4579 if_printf(ifp, "watchdog timeout -- resetting\n");
4580
4581 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4582 bge_init_locked(sc);
4583
4584 ifp->if_oerrors++;
4585}
4586
4587/*
4588 * Stop the adapter and free any mbufs allocated to the
4589 * RX and TX lists.
4590 */
4591static void
4592bge_stop(struct bge_softc *sc)
4593{
4594 struct ifnet *ifp;
4595
4596 BGE_LOCK_ASSERT(sc);
4597
4598 ifp = sc->bge_ifp;
4599
4600 callout_stop(&sc->bge_stat_ch);
4601
4602 /* Disable host interrupts. */
4603 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4604 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4605
4606 /*
4607 * Tell firmware we're shutting down.
4608 */
4609 bge_stop_fw(sc);
4610 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4611
4612 /*
4613 * Disable all of the receiver blocks.
4614 */
4615 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4616 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4617 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4618 if (!(BGE_IS_5705_PLUS(sc)))
4619 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4620 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4621 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4622 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4623
4624 /*
4625 * Disable all of the transmit blocks.
4626 */
4627 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4628 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4629 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4630 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4631 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4632 if (!(BGE_IS_5705_PLUS(sc)))
4633 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4634 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4635
4636 /*
4637 * Shut down all of the memory managers and related
4638 * state machines.
4639 */
4640 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4641 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4642 if (!(BGE_IS_5705_PLUS(sc)))
4643 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4644 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4645 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4646 if (!(BGE_IS_5705_PLUS(sc))) {
4647 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4648 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4649 }
4650
4651 bge_reset(sc);
4652 bge_sig_legacy(sc, BGE_RESET_STOP);
4653 bge_sig_post_reset(sc, BGE_RESET_STOP);
4654
4655 /*
4656 * Keep the ASF firmware running if up.
4657 */
4658 if (sc->bge_asf_mode & ASF_STACKUP)
4659 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4660 else
4661 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4662
4663 /* Free the RX lists. */
4664 bge_free_rx_ring_std(sc);
4665
4666 /* Free jumbo RX list. */
4667 if (BGE_IS_JUMBO_CAPABLE(sc))
4668 bge_free_rx_ring_jumbo(sc);
4669
4670 /* Free TX buffers. */
4671 bge_free_tx_ring(sc);
4672
4673 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4674
4675 /* Clear MAC's link state (PHY may still have link UP). */
4676 if (bootverbose && sc->bge_link)
4677 if_printf(sc->bge_ifp, "link DOWN\n");
4678 sc->bge_link = 0;
4679
4680 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4681}
4682
4683/*
4684 * Stop all chip I/O so that the kernel's probe routines don't
4685 * get confused by errant DMAs when rebooting.
4686 */
4687static int
4688bge_shutdown(device_t dev)
4689{
4690 struct bge_softc *sc;
4691
4692 sc = device_get_softc(dev);
4693 BGE_LOCK(sc);
4694 bge_stop(sc);
4695 bge_reset(sc);
4696 BGE_UNLOCK(sc);
4697
4698 return (0);
4699}
4700
4701static int
4702bge_suspend(device_t dev)
4703{
4704 struct bge_softc *sc;
4705
4706 sc = device_get_softc(dev);
4707 BGE_LOCK(sc);
4708 bge_stop(sc);
4709 BGE_UNLOCK(sc);
4710
4711 return (0);
4712}
4713
4714static int
4715bge_resume(device_t dev)
4716{
4717 struct bge_softc *sc;
4718 struct ifnet *ifp;
4719
4720 sc = device_get_softc(dev);
4721 BGE_LOCK(sc);
4722 ifp = sc->bge_ifp;
4723 if (ifp->if_flags & IFF_UP) {
4724 bge_init_locked(sc);
4725 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4726 bge_start_locked(ifp);
4727 }
4728 BGE_UNLOCK(sc);
4729
4730 return (0);
4731}
4732
4733static void
4734bge_link_upd(struct bge_softc *sc)
4735{
4736 struct mii_data *mii;
4737 uint32_t link, status;
4738
4739 BGE_LOCK_ASSERT(sc);
4740
4741 /* Clear 'pending link event' flag. */
4742 sc->bge_link_evt = 0;
4743
4744 /*
4745 * Process link state changes.
4746 * Grrr. The link status word in the status block does
4747 * not work correctly on the BCM5700 rev AX and BX chips,
4748 * according to all available information. Hence, we have
4749 * to enable MII interrupts in order to properly obtain
4750 * async link changes. Unfortunately, this also means that
4751 * we have to read the MAC status register to detect link
4752 * changes, thereby adding an additional register access to
4753 * the interrupt handler.
4754 *
4755 * XXX: perhaps link state detection procedure used for
4756	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4757 */
4758
4759 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4760 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4761 status = CSR_READ_4(sc, BGE_MAC_STS);
4762 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4763 mii = device_get_softc(sc->bge_miibus);
4764 mii_pollstat(mii);
4765 if (!sc->bge_link &&
4766 mii->mii_media_status & IFM_ACTIVE &&
4767 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4768 sc->bge_link++;
4769 if (bootverbose)
4770 if_printf(sc->bge_ifp, "link UP\n");
4771 } else if (sc->bge_link &&
4772 (!(mii->mii_media_status & IFM_ACTIVE) ||
4773 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4774 sc->bge_link = 0;
4775 if (bootverbose)
4776 if_printf(sc->bge_ifp, "link DOWN\n");
4777 }
4778
4779 /* Clear the interrupt. */
4780 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4781 BGE_EVTENB_MI_INTERRUPT);
4782 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4783 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4784 BRGPHY_INTRS);
4785 }
4786 return;
4787 }
4788
4789 if (sc->bge_flags & BGE_FLAG_TBI) {
4790 status = CSR_READ_4(sc, BGE_MAC_STS);
4791 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4792 if (!sc->bge_link) {
4793 sc->bge_link++;
4794 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4795 BGE_CLRBIT(sc, BGE_MAC_MODE,
4796 BGE_MACMODE_TBI_SEND_CFGS);
4797 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4798 if (bootverbose)
4799 if_printf(sc->bge_ifp, "link UP\n");
4800 if_link_state_change(sc->bge_ifp,
4801 LINK_STATE_UP);
4802 }
4803 } else if (sc->bge_link) {
4804 sc->bge_link = 0;
4805 if (bootverbose)
4806 if_printf(sc->bge_ifp, "link DOWN\n");
4807 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4808 }
4809 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4810 /*
4811		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4812		 * bit in the status word always set.  Work around this bug by
4813		 * reading the PHY link status directly.
4814 */
4815 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4816
4817 if (link != sc->bge_link ||
4818 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4819 mii = device_get_softc(sc->bge_miibus);
4820 mii_pollstat(mii);
4821 if (!sc->bge_link &&
4822 mii->mii_media_status & IFM_ACTIVE &&
4823 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4824 sc->bge_link++;
4825 if (bootverbose)
4826 if_printf(sc->bge_ifp, "link UP\n");
4827 } else if (sc->bge_link &&
4828 (!(mii->mii_media_status & IFM_ACTIVE) ||
4829 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4830 sc->bge_link = 0;
4831 if (bootverbose)
4832 if_printf(sc->bge_ifp, "link DOWN\n");
4833 }
4834 }
4835 } else {
4836 /*
4837 * Discard link events for MII/GMII controllers
4838 * if MI auto-polling is disabled.
4839 */
4840 }
4841
4842 /* Clear the attention. */
4843 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4844 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4845 BGE_MACSTAT_LINK_CHANGED);
4846}
4847
4848#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
4849 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
4850 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
4851 desc)
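/*
 * Illustrative (hypothetical) use:
 *	BGE_SYSCTL_STAT(sc, ctx, "Outbound discarded frames", schildren,
 *	    txstats.ifOutDiscards.bge_addr_lo, "OutDiscards");
 */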
4852
4853static void
4854bge_add_sysctls(struct bge_softc *sc)
4855{
4856 struct sysctl_ctx_list *ctx;
4857 struct sysctl_oid_list *children, *schildren;
4858 struct sysctl_oid *tree;
4859
4860 ctx = device_get_sysctl_ctx(sc->bge_dev);
4861 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4862
4863#ifdef BGE_REGISTER_DEBUG
4864 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4865 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4866 "Debug Information");
4867
4868 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4869 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4870 "Register Read");
4871
4872 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4873 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4874 "Memory Read");
4875
4876#endif
4877
3934 if (m == NULL) {
3935 m_freem(*m_head);
3936 *m_head = NULL;
3937 return (ENOBUFS);
3938 }
3939 *m_head = m;
3940 }
3941
3942 map = sc->bge_cdata.bge_tx_dmamap[idx];
3943 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
3944 &nsegs, BUS_DMA_NOWAIT);
3945 if (error == EFBIG) {
3946 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
3947 if (m == NULL) {
3948 m_freem(*m_head);
3949 *m_head = NULL;
3950 return (ENOBUFS);
3951 }
3952 *m_head = m;
3953 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
3954 m, segs, &nsegs, BUS_DMA_NOWAIT);
3955 if (error) {
3956 m_freem(m);
3957 *m_head = NULL;
3958 return (error);
3959 }
3960 } else if (error != 0)
3961 return (error);
3962
3963 /* Check if we have enough free send BDs. */
3964 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
3965 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
3966 return (ENOBUFS);
3967 }
3968
3969 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3970
3971#if __FreeBSD_version > 700022
3972 if (m->m_flags & M_VLANTAG) {
3973 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3974 vlan_tag = m->m_pkthdr.ether_vtag;
3975 }
3976#else
3977 {
3978 struct m_tag *mtag;
3979
3980 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
3981 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3982 vlan_tag = VLAN_TAG_VALUE(mtag);
3983 }
3984 }
3985#endif
3986 for (i = 0; ; i++) {
3987 d = &sc->bge_ldata.bge_tx_ring[idx];
3988 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3989 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3990 d->bge_len = segs[i].ds_len;
3991 d->bge_flags = csum_flags;
3992 d->bge_vlan_tag = vlan_tag;
3993 d->bge_mss = mss;
3994 if (i == nsegs - 1)
3995 break;
3996 BGE_INC(idx, BGE_TX_RING_CNT);
3997 }
3998
3999 /* Mark the last segment as end of packet... */
4000 d->bge_flags |= BGE_TXBDFLAG_END;
4001
4002 /*
4003 * Ensure that the map for this transmission
4004 * is placed at the array index of the last descriptor
4005 * in this chain.
4006 */
4007 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4008 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4009 sc->bge_cdata.bge_tx_chain[idx] = m;
4010 sc->bge_txcnt += nsegs;
4011
4012 BGE_INC(idx, BGE_TX_RING_CNT);
4013 *txidx = idx;
4014
4015 return (0);
4016}
4017
4018/*
4019 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4020 * to the mbuf data regions directly in the transmit descriptors.
4021 */
4022static void
4023bge_start_locked(struct ifnet *ifp)
4024{
4025 struct bge_softc *sc;
4026 struct mbuf *m_head;
4027 uint32_t prodidx;
4028 int count;
4029
4030 sc = ifp->if_softc;
4031 BGE_LOCK_ASSERT(sc);
4032
4033 if (!sc->bge_link ||
4034 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4035 IFF_DRV_RUNNING)
4036 return;
4037
4038 prodidx = sc->bge_tx_prodidx;
4039
4040 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4041 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4042 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4043 break;
4044 }
4045 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4046 if (m_head == NULL)
4047 break;
4048
4049 /*
4050 * XXX
4051 * The code inside the if() block is never reached since we
4052 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4053 * requests to checksum TCP/UDP in a fragmented packet.
4054 *
4055 * XXX
4056 * safety overkill. If this is a fragmented packet chain
4057 * with delayed TCP/UDP checksums, then only encapsulate
4058 * it if we have enough descriptors to handle the entire
4059 * chain at once.
4060 * (paranoia -- may not actually be needed)
4061 */
4062 if (m_head->m_flags & M_FIRSTFRAG &&
4063 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4064 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4065 m_head->m_pkthdr.csum_data + 16) {
4066 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4067 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4068 break;
4069 }
4070 }
4071
4072 /*
4073 * Pack the data into the transmit ring. If we
4074 * don't have room, set the OACTIVE flag and wait
4075 * for the NIC to drain the ring.
4076 */
4077 if (bge_encap(sc, &m_head, &prodidx)) {
4078 if (m_head == NULL)
4079 break;
4080 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4081 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4082 break;
4083 }
4084 ++count;
4085
4086 /*
4087 * If there's a BPF listener, bounce a copy of this frame
4088 * to him.
4089 */
4090#ifdef ETHER_BPF_MTAP
4091 ETHER_BPF_MTAP(ifp, m_head);
4092#else
4093 BPF_MTAP(ifp, m_head);
4094#endif
4095 }
4096
4097 if (count > 0) {
4098 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4099 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4100 /* Transmit. */
4101 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4102 /* 5700 b2 errata: repost the producer index; the first mailbox write can reportedly be lost. */
4103 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4104 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4105
4106 sc->bge_tx_prodidx = prodidx;
4107
4108 /*
4109 * Set a timeout in case the chip goes out to lunch.
4110 */
4111 sc->bge_timer = 5;
4112 }
4113}
4114
4115/*
4116 * Unlocked entry point for the main transmit routine: take the
4117 * driver lock and hand off to bge_start_locked().
4118 */
4119static void
4120bge_start(struct ifnet *ifp)
4121{
4122 struct bge_softc *sc;
4123
4124 sc = ifp->if_softc;
4125 BGE_LOCK(sc);
4126 bge_start_locked(ifp);
4127 BGE_UNLOCK(sc);
4128}
4129
4130static void
4131bge_init_locked(struct bge_softc *sc)
4132{
4133 struct ifnet *ifp;
4134 uint16_t *m;
4135
4136 BGE_LOCK_ASSERT(sc);
4137
4138 ifp = sc->bge_ifp;
4139
4140 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4141 return;
4142
4143 /* Cancel pending I/O and flush buffers. */
4144 bge_stop(sc);
4145
4146 bge_stop_fw(sc);
4147 bge_sig_pre_reset(sc, BGE_RESET_START);
4148 bge_reset(sc);
4149 bge_sig_legacy(sc, BGE_RESET_START);
4150 bge_sig_post_reset(sc, BGE_RESET_START);
4151
4152 bge_chipinit(sc);
4153
4154 /*
4155 * Init the various state machines, ring
4156 * control blocks and firmware.
4157 */
4158 if (bge_blockinit(sc)) {
4159 device_printf(sc->bge_dev, "initialization failure\n");
4160 return;
4161 }
4162
4163 ifp = sc->bge_ifp;
4164
4165 /* Specify MTU. */
4166 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4167 ETHER_HDR_LEN + ETHER_CRC_LEN +
4168 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4169
4170 /* Load our MAC address. */
4171 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4172 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4173 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4174
4175 /* Program promiscuous mode. */
4176 bge_setpromisc(sc);
4177
4178 /* Program multicast filter. */
4179 bge_setmulti(sc);
4180
4181 /* Program VLAN tag stripping. */
4182 bge_setvlan(sc);
4183
4184 /* Init RX ring. */
4185 if (bge_init_rx_ring_std(sc) != 0) {
4186 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4187 bge_stop(sc);
4188 return;
4189 }
4190
4191 /*
4192 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4193 * memory to ensure that the chip has in fact read the first
4194 * entry of the ring.
4195 */
4196 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4197 uint32_t v, i;
4198 for (i = 0; i < 10; i++) {
4199 DELAY(20);
4200 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4201 if (v == (MCLBYTES - ETHER_ALIGN))
4202 break;
4203 }
4204 if (i == 10)
4205 device_printf (sc->bge_dev,
4206 "5705 A0 chip failed to load RX ring\n");
4207 }
4208
4209 /* Init jumbo RX ring. */
4210 if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4211 (MCLBYTES - ETHER_ALIGN)) {
4212 if (bge_init_rx_ring_jumbo(sc) != 0) {
4213 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4214 bge_stop(sc);
4215 return;
4216 }
4217 }
4218
4219 /* Init our RX return ring index. */
4220 sc->bge_rx_saved_considx = 0;
4221
4222 /* Init our RX/TX stat counters. */
4223 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4224
4225 /* Init TX ring. */
4226 bge_init_tx_ring(sc);
4227
4228 /* Turn on transmitter. */
4229 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4230
4231 /* Turn on receiver. */
4232 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4233
4234 /* Tell firmware we're alive. */
4235 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4236
4237#ifdef DEVICE_POLLING
4238 /* Disable interrupts if we are polling. */
4239 if (ifp->if_capenable & IFCAP_POLLING) {
4240 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4241 BGE_PCIMISCCTL_MASK_PCI_INTR);
4242 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4243 } else
4244#endif
4245
4246 /* Enable host interrupts. */
4247 {
4248 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4249 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4250 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4251 }
4252
4253 bge_ifmedia_upd_locked(ifp);
4254
4255 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4256 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4257
4258 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4259}
4260
4261static void
4262bge_init(void *xsc)
4263{
4264 struct bge_softc *sc = xsc;
4265
4266 BGE_LOCK(sc);
4267 bge_init_locked(sc);
4268 BGE_UNLOCK(sc);
4269}
4270
4271/*
4272 * Set media options.
4273 */
4274static int
4275bge_ifmedia_upd(struct ifnet *ifp)
4276{
4277 struct bge_softc *sc = ifp->if_softc;
4278 int res;
4279
4280 BGE_LOCK(sc);
4281 res = bge_ifmedia_upd_locked(ifp);
4282 BGE_UNLOCK(sc);
4283
4284 return (res);
4285}
4286
4287static int
4288bge_ifmedia_upd_locked(struct ifnet *ifp)
4289{
4290 struct bge_softc *sc = ifp->if_softc;
4291 struct mii_data *mii;
4292 struct mii_softc *miisc;
4293 struct ifmedia *ifm;
4294
4295 BGE_LOCK_ASSERT(sc);
4296
4297 ifm = &sc->bge_ifmedia;
4298
4299 /* If this is a 1000baseX NIC, enable the TBI port. */
4300 if (sc->bge_flags & BGE_FLAG_TBI) {
4301 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4302 return (EINVAL);
4303 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4304 case IFM_AUTO:
4305 /*
4306 * The BCM5704 ASIC appears to have a special
4307 * mechanism for programming the autoneg
4308 * advertisement registers in TBI mode.
4309 */
4310 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4311 uint32_t sgdig;
4312 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4313 if (sgdig & BGE_SGDIGSTS_DONE) {
4314 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4315 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4316 sgdig |= BGE_SGDIGCFG_AUTO |
4317 BGE_SGDIGCFG_PAUSE_CAP |
4318 BGE_SGDIGCFG_ASYM_PAUSE;
4319 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4320 sgdig | BGE_SGDIGCFG_SEND);
4321 DELAY(5);
4322 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4323 }
4324 }
4325 break;
4326 case IFM_1000_SX:
4327 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4328 BGE_CLRBIT(sc, BGE_MAC_MODE,
4329 BGE_MACMODE_HALF_DUPLEX);
4330 } else {
4331 BGE_SETBIT(sc, BGE_MAC_MODE,
4332 BGE_MACMODE_HALF_DUPLEX);
4333 }
4334 break;
4335 default:
4336 return (EINVAL);
4337 }
4338 return (0);
4339 }
4340
4341 sc->bge_link_evt++;
4342 mii = device_get_softc(sc->bge_miibus);
4343 if (mii->mii_instance)
4344 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4345 mii_phy_reset(miisc);
4346 mii_mediachg(mii);
4347
4348 /*
4349 * Force an interrupt so that we will call bge_link_upd
4350 * if needed and clear any pending link state attention.
4351 * Without this we would get no further interrupts for
4352 * link state changes and thus would never mark the link
4353 * UP or be able to send in bge_start_locked. The only
4354 * other way to recover is to receive a packet and take
4355 * an RX interrupt.
4356 * bge_tick should help for fiber cards, so we might not
4357 * need to do this here if BGE_FLAG_TBI is set; but since
4358 * we poll for fiber anyway, it does no harm.
4359 */
4360 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4361 sc->bge_flags & BGE_FLAG_5788)
4362 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4363 else
4364 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4365
4366 return (0);
4367}
4368
4369/*
4370 * Report current media status.
4371 */
4372static void
4373bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4374{
4375 struct bge_softc *sc = ifp->if_softc;
4376 struct mii_data *mii;
4377
4378 BGE_LOCK(sc);
4379
4380 if (sc->bge_flags & BGE_FLAG_TBI) {
4381 ifmr->ifm_status = IFM_AVALID;
4382 ifmr->ifm_active = IFM_ETHER;
4383 if (CSR_READ_4(sc, BGE_MAC_STS) &
4384 BGE_MACSTAT_TBI_PCS_SYNCHED)
4385 ifmr->ifm_status |= IFM_ACTIVE;
4386 else {
4387 ifmr->ifm_active |= IFM_NONE;
4388 BGE_UNLOCK(sc);
4389 return;
4390 }
4391 ifmr->ifm_active |= IFM_1000_SX;
4392 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4393 ifmr->ifm_active |= IFM_HDX;
4394 else
4395 ifmr->ifm_active |= IFM_FDX;
4396 BGE_UNLOCK(sc);
4397 return;
4398 }
4399
4400 mii = device_get_softc(sc->bge_miibus);
4401 mii_pollstat(mii);
4402 ifmr->ifm_active = mii->mii_media_active;
4403 ifmr->ifm_status = mii->mii_media_status;
4404
4405 BGE_UNLOCK(sc);
4406}
4407
4408static int
4409bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4410{
4411 struct bge_softc *sc = ifp->if_softc;
4412 struct ifreq *ifr = (struct ifreq *) data;
4413 struct mii_data *mii;
4414 int flags, mask, error = 0;
4415
4416 switch (command) {
4417 case SIOCSIFMTU:
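/*
 * Accept the new MTU only if it lies between ETHERMIN and the
 * controller's limit (BGE_JUMBO_MTU on jumbo-capable chips,
 * ETHERMTU otherwise); an accepted change forces a re-init.
 */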
4418 if (ifr->ifr_mtu < ETHERMIN ||
4419 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4420 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4421 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4422 ifr->ifr_mtu > ETHERMTU))
4423 error = EINVAL;
4424 else if (ifp->if_mtu != ifr->ifr_mtu) {
4425 ifp->if_mtu = ifr->ifr_mtu;
4426 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4427 bge_init(sc);
4428 }
4429 break;
4430 case SIOCSIFFLAGS:
4431 BGE_LOCK(sc);
4432 if (ifp->if_flags & IFF_UP) {
4433 /*
4434 * If only the state of the PROMISC flag changed,
4435 * then just use the 'set promisc mode' command
4436 * instead of reinitializing the entire NIC. Doing
4437 * a full re-init means reloading the firmware and
4438 * waiting for it to start up, which may take a
4439 * second or two. Similarly for ALLMULTI.
4440 */
4441 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4442 flags = ifp->if_flags ^ sc->bge_if_flags;
4443 if (flags & IFF_PROMISC)
4444 bge_setpromisc(sc);
4445 if (flags & IFF_ALLMULTI)
4446 bge_setmulti(sc);
4447 } else
4448 bge_init_locked(sc);
4449 } else {
4450 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4451 bge_stop(sc);
4452 }
4453 }
4454 sc->bge_if_flags = ifp->if_flags;
4455 BGE_UNLOCK(sc);
4456 error = 0;
4457 break;
4458 case SIOCADDMULTI:
4459 case SIOCDELMULTI:
4460 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4461 BGE_LOCK(sc);
4462 bge_setmulti(sc);
4463 BGE_UNLOCK(sc);
4464 error = 0;
4465 }
4466 break;
4467 case SIOCSIFMEDIA:
4468 case SIOCGIFMEDIA:
4469 if (sc->bge_flags & BGE_FLAG_TBI) {
4470 error = ifmedia_ioctl(ifp, ifr,
4471 &sc->bge_ifmedia, command);
4472 } else {
4473 mii = device_get_softc(sc->bge_miibus);
4474 error = ifmedia_ioctl(ifp, ifr,
4475 &mii->mii_media, command);
4476 }
4477 break;
4478 case SIOCSIFCAP:
4479 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4480#ifdef DEVICE_POLLING
4481 if (mask & IFCAP_POLLING) {
4482 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4483 error = ether_poll_register(bge_poll, ifp);
4484 if (error)
4485 return (error);
4486 BGE_LOCK(sc);
4487 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4488 BGE_PCIMISCCTL_MASK_PCI_INTR);
4489 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4490 ifp->if_capenable |= IFCAP_POLLING;
4491 BGE_UNLOCK(sc);
4492 } else {
4493 error = ether_poll_deregister(ifp);
4494 /* Enable interrupt even in error case */
4495 BGE_LOCK(sc);
4496 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4497 BGE_PCIMISCCTL_MASK_PCI_INTR);
4498 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4499 ifp->if_capenable &= ~IFCAP_POLLING;
4500 BGE_UNLOCK(sc);
4501 }
4502 }
4503#endif
4504 if (mask & IFCAP_HWCSUM) {
4505 ifp->if_capenable ^= IFCAP_HWCSUM;
4506 if (IFCAP_HWCSUM & ifp->if_capenable &&
4507 IFCAP_HWCSUM & ifp->if_capabilities)
4508 ifp->if_hwassist |= BGE_CSUM_FEATURES;
4509 else
4510 ifp->if_hwassist &= ~BGE_CSUM_FEATURES;
4511#ifdef VLAN_CAPABILITIES
4512 VLAN_CAPABILITIES(ifp);
4513#endif
4514 }
4515
4516 if ((mask & IFCAP_TSO4) != 0 &&
4517 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
4518 ifp->if_capenable ^= IFCAP_TSO4;
4519 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
4520 ifp->if_hwassist |= CSUM_TSO;
4521 else
4522 ifp->if_hwassist &= ~CSUM_TSO;
4523 }
4524
4525 if (mask & IFCAP_VLAN_MTU) {
4526 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4527 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4528 bge_init(sc);
4529 }
4530
4531 if (mask & IFCAP_VLAN_HWTAGGING) {
4532 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4533 BGE_LOCK(sc);
4534 bge_setvlan(sc);
4535 BGE_UNLOCK(sc);
4536#ifdef VLAN_CAPABILITIES
4537 VLAN_CAPABILITIES(ifp);
4538#endif
4539 }
4540
4541 break;
4542 default:
4543 error = ether_ioctl(ifp, command, data);
4544 break;
4545 }
4546
4547 return (error);
4548}
4549
4550static void
4551bge_watchdog(struct bge_softc *sc)
4552{
4553 struct ifnet *ifp;
4554
4555 BGE_LOCK_ASSERT(sc);
4556
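/*
 * The watchdog is armed whenever bge_timer is nonzero and fires
 * only when the countdown reaches zero.
 */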
4557 if (sc->bge_timer == 0 || --sc->bge_timer)
4558 return;
4559
4560 ifp = sc->bge_ifp;
4561
4562 if_printf(ifp, "watchdog timeout -- resetting\n");
4563
4564 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4565 bge_init_locked(sc);
4566
4567 ifp->if_oerrors++;
4568}
4569
4570/*
4571 * Stop the adapter and free any mbufs allocated to the
4572 * RX and TX lists.
4573 */
4574static void
4575bge_stop(struct bge_softc *sc)
4576{
4577 struct ifnet *ifp;
4578
4579 BGE_LOCK_ASSERT(sc);
4580
4581 ifp = sc->bge_ifp;
4582
4583 callout_stop(&sc->bge_stat_ch);
4584
4585 /* Disable host interrupts. */
4586 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4587 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4588
4589 /*
4590 * Tell firmware we're shutting down.
4591 */
4592 bge_stop_fw(sc);
4593 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4594
4595 /*
4596 * Disable all of the receiver blocks.
4597 */
4598 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4599 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4600 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4601 if (!(BGE_IS_5705_PLUS(sc)))
4602 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4603 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4604 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4605 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4606
4607 /*
4608 * Disable all of the transmit blocks.
4609 */
4610 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4611 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4612 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4613 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4614 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4615 if (!(BGE_IS_5705_PLUS(sc)))
4616 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4617 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4618
4619 /*
4620 * Shut down all of the memory managers and related
4621 * state machines.
4622 */
4623 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4624 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4625 if (!(BGE_IS_5705_PLUS(sc)))
4626 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4627 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4628 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4629 if (!(BGE_IS_5705_PLUS(sc))) {
4630 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4631 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4632 }
4633
4634 bge_reset(sc);
4635 bge_sig_legacy(sc, BGE_RESET_STOP);
4636 bge_sig_post_reset(sc, BGE_RESET_STOP);
4637
4638 /*
4639 * Keep the ASF firmware running if up.
4640 */
4641 if (sc->bge_asf_mode & ASF_STACKUP)
4642 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4643 else
4644 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4645
4646 /* Free the RX lists. */
4647 bge_free_rx_ring_std(sc);
4648
4649 /* Free jumbo RX list. */
4650 if (BGE_IS_JUMBO_CAPABLE(sc))
4651 bge_free_rx_ring_jumbo(sc);
4652
4653 /* Free TX buffers. */
4654 bge_free_tx_ring(sc);
4655
4656 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4657
4658 /* Clear MAC's link state (PHY may still have link UP). */
4659 if (bootverbose && sc->bge_link)
4660 if_printf(sc->bge_ifp, "link DOWN\n");
4661 sc->bge_link = 0;
4662
4663 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4664}
4665
4666/*
4667 * Stop all chip I/O so that the kernel's probe routines don't
4668 * get confused by errant DMAs when rebooting.
4669 */
4670static int
4671bge_shutdown(device_t dev)
4672{
4673 struct bge_softc *sc;
4674
4675 sc = device_get_softc(dev);
4676 BGE_LOCK(sc);
4677 bge_stop(sc);
4678 bge_reset(sc);
4679 BGE_UNLOCK(sc);
4680
4681 return (0);
4682}
4683
4684static int
4685bge_suspend(device_t dev)
4686{
4687 struct bge_softc *sc;
4688
4689 sc = device_get_softc(dev);
4690 BGE_LOCK(sc);
4691 bge_stop(sc);
4692 BGE_UNLOCK(sc);
4693
4694 return (0);
4695}
4696
4697static int
4698bge_resume(device_t dev)
4699{
4700 struct bge_softc *sc;
4701 struct ifnet *ifp;
4702
4703 sc = device_get_softc(dev);
4704 BGE_LOCK(sc);
4705 ifp = sc->bge_ifp;
4706 if (ifp->if_flags & IFF_UP) {
4707 bge_init_locked(sc);
4708 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4709 bge_start_locked(ifp);
4710 }
4711 BGE_UNLOCK(sc);
4712
4713 return (0);
4714}
4715
4716static void
4717bge_link_upd(struct bge_softc *sc)
4718{
4719 struct mii_data *mii;
4720 uint32_t link, status;
4721
4722 BGE_LOCK_ASSERT(sc);
4723
4724 /* Clear 'pending link event' flag. */
4725 sc->bge_link_evt = 0;
4726
4727 /*
4728 * Process link state changes.
4729 * Grrr. The link status word in the status block does
4730 * not work correctly on the BCM5700 rev AX and BX chips,
4731 * according to all available information. Hence, we have
4732 * to enable MII interrupts in order to properly obtain
4733 * async link changes. Unfortunately, this also means that
4734 * we have to read the MAC status register to detect link
4735 * changes, thereby adding an additional register access to
4736 * the interrupt handler.
4737 *
4738 * XXX: perhaps link state detection procedure used for
4739 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4740 */
4741
4742 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4743 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4744 status = CSR_READ_4(sc, BGE_MAC_STS);
4745 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4746 mii = device_get_softc(sc->bge_miibus);
4747 mii_pollstat(mii);
4748 if (!sc->bge_link &&
4749 mii->mii_media_status & IFM_ACTIVE &&
4750 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4751 sc->bge_link++;
4752 if (bootverbose)
4753 if_printf(sc->bge_ifp, "link UP\n");
4754 } else if (sc->bge_link &&
4755 (!(mii->mii_media_status & IFM_ACTIVE) ||
4756 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4757 sc->bge_link = 0;
4758 if (bootverbose)
4759 if_printf(sc->bge_ifp, "link DOWN\n");
4760 }
4761
4762 /* Clear the interrupt. */
4763 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4764 BGE_EVTENB_MI_INTERRUPT);
4765 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4766 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4767 BRGPHY_INTRS);
4768 }
4769 return;
4770 }
4771
4772 if (sc->bge_flags & BGE_FLAG_TBI) {
4773 status = CSR_READ_4(sc, BGE_MAC_STS);
4774 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4775 if (!sc->bge_link) {
4776 sc->bge_link++;
4777 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4778 BGE_CLRBIT(sc, BGE_MAC_MODE,
4779 BGE_MACMODE_TBI_SEND_CFGS);
4780 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4781 if (bootverbose)
4782 if_printf(sc->bge_ifp, "link UP\n");
4783 if_link_state_change(sc->bge_ifp,
4784 LINK_STATE_UP);
4785 }
4786 } else if (sc->bge_link) {
4787 sc->bge_link = 0;
4788 if (bootverbose)
4789 if_printf(sc->bge_ifp, "link DOWN\n");
4790 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4791 }
4792 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4793 /*
4794 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
4795 * in status word always set. Work around this bug by reading
4796 * PHY link status directly.
4797 */
4798 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4799
4800 if (link != sc->bge_link ||
4801 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4802 mii = device_get_softc(sc->bge_miibus);
4803 mii_pollstat(mii);
4804 if (!sc->bge_link &&
4805 mii->mii_media_status & IFM_ACTIVE &&
4806 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4807 sc->bge_link++;
4808 if (bootverbose)
4809 if_printf(sc->bge_ifp, "link UP\n");
4810 } else if (sc->bge_link &&
4811 (!(mii->mii_media_status & IFM_ACTIVE) ||
4812 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4813 sc->bge_link = 0;
4814 if (bootverbose)
4815 if_printf(sc->bge_ifp, "link DOWN\n");
4816 }
4817 }
4818 } else {
4819 /*
4820 * Discard link events for MII/GMII controllers
4821 * if MI auto-polling is disabled.
4822 */
4823 }
4824
4825 /* Clear the attention. */
4826 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4827 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4828 BGE_MACSTAT_LINK_CHANGED);
4829}
4830
4831#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
4832 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
4833 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
4834 desc)
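/*
 * For example, BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children,
 * ifInErrors, "InputErrors") registers a read-only "InputErrors" OID
 * whose handler, bge_sysctl_stats(), fetches the ifInErrors counter
 * from the chip's statistics block on every read.
 */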
4835
4836static void
4837bge_add_sysctls(struct bge_softc *sc)
4838{
4839 struct sysctl_ctx_list *ctx;
4840 struct sysctl_oid_list *children, *schildren;
4841 struct sysctl_oid *tree;
4842
4843 ctx = device_get_sysctl_ctx(sc->bge_dev);
4844 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4845
4846#ifdef BGE_REGISTER_DEBUG
4847 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4848 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4849 "Debug Information");
4850
4851 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4852 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4853 "Register Read");
4854
4855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4856 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4857 "Memory Read");
4858
4859#endif
4860
4861 /*
4862 * A common design characteristic for many Broadcom client controllers
4863 * is that they only support a single outstanding DMA read operation
4864 * on the PCIe bus. This means that it will take twice as long to fetch
4865 * a TX frame that is split into header and payload buffers as it does
4866 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
4867 * these controllers, coalescing buffers to reduce the number of memory
4868 * reads is an effective way to get maximum performance (about 940 Mbps).
4869 * Without collapsing TX buffers the maximum TCP bulk transfer
4870 * performance is about 850 Mbps. However, forcing mbufs to be
4871 * coalesced consumes a lot of CPU cycles, so it is left off by default.
4872 */
4873 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
4874 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
4875 "Number of fragmented TX buffers of a frame allowed before "
4876 "forced collapsing");
4877 resource_int_value(device_get_name(sc->bge_dev),
4878 device_get_unit(sc->bge_dev), "forced_collapse",
4879 &sc->bge_forced_collapse);
4880
4881 if (BGE_IS_5705_PLUS(sc))
4882 return;
4883
4884 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4885 NULL, "BGE Statistics");
4886 schildren = children = SYSCTL_CHILDREN(tree);
4887 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
4888 children, COSFramesDroppedDueToFilters,
4889 "FramesDroppedDueToFilters");
4890 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
4891 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
4892 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
4893 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
4894 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
4895 children, nicNoMoreRxBDs, "NoMoreRxBDs");
4896 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
4897 children, ifInDiscards, "InputDiscards");
4898 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
4899 children, ifInErrors, "InputErrors");
4900 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
4901 children, nicRecvThresholdHit, "RecvThresholdHit");
4902 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
4903 children, nicDmaReadQueueFull, "DmaReadQueueFull");
4904 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
4905 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
4906 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
4907 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
4908 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
4909 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
4910 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
4911 children, nicRingStatusUpdate, "RingStatusUpdate");
4912 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
4913 children, nicInterrupts, "Interrupts");
4914 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
4915 children, nicAvoidedInterrupts, "AvoidedInterrupts");
4916 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
4917 children, nicSendThresholdHit, "SendThresholdHit");
4918
4919 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
4920 NULL, "BGE RX Statistics");
4921 children = SYSCTL_CHILDREN(tree);
4922 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
4923 children, rxstats.ifHCInOctets, "Octets");
4924 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
4925 children, rxstats.etherStatsFragments, "Fragments");
4926 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
4927 children, rxstats.ifHCInUcastPkts, "UcastPkts");
4928 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
4929 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
4930 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
4931 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
4932 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
4933 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
4934 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
4935 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
4936 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
4937 children, rxstats.xoffPauseFramesReceived,
4938 "xoffPauseFramesReceived");
4939 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
4940 children, rxstats.macControlFramesReceived,
4941 "ControlFramesReceived");
4942 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
4943 children, rxstats.xoffStateEntered, "xoffStateEntered");
4944 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
4945 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
4946 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
4947 children, rxstats.etherStatsJabbers, "Jabbers");
4948 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
4949 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
4950 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
4951 children, rxstats.inRangeLengthError, "inRangeLengthError");
4952 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
4953 children, rxstats.outRangeLengthError, "outRangeLengthError");
4954
4955 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
4956 NULL, "BGE TX Statistics");
4957 children = SYSCTL_CHILDREN(tree);
4958 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
4959 children, txstats.ifHCOutOctets, "Octets");
4960 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
4961 children, txstats.etherStatsCollisions, "Collisions");
4962 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
4963 children, txstats.outXonSent, "XonSent");
4964 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
4965 children, txstats.outXoffSent, "XoffSent");
4966 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
4967 children, txstats.flowControlDone, "flowControlDone");
4968 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
4969 children, txstats.dot3StatsInternalMacTransmitErrors,
4970 "InternalMacTransmitErrors");
4971 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
4972 children, txstats.dot3StatsSingleCollisionFrames,
4973 "SingleCollisionFrames");
4974 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
4975 children, txstats.dot3StatsMultipleCollisionFrames,
4976 "MultipleCollisionFrames");
4977 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
4978 children, txstats.dot3StatsDeferredTransmissions,
4979 "DeferredTransmissions");
4980 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
4981 children, txstats.dot3StatsExcessiveCollisions,
4982 "ExcessiveCollisions");
4983 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
4984 children, txstats.dot3StatsLateCollisions,
4985 "LateCollisions");
4986 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
4987 children, txstats.ifHCOutUcastPkts, "UcastPkts");
4988 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
4989 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
4990 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
4991 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
4992 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
4993 children, txstats.dot3StatsCarrierSenseErrors,
4994 "CarrierSenseErrors");
4995 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
4996 children, txstats.ifOutDiscards, "Discards");
4997 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
4998 children, txstats.ifOutErrors, "Errors");
4999}
5000
5001static int
5002bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5003{
5004 struct bge_softc *sc;
5005 uint32_t result;
5006 int offset;
5007
5008 sc = (struct bge_softc *)arg1;
5009 offset = arg2;
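/*
 * arg2 carries offsetof(struct bge_stats, <counter>); read the low
 * 32 bits of that counter through the register memory window.
 */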
5010 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5011 offsetof(bge_hostaddr, bge_addr_lo));
5012 return (sysctl_handle_int(oidp, &result, 0, req));
5013}
5014
5015#ifdef BGE_REGISTER_DEBUG
5016static int
5017bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5018{
5019 struct bge_softc *sc;
5020 uint16_t *sbdata;
5021 int error;
5022 int result;
5023 int i, j;
5024
5025 result = -1;
5026 error = sysctl_handle_int(oidp, &result, 0, req);
5027 if (error || (req->newptr == NULL))
5028 return (error);
5029
5030 if (result == 1) {
5031 sc = (struct bge_softc *)arg1;
5032
5033 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5034 printf("Status Block:\n");
5035 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5036 printf("%06x:", i);
5037 for (j = 0; j < 8; j++) {
5038 printf(" %04x", sbdata[i]);
5039 i += 4;
5040 }
5041 printf("\n");
5042 }
5043
5044 printf("Registers:\n");
5045 for (i = 0x800; i < 0xA00; ) {
5046 printf("%06x:", i);
5047 for (j = 0; j < 8; j++) {
5048 printf(" %08x", CSR_READ_4(sc, i));
5049 i += 4;
5050 }
5051 printf("\n");
5052 }
5053
5054 printf("Hardware Flags:\n");
5055 if (BGE_IS_5755_PLUS(sc))
5056 printf(" - 5755 Plus\n");
5057 if (BGE_IS_575X_PLUS(sc))
5058 printf(" - 575X Plus\n");
5059 if (BGE_IS_5705_PLUS(sc))
5060 printf(" - 5705 Plus\n");
5061 if (BGE_IS_5714_FAMILY(sc))
5062 printf(" - 5714 Family\n");
5063 if (BGE_IS_5700_FAMILY(sc))
5064 printf(" - 5700 Family\n");
5065 if (sc->bge_flags & BGE_FLAG_JUMBO)
5066 printf(" - Supports Jumbo Frames\n");
5067 if (sc->bge_flags & BGE_FLAG_PCIX)
5068 printf(" - PCI-X Bus\n");
5069 if (sc->bge_flags & BGE_FLAG_PCIE)
5070 printf(" - PCI Express Bus\n");
5071 if (sc->bge_flags & BGE_FLAG_NO_3LED)
5072 printf(" - No 3 LEDs\n");
5073 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5074 printf(" - RX Alignment Bug\n");
5075 }
5076
5077 return (error);
5078}
5079
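/*
 * Debug helper: writing a register offset below 0x8000 to this OID
 * (e.g. "sysctl dev.bge.0.reg_read=1024"; unit 0 hypothetical) dumps
 * that register's value to the console.
 */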
5080static int
5081bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5082{
5083 struct bge_softc *sc;
5084 int error;
5085 int result; /* sysctl_handle_int() stores an int */
5086 uint32_t val;
5087
5088 result = -1;
5089 error = sysctl_handle_int(oidp, &result, 0, req);
5090 if (error || (req->newptr == NULL))
5091 return (error);
5092
5093 if (result >= 0 && result < 0x8000) {
5094 sc = (struct bge_softc *)arg1;
5095 val = CSR_READ_4(sc, result);
5096 printf("reg 0x%06X = 0x%08X\n", result, val);
5097 }
5098
5099 return (error);
5100}
5101
5102static int
5103bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5104{
5105 struct bge_softc *sc;
5106 int error;
5107 int result; /* sysctl_handle_int() stores an int */
5108 uint32_t val;
5109
5110 result = -1;
5111 error = sysctl_handle_int(oidp, &result, 0, req);
5112 if (error || (req->newptr == NULL))
5113 return (error);
5114
5115 if (result >= 0 && result < 0x8000) {
5116 sc = (struct bge_softc *)arg1;
5117 val = bge_readmem_ind(sc, result);
5118 printf("mem 0x%06X = 0x%08X\n", result, val);
5119 }
5120
5121 return (error);
5122}
5123#endif
5124
5125static int
5126bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5127{
5128
5129 if (sc->bge_flags & BGE_FLAG_EADDR)
5130 return (1);
5131
5132#ifdef __sparc64__
5133 OF_getetheraddr(sc->bge_dev, ether_addr);
5134 return (0);
5135#endif
5136 return (1);
5137}
5138
5139static int
5140bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5141{
5142 uint32_t mac_addr;
5143
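/*
 * Look for a station address stashed in NIC shared memory at 0x0c14;
 * the upper 16 bits of the first word carry the ASCII signature "HK"
 * (0x484b) when firmware has written one.
 */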
5144 mac_addr = bge_readmem_ind(sc, 0x0c14);
5145 if ((mac_addr >> 16) == 0x484b) {
5146 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5147 ether_addr[1] = (uint8_t)mac_addr;
5148 mac_addr = bge_readmem_ind(sc, 0x0c18);
5149 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5150 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5151 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5152 ether_addr[5] = (uint8_t)mac_addr;
5153 return (0);
5154 }
5155 return (1);
5156}
5157
5158static int
5159bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5160{
5161 int mac_offset = BGE_EE_MAC_OFFSET;
5162
5163 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5164 mac_offset = BGE_EE_MAC_OFFSET_5906;
5165
5166 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5167 ETHER_ADDR_LEN));
5168}
5169
5170static int
5171bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5172{
5173
5174 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5175 return (1);
5176
5177 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5178 ETHER_ADDR_LEN));
5179}
5180
5181static int
5182bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5183{
5184 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5185 /* NOTE: Order is critical */
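/*
 * The first source to return 0 wins: a firmware- or OF-supplied
 * address overrides one found in NIC shared memory, which in turn
 * overrides NVRAM and EEPROM.
 */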
5186 bge_get_eaddr_fw,
5187 bge_get_eaddr_mem,
5188 bge_get_eaddr_nvram,
5189 bge_get_eaddr_eeprom,
5190 NULL
5191 };
5192 const bge_eaddr_fcn_t *func;
5193
5194 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5195 if ((*func)(sc, eaddr) == 0)
5196 break;
5197 }
5198 return (*func == NULL ? ENXIO : 0);
5199}