if_bge.c (226866) vs. if_bge.c (226867)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 226866 2011-10-27 21:27:37Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 226867 2011-10-27 22:10:52Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
305 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
306 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
307 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
308 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
309 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
310 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
311 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
312 /* 5754 and 5787 share the same ASIC ID */
313 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
314 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
315 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
316 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
317 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
318 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
319 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
320 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
321 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
322
323 { 0, NULL }
324};
325
326/*
327 * Some defaults for major revisions, so that newer steppings
328 * that we don't know about have a shot at working.
329 */
330static const struct bge_revision const bge_majorrevs[] = {
331 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
332 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
333 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
334 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
335 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
336 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
337 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
338 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
339 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
340 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
341 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
342 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
343 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
344 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
345 /* 5754 and 5787 share the same ASIC ID */
346 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
347 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
348 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
349 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
350 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
351 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
352
353 { 0, NULL }
354};
355
356#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
357#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
358#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
359#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
360#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
361#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
362#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
363
364const struct bge_revision * bge_lookup_rev(uint32_t);
365const struct bge_vendor * bge_lookup_vendor(uint16_t);
366
367typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
368
369static int bge_probe(device_t);
370static int bge_attach(device_t);
371static int bge_detach(device_t);
372static int bge_suspend(device_t);
373static int bge_resume(device_t);
374static void bge_release_resources(struct bge_softc *);
375static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
376static int bge_dma_alloc(struct bge_softc *);
377static void bge_dma_free(struct bge_softc *);
378static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
379 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
380
381static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
382static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
383static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
384static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
385static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
386
387static void bge_txeof(struct bge_softc *, uint16_t);
388static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
389static int bge_rxeof(struct bge_softc *, uint16_t, int);
390
391static void bge_asf_driver_up (struct bge_softc *);
392static void bge_tick(void *);
393static void bge_stats_clear_regs(struct bge_softc *);
394static void bge_stats_update(struct bge_softc *);
395static void bge_stats_update_regs(struct bge_softc *);
396static struct mbuf *bge_check_short_dma(struct mbuf *);
397static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
398 uint16_t *, uint16_t *);
399static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
400
401static void bge_intr(void *);
402static int bge_msi_intr(void *);
403static void bge_intr_task(void *, int);
404static void bge_start_locked(struct ifnet *);
405static void bge_start(struct ifnet *);
406static int bge_ioctl(struct ifnet *, u_long, caddr_t);
407static void bge_init_locked(struct bge_softc *);
408static void bge_init(void *);
409static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
410static void bge_stop(struct bge_softc *);
411static void bge_watchdog(struct bge_softc *);
412static int bge_shutdown(device_t);
413static int bge_ifmedia_upd_locked(struct ifnet *);
414static int bge_ifmedia_upd(struct ifnet *);
415static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
416
417static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
418static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
419
420static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
421static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
422
423static void bge_setpromisc(struct bge_softc *);
424static void bge_setmulti(struct bge_softc *);
425static void bge_setvlan(struct bge_softc *);
426
427static __inline void bge_rxreuse_std(struct bge_softc *, int);
428static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
429static int bge_newbuf_std(struct bge_softc *, int);
430static int bge_newbuf_jumbo(struct bge_softc *, int);
431static int bge_init_rx_ring_std(struct bge_softc *);
432static void bge_free_rx_ring_std(struct bge_softc *);
433static int bge_init_rx_ring_jumbo(struct bge_softc *);
434static void bge_free_rx_ring_jumbo(struct bge_softc *);
435static void bge_free_tx_ring(struct bge_softc *);
436static int bge_init_tx_ring(struct bge_softc *);
437
438static int bge_chipinit(struct bge_softc *);
439static int bge_blockinit(struct bge_softc *);
440
441static int bge_has_eaddr(struct bge_softc *);
442static uint32_t bge_readmem_ind(struct bge_softc *, int);
443static void bge_writemem_ind(struct bge_softc *, int, int);
444static void bge_writembx(struct bge_softc *, int, int);
445#ifdef notdef
446static uint32_t bge_readreg_ind(struct bge_softc *, int);
447#endif
448static void bge_writemem_direct(struct bge_softc *, int, int);
449static void bge_writereg_ind(struct bge_softc *, int, int);
450
451static int bge_miibus_readreg(device_t, int, int);
452static int bge_miibus_writereg(device_t, int, int, int);
453static void bge_miibus_statchg(device_t);
454#ifdef DEVICE_POLLING
455static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
456#endif
457
458#define BGE_RESET_START 1
459#define BGE_RESET_STOP 2
460static void bge_sig_post_reset(struct bge_softc *, int);
461static void bge_sig_legacy(struct bge_softc *, int);
462static void bge_sig_pre_reset(struct bge_softc *, int);
463static void bge_stop_fw(struct bge_softc *);
464static int bge_reset(struct bge_softc *);
465static void bge_link_upd(struct bge_softc *);
466
467/*
468 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
469 * leak information to untrusted users. It is also known to cause alignment
470 * traps on certain architectures.
471 */
472#ifdef BGE_REGISTER_DEBUG
473static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
474static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
475static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
476#endif
477static void bge_add_sysctls(struct bge_softc *);
478static void bge_add_sysctl_stats_regs(struct bge_softc *,
479 struct sysctl_ctx_list *, struct sysctl_oid_list *);
480static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
481 struct sysctl_oid_list *);
482static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
483
484static device_method_t bge_methods[] = {
485 /* Device interface */
486 DEVMETHOD(device_probe, bge_probe),
487 DEVMETHOD(device_attach, bge_attach),
488 DEVMETHOD(device_detach, bge_detach),
489 DEVMETHOD(device_shutdown, bge_shutdown),
490 DEVMETHOD(device_suspend, bge_suspend),
491 DEVMETHOD(device_resume, bge_resume),
492
493 /* bus interface */
494 DEVMETHOD(bus_print_child, bus_generic_print_child),
495 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
496
497 /* MII interface */
498 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
499 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
500 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
501
502 { 0, 0 }
503};
504
505static driver_t bge_driver = {
506 "bge",
507 bge_methods,
508 sizeof(struct bge_softc)
509};
510
511static devclass_t bge_devclass;
512
513DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
514DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
515
516static int bge_allow_asf = 1;
517
518TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
519
520SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
521SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
522 "Allow ASF mode if available");
523
524#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
525#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
526#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
527#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
528#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
529
530static int
531bge_has_eaddr(struct bge_softc *sc)
532{
533#ifdef __sparc64__
534 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
535 device_t dev;
536 uint32_t subvendor;
537
538 dev = sc->bge_dev;
539
540 /*
541 * The on-board BGEs found in sun4u machines aren't fitted with
542 * an EEPROM which means that we have to obtain the MAC address
543 * via OFW and that some tests will always fail. We distinguish
544 * such BGEs by the subvendor ID, which also has to be obtained
545 * from OFW instead of the PCI configuration space as the latter
546 * indicates Broadcom as the subvendor of the netboot interface.
547 * For early Blade 1500 and 2500 we even have to check the OFW
548 * device path as the subvendor ID always defaults to Broadcom
549 * there.
550 */
551 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
552 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
553 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
554 return (0);
555 memset(buf, 0, sizeof(buf));
556 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
557 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
558 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
559 return (0);
560 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
562 return (0);
563 }
564#endif
565 return (1);
566}
567
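/*
 * Indirect access to NIC-internal memory: point the PCI memory window
 * base register at the target offset, transfer the data through the
 * memory window data register, then move the window back to offset 0.
 */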
568static uint32_t
569bge_readmem_ind(struct bge_softc *sc, int off)
570{
571 device_t dev;
572 uint32_t val;
573
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
575 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
576 return (0);
577
578 dev = sc->bge_dev;
579
580 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
581 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
583 return (val);
584}
585
586static void
587bge_writemem_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
592 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
593 return;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
600}
601
602#ifdef notdef
603static uint32_t
604bge_readreg_ind(struct bge_softc *sc, int off)
605{
606 device_t dev;
607
608 dev = sc->bge_dev;
609
610 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
611 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
612}
613#endif
614
615static void
616bge_writereg_ind(struct bge_softc *sc, int off, int val)
617{
618 device_t dev;
619
620 dev = sc->bge_dev;
621
622 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
623 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
624}
625
626static void
627bge_writemem_direct(struct bge_softc *sc, int off, int val)
628{
629 CSR_WRITE_4(sc, off, val);
630}
631
632static void
633bge_writembx(struct bge_softc *sc, int off, int val)
634{
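	/* On the BCM5906, rebase the offset into the low-priority mailbox range. */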
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
636 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
637
638 CSR_WRITE_4(sc, off, val);
639}
640
641/*
642 * Map a single buffer address.
643 */
644
645static void
646bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
647{
648 struct bge_dmamap_arg *ctx;
649
650 if (error)
651 return;
652
653 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
654
655 ctx = arg;
656 ctx->bge_busaddr = segs->ds_addr;
657}
658
659static uint8_t
660bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
661{
662 uint32_t access, byte = 0;
663 int i;
664
665 /* Lock. */
666 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
667 for (i = 0; i < 8000; i++) {
668 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
669 break;
670 DELAY(20);
671 }
672 if (i == 8000)
673 return (1);
674
675 /* Enable access. */
676 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
677 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
678
679 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
680 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
681 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
682 DELAY(10);
683 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
684 DELAY(10);
685 break;
686 }
687 }
688
689 if (i == BGE_TIMEOUT * 10) {
690 if_printf(sc->bge_ifp, "nvram read timed out\n");
691 return (1);
692 }
693
694 /* Get result. */
695 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
696
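	/*
	 * The data register holds a 32-bit word; byte-swap it so the
	 * requested byte can be picked out by its offset within the word.
	 */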
697 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
698
699 /* Disable access. */
700 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
701
702 /* Unlock. */
703 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
704 CSR_READ_4(sc, BGE_NVRAM_SWARB);
705
706 return (0);
707}
708
709/*
710 * Read a sequence of bytes from NVRAM.
711 */
712static int
713bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
714{
715 int err = 0, i;
716 uint8_t byte = 0;
717
718 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
719 return (1);
720
721 for (i = 0; i < cnt; i++) {
722 err = bge_nvram_getbyte(sc, off + i, &byte);
723 if (err)
724 break;
725 *(dest + i) = byte;
726 }
727
728 return (err ? 1 : 0);
729}
730
731/*
732 * Read a byte of data stored in the EEPROM at address 'addr.' The
733 * BCM570x supports both the traditional bitbang interface and an
734 * auto access interface for reading the EEPROM. We use the auto
735 * access method.
736 */
737static uint8_t
738bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
739{
740 int i;
741 uint32_t byte = 0;
742
743 /*
744 * Enable use of auto EEPROM access so we can avoid
745 * having to use the bitbang method.
746 */
747 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
748
749 /* Reset the EEPROM, load the clock period. */
750 CSR_WRITE_4(sc, BGE_EE_ADDR,
751 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
752 DELAY(20);
753
754 /* Issue the read EEPROM command. */
755 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
756
757 /* Wait for completion */
758	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
759 DELAY(10);
760 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
761 break;
762 }
763
764 if (i == BGE_TIMEOUT * 10) {
765 device_printf(sc->bge_dev, "EEPROM read timed out\n");
766 return (1);
767 }
768
769 /* Get result. */
770 byte = CSR_READ_4(sc, BGE_EE_DATA);
771
772 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
773
774 return (0);
775}
776
777/*
778 * Read a sequence of bytes from the EEPROM.
779 */
780static int
781bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
782{
783 int i, error = 0;
784 uint8_t byte = 0;
785
786 for (i = 0; i < cnt; i++) {
787 error = bge_eeprom_getbyte(sc, off + i, &byte);
788 if (error)
789 break;
790 *(dest + i) = byte;
791 }
792
793 return (error ? 1 : 0);
794}
795
796static int
797bge_miibus_readreg(device_t dev, int phy, int reg)
798{
799 struct bge_softc *sc;
800 uint32_t val;
801 int i;
802
803 sc = device_get_softc(dev);
804
805	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
806 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
807 CSR_WRITE_4(sc, BGE_MI_MODE,
808 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
809 DELAY(80);
810 }
811
812 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
813 BGE_MIPHY(phy) | BGE_MIREG(reg));
814
815 /* Poll for the PHY register access to complete. */
816 for (i = 0; i < BGE_TIMEOUT; i++) {
817 DELAY(10);
818 val = CSR_READ_4(sc, BGE_MI_COMM);
819 if ((val & BGE_MICOMM_BUSY) == 0) {
820 DELAY(5);
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 break;
823 }
824 }
825
826 if (i == BGE_TIMEOUT) {
827 device_printf(sc->bge_dev,
828 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
829 phy, reg, val);
830 val = 0;
831 }
832
833 /* Restore the autopoll bit if necessary. */
834 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
835 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
836 DELAY(80);
837 }
838
839 if (val & BGE_MICOMM_READFAIL)
840 return (0);
841
842 return (val & 0xFFFF);
843}
844
845static int
846bge_miibus_writereg(device_t dev, int phy, int reg, int val)
847{
848 struct bge_softc *sc;
849 int i;
850
851 sc = device_get_softc(dev);
852
853 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
854 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
855 return (0);
856
857	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
858 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
859 CSR_WRITE_4(sc, BGE_MI_MODE,
860 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
861 DELAY(80);
862 }
863
864 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
865 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
866
867 for (i = 0; i < BGE_TIMEOUT; i++) {
868 DELAY(10);
869 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
870 DELAY(5);
871 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
872 break;
873 }
874 }
875
876 /* Restore the autopoll bit if necessary. */
877 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
878 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
879 DELAY(80);
880 }
881
882 if (i == BGE_TIMEOUT)
883 device_printf(sc->bge_dev,
884 "PHY write timed out (phy %d, reg %d, val %d)\n",
885 phy, reg, val);
886
887 return (0);
888}
889
890static void
891bge_miibus_statchg(device_t dev)
892{
893 struct bge_softc *sc;
894 struct mii_data *mii;
895 sc = device_get_softc(dev);
896 mii = device_get_softc(sc->bge_miibus);
897
898 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
899 (IFM_ACTIVE | IFM_AVALID)) {
900 switch (IFM_SUBTYPE(mii->mii_media_active)) {
901 case IFM_10_T:
902 case IFM_100_TX:
903 sc->bge_link = 1;
904 break;
905 case IFM_1000_T:
906 case IFM_1000_SX:
907 case IFM_2500_SX:
908 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
909 sc->bge_link = 1;
910 else
911 sc->bge_link = 0;
912 break;
913 default:
914 sc->bge_link = 0;
915 break;
916 }
917 } else
918 sc->bge_link = 0;
919 if (sc->bge_link == 0)
920 return;
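	/* Reprogram the MAC port mode (GMII or MII) to match the current media. */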
921 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
922 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
923 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
924 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
925 else
926 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
927
928	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
929 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
930 if ((IFM_OPTIONS(mii->mii_media_active) &
931 IFM_ETH_TXPAUSE) != 0)
932 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
933 else
934 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
935 if ((IFM_OPTIONS(mii->mii_media_active) &
936 IFM_ETH_RXPAUSE) != 0)
937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
938 else
939 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
940 } else {
941 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
942 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
943 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
944 }
945}
946
947/*
948 * Initialize a standard receive ring descriptor.
949 */
950static int
951bge_newbuf_std(struct bge_softc *sc, int i)
952{
953 struct mbuf *m;
954 struct bge_rx_bd *r;
955 bus_dma_segment_t segs[1];
956 bus_dmamap_t map;
957 int error, nsegs;
958
959 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
960 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
961 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
962 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
963 if (m == NULL)
964 return (ENOBUFS);
965 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
966 } else {
967 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
968 if (m == NULL)
969 return (ENOBUFS);
970 m->m_len = m->m_pkthdr.len = MCLBYTES;
971 }
972 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
973 m_adj(m, ETHER_ALIGN);
974
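	/*
	 * Load the new mbuf into the spare DMA map first so a mapping
	 * failure leaves the current buffer and ring entry intact; on
	 * success the spare map and the slot's map are swapped below.
	 */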
975 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
976 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
977 if (error != 0) {
978 m_freem(m);
979 return (error);
980 }
981 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
982 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
983 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
984 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
985 sc->bge_cdata.bge_rx_std_dmamap[i]);
986 }
987 map = sc->bge_cdata.bge_rx_std_dmamap[i];
988 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
989 sc->bge_cdata.bge_rx_std_sparemap = map;
990 sc->bge_cdata.bge_rx_std_chain[i] = m;
991 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
992 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
993 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
994 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
995 r->bge_flags = BGE_RXBDFLAG_END;
996 r->bge_len = segs[0].ds_len;
997 r->bge_idx = i;
998
999 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1000 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Initialize a jumbo receive ring descriptor. This allocates
1007 * a jumbo buffer from the pool managed internally by the driver.
1008 */
1009static int
1010bge_newbuf_jumbo(struct bge_softc *sc, int i)
1011{
1012 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1013 bus_dmamap_t map;
1014 struct bge_extrx_bd *r;
1015 struct mbuf *m;
1016 int error, nsegs;
1017
1018 MGETHDR(m, M_DONTWAIT, MT_DATA);
1019 if (m == NULL)
1020 return (ENOBUFS);
1021
1022 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1023 if (!(m->m_flags & M_EXT)) {
1024 m_freem(m);
1025 return (ENOBUFS);
1026 }
1027 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1028 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1029 m_adj(m, ETHER_ALIGN);
1030
1031 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1032 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1033 if (error != 0) {
1034 m_freem(m);
1035 return (error);
1036 }
1037
1038 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1039 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1040 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1041 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1042 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1043 }
1044 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1046 sc->bge_cdata.bge_rx_jumbo_sparemap;
1047 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1048 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1053
1054 /*
1055 * Fill in the extended RX buffer descriptor.
1056 */
1057 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1058 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1059 r->bge_idx = i;
1060 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
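	/*
	 * The cases below intentionally fall through so that an N-segment
	 * mapping fills descriptor slots N-1 down to 0.
	 */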
1061 switch (nsegs) {
1062 case 4:
1063 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1064 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1065 r->bge_len3 = segs[3].ds_len;
1066 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1067 case 3:
1068 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1069 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1070 r->bge_len2 = segs[2].ds_len;
1071 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1072 case 2:
1073 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1074 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1075 r->bge_len1 = segs[1].ds_len;
1076 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1077 case 1:
1078 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1079 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1080 r->bge_len0 = segs[0].ds_len;
1081 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1082 break;
1083 default:
1084 panic("%s: %d segments\n", __func__, nsegs);
1085 }
1086
1087 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1088 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1089
1090 return (0);
1091}
1092
1093static int
1094bge_init_rx_ring_std(struct bge_softc *sc)
1095{
1096 int error, i;
1097
1098 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1099 sc->bge_std = 0;
1100 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1101 if ((error = bge_newbuf_std(sc, i)) != 0)
1102 return (error);
1103 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1104 }
1105
1106 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1107 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1108
1109 sc->bge_std = 0;
1110 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_std(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1123 sc->bge_cdata.bge_rx_std_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1128 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1131 sizeof(struct bge_rx_bd));
1132 }
1133}
1134
1135static int
1136bge_init_rx_ring_jumbo(struct bge_softc *sc)
1137{
1138 struct bge_rcb *rcb;
1139 int error, i;
1140
1141 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1142 sc->bge_jumbo = 0;
1143 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1144 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1145 return (error);
1146 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1147 }
1148
1149 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1150 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1151
1152 sc->bge_jumbo = 0;
1153
1154 /* Enable the jumbo receive producer ring. */
1155 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1156 rcb->bge_maxlen_flags =
1157 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1158 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1159
1160 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1161
1162 return (0);
1163}
1164
1165static void
1166bge_free_rx_ring_jumbo(struct bge_softc *sc)
1167{
1168 int i;
1169
1170 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1171 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1172 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1173 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1174 BUS_DMASYNC_POSTREAD);
1175 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1177 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1178 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1179 }
1180 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1181 sizeof(struct bge_extrx_bd));
1182 }
1183}
1184
1185static void
1186bge_free_tx_ring(struct bge_softc *sc)
1187{
1188 int i;
1189
1190 if (sc->bge_ldata.bge_tx_ring == NULL)
1191 return;
1192
1193 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1194 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1195 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1196 sc->bge_cdata.bge_tx_dmamap[i],
1197 BUS_DMASYNC_POSTWRITE);
1198 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i]);
1200 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1201 sc->bge_cdata.bge_tx_chain[i] = NULL;
1202 }
1203 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1204 sizeof(struct bge_tx_bd));
1205 }
1206}
1207
1208static int
1209bge_init_tx_ring(struct bge_softc *sc)
1210{
1211 sc->bge_txcnt = 0;
1212 sc->bge_tx_saved_considx = 0;
1213
1214 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1215 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1216 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1217
1218 /* Initialize transmit producer index for host-memory send ring. */
1219 sc->bge_tx_prodidx = 0;
1220 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1221
1222 /* 5700 b2 errata */
1223 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1224 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1225
1226 /* NIC-memory send ring not used; initialize to zero. */
1227 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1228 /* 5700 b2 errata */
1229 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231
1232 return (0);
1233}
1234
1235static void
1236bge_setpromisc(struct bge_softc *sc)
1237{
1238 struct ifnet *ifp;
1239
1240 BGE_LOCK_ASSERT(sc);
1241
1242 ifp = sc->bge_ifp;
1243
1244 /* Enable or disable promiscuous mode as needed. */
1245 if (ifp->if_flags & IFF_PROMISC)
1246 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1247 else
1248 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1249}
1250
1251static void
1252bge_setmulti(struct bge_softc *sc)
1253{
1254 struct ifnet *ifp;
1255 struct ifmultiaddr *ifma;
1256 uint32_t hashes[4] = { 0, 0, 0, 0 };
1257 int h, i;
1258
1259 BGE_LOCK_ASSERT(sc);
1260
1261 ifp = sc->bge_ifp;
1262
1263 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1264 for (i = 0; i < 4; i++)
1265 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1266 return;
1267 }
1268
1269 /* First, zot all the existing filters. */
1270 for (i = 0; i < 4; i++)
1271 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1272
1273 /* Now program new ones. */
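	/*
	 * Each address hashes to one bit of a 128-bit table spread across
	 * the four BGE_MAR registers: the low 7 bits of the little-endian
	 * CRC-32 select the register (bits 6-5) and the bit within it
	 * (bits 4-0).
	 */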
1274 if_maddr_rlock(ifp);
1275 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1276 if (ifma->ifma_addr->sa_family != AF_LINK)
1277 continue;
1278 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1279 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1280 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1281 }
1282 if_maddr_runlock(ifp);
1283
1284 for (i = 0; i < 4; i++)
1285 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1286}
1287
1288static void
1289bge_setvlan(struct bge_softc *sc)
1290{
1291 struct ifnet *ifp;
1292
1293 BGE_LOCK_ASSERT(sc);
1294
1295 ifp = sc->bge_ifp;
1296
1297 /* Enable or disable VLAN tag stripping as needed. */
1298 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1299 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1300 else
1301 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1302}
1303
1304static void
1305bge_sig_pre_reset(struct bge_softc *sc, int type)
1306{
1307
1308 /*
1309	 * Some chips don't like this, so only do it if ASF is enabled.
1310 */
1311 if (sc->bge_asf_mode)
1312 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1313
1314 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1315 switch (type) {
1316 case BGE_RESET_START:
1317 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1318 BGE_FW_DRV_STATE_START);
1319 break;
1320 case BGE_RESET_STOP:
1321 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1322 BGE_FW_DRV_STATE_UNLOAD);
1323 break;
1324 }
1325 }
1326}
1327
1328static void
1329bge_sig_post_reset(struct bge_softc *sc, int type)
1330{
1331
1332 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1333 switch (type) {
1334 case BGE_RESET_START:
1335 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1336 BGE_FW_DRV_STATE_START_DONE);
1337 /* START DONE */
1338 break;
1339 case BGE_RESET_STOP:
1340 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1341 BGE_FW_DRV_STATE_UNLOAD_DONE);
1342 break;
1343 }
1344 }
1345}
1346
1347static void
1348bge_sig_legacy(struct bge_softc *sc, int type)
1349{
1350
1351 if (sc->bge_asf_mode) {
1352 switch (type) {
1353 case BGE_RESET_START:
1354 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1355 BGE_FW_DRV_STATE_START);
1356 break;
1357 case BGE_RESET_STOP:
1358 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1359 BGE_FW_DRV_STATE_UNLOAD);
1360 break;
1361 }
1362 }
1363}
1364
1365static void
1366bge_stop_fw(struct bge_softc *sc)
1367{
1368 int i;
1369
1370 if (sc->bge_asf_mode) {
1371 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1372 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1373 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1374
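		/*
		 * Wait for the firmware to acknowledge the pause command
		 * by clearing the driver event bit.
		 */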
1375 for (i = 0; i < 100; i++ ) {
1376 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1377 BGE_RX_CPU_DRV_EVENT))
1378 break;
1379 DELAY(10);
1380 }
1381 }
1382}
1383
1384/*
1385 * Do endian, PCI and DMA initialization.
1386 */
1387static int
1388bge_chipinit(struct bge_softc *sc)
1389{
1390 uint32_t dma_rw_ctl, misc_ctl;
1391 uint16_t val;
1392 int i;
1393
1394 /* Set endianness before we access any non-PCI registers. */
1395 misc_ctl = BGE_INIT;
1396 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1397 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1398 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1399
1400 /* Clear the MAC control register */
1401 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1402
1403 /*
1404 * Clear the MAC statistics block in the NIC's
1405 * internal memory.
1406 */
1407 for (i = BGE_STATS_BLOCK;
1408 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1409 BGE_MEMWIN_WRITE(sc, i, 0);
1410
1411 for (i = BGE_STATUS_BLOCK;
1412 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1413 BGE_MEMWIN_WRITE(sc, i, 0);
1414
1415 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1416 /*
1417 * Fix data corruption caused by non-qword write with WB.
1418 * Fix master abort in PCI mode.
1419 * Fix PCI latency timer.
1420 */
1421 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1422 val |= (1 << 10) | (1 << 12) | (1 << 13);
1423 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1424 }
1425
1426 /*
1427 * Set up the PCI DMA control register.
1428 */
1429 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1430 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1431 if (sc->bge_flags & BGE_FLAG_PCIE) {
1432 /* Read watermark not used, 128 bytes for write. */
1433 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1434 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1435 if (BGE_IS_5714_FAMILY(sc)) {
1436 /* 256 bytes for read and write. */
1437 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1438 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1439 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1440 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1441 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1442 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1443 /*
1444 * In the BCM5703, the DMA read watermark should
1445 * be set to less than or equal to the maximum
1446 * memory read byte count of the PCI-X command
1447 * register.
1448 */
1449 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1450 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1451 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1452 /* 1536 bytes for read, 384 bytes for write. */
1453 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1454 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1455 } else {
1456 /* 384 bytes for read and write. */
1457 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1458 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1459 0x0F;
1460 }
1461 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1462 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1463 uint32_t tmp;
1464
1465 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1466 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1467 if (tmp == 6 || tmp == 7)
1468 dma_rw_ctl |=
1469 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1470
1471 /* Set PCI-X DMA write workaround. */
1472 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1473 }
1474 } else {
1475 /* Conventional PCI bus: 256 bytes for read and write. */
1476 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1477 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1478
1479 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1480 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1481 dma_rw_ctl |= 0x0F;
1482 }
1483 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1484 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1485 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1486 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1487 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1488 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1489 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1490 if (BGE_IS_5717_PLUS(sc)) {
1491 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1492 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1493 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1494 /*
1495 * Enable HW workaround for controllers that misinterpret
1496 * a status tag update and leave interrupts permanently
1497 * disabled.
1498 */
1499 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1500 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1501 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1502 }
1503 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1504
1505 /*
1506 * Set up general mode register.
1507 */
1508 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1509 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1510 BGE_MODECTL_TX_NO_PHDR_CSUM);
1511
1512 /*
1513	 * BCM5701 B5 has a bug causing data corruption when using
1514 * 64-bit DMA reads, which can be terminated early and then
1515 * completed later as 32-bit accesses, in combination with
1516 * certain bridges.
1517 */
1518 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1519 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1520 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1521
1522 /*
1523 * Tell the firmware the driver is running
1524 */
1525 if (sc->bge_asf_mode & ASF_STACKUP)
1526 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1527
1528 /*
1529 * Disable memory write invalidate. Apparently it is not supported
1530 * properly by these devices. Also ensure that INTx isn't disabled,
1531 * as these chips need it even when using MSI.
1532 */
1533 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1534 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1535
1536	/* Set the timer prescaler (always 66MHz) */
1537 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1538
1539 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1540 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1541 DELAY(40); /* XXX */
1542
1543 /* Put PHY into ready state */
1544 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1545 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1546 DELAY(40);
1547 }
1548
1549 return (0);
1550}
1551
1552static int
1553bge_blockinit(struct bge_softc *sc)
1554{
1555 struct bge_rcb *rcb;
1556 bus_size_t vrcb;
1557 bge_hostaddr taddr;
1558 uint32_t dmactl, val;
1559 int i, limit;
1560
1561 /*
1562 * Initialize the memory window pointer register so that
1563 * we can access the first 32K of internal NIC RAM. This will
1564 * allow us to set up the TX send ring RCBs and the RX return
1565 * ring RCBs, plus other things which live in NIC memory.
1566 */
1567 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1568
1569 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1570
1571 if (!(BGE_IS_5705_PLUS(sc))) {
1572 /* Configure mbuf memory pool */
1573 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1574 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1575 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1576 else
1577 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1578
1579 /* Configure DMA resource pool */
1580 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1581 BGE_DMA_DESCRIPTORS);
1582 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1583 }
1584
1585 /* Configure mbuf pool watermarks */
1586 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1587 sc->bge_asicrev == BGE_ASICREV_BCM57765) {
1588 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1589 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1590 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1591 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1592 } else {
1593 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1594 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1595 }
1596 } else if (!BGE_IS_5705_PLUS(sc)) {
1597 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1598 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1599 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1600 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1601 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1602 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1603 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1604 } else {
1605 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1606 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1607 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1608 }
1609
1610 /* Configure DMA resource watermarks */
1611 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1612 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1613
1614 /* Enable buffer manager */
1615 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1616 /*
1617 * Change the arbitration algorithm of TXMBUF read request to
1618 * round-robin instead of priority based for BCM5719. When
1619 * TXFIFO is almost empty, RDMA will hold its request until
1620	 * TXFIFO is no longer almost empty.
1621 */
1622 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1623 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1624 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1625
1626 /* Poll for buffer manager start indication */
1627 for (i = 0; i < BGE_TIMEOUT; i++) {
1628 DELAY(10);
1629 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1630 break;
1631 }
1632
1633 if (i == BGE_TIMEOUT) {
1634 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1635 return (ENXIO);
1636 }
1637
1638 /* Enable flow-through queues */
1639 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1640 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1641
1642 /* Wait until queue initialization is complete */
1643 for (i = 0; i < BGE_TIMEOUT; i++) {
1644 DELAY(10);
1645 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1646 break;
1647 }
1648
1649 if (i == BGE_TIMEOUT) {
1650 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1651 return (ENXIO);
1652 }
1653
1654 /*
1655 * Summary of rings supported by the controller:
1656 *
1657 * Standard Receive Producer Ring
1658 * - This ring is used to feed receive buffers for "standard"
1659 * sized frames (typically 1536 bytes) to the controller.
1660 *
1661 * Jumbo Receive Producer Ring
1662 * - This ring is used to feed receive buffers for jumbo sized
1663 * frames (i.e. anything bigger than the "standard" frames)
1664 * to the controller.
1665 *
1666 * Mini Receive Producer Ring
1667 * - This ring is used to feed receive buffers for "mini"
1668 * sized frames to the controller.
1669 * - This feature required external memory for the controller
1670 * but was never used in a production system. Should always
1671 * be disabled.
1672 *
1673 * Receive Return Ring
1674 * - After the controller has placed an incoming frame into a
1675	 *   receive buffer, that buffer is moved into a receive return
1676	 *   ring.  The driver is then responsible for passing the
1677 * buffer up to the stack. Many versions of the controller
1678 * support multiple RR rings.
1679 *
1680 * Send Ring
1681 * - This ring is used for outgoing frames. Many versions of
1682 * the controller support multiple send rings.
1683 */
1684
1685 /* Initialize the standard receive producer ring control block. */
1686 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1687 rcb->bge_hostaddr.bge_addr_lo =
1688 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1689 rcb->bge_hostaddr.bge_addr_hi =
1690 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1691 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1692 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1693 if (BGE_IS_5717_PLUS(sc)) {
1694 /*
1695 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1696 * Bits 15-2 : Maximum RX frame size
1697	 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1698 * Bit 0 : Reserved
1699 */
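		/*
		 * BGE_RCB_MAXLEN_FLAGS() packs its first argument into the
		 * upper 16 bits and its second into the lower 16 bits, so
		 * the ring size and the frame size/flags are written as a
		 * single 32-bit word.
		 */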
1700 rcb->bge_maxlen_flags =
1701 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1702 } else if (BGE_IS_5705_PLUS(sc)) {
1703 /*
1704 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1705 * Bits 15-2 : Reserved (should be 0)
1706 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1707 * Bit 0 : Reserved
1708 */
1709 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1710 } else {
1711 /*
1712 * Ring size is always XXX entries
1713 * Bits 31-16: Maximum RX frame size
1714 * Bits 15-2 : Reserved (should be 0)
1715 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1716 * Bit 0 : Reserved
1717 */
1718 rcb->bge_maxlen_flags =
1719 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1720 }
1721 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1722 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1723 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1724 else
1725 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1726 /* Write the standard receive producer ring control block. */
1727 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1728 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1729 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1730 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1731
1732 /* Reset the standard receive producer ring producer index. */
1733 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1734
1735 /*
1736 * Initialize the jumbo RX producer ring control
1737 * block. We set the 'ring disabled' bit in the
1738 * flags field until we're actually ready to start
1739 * using this ring (i.e. once we set the MTU
1740 * high enough to require it).
1741 */
1742 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1743 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1744 /* Get the jumbo receive producer ring RCB parameters. */
1745 rcb->bge_hostaddr.bge_addr_lo =
1746 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1747 rcb->bge_hostaddr.bge_addr_hi =
1748 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1749 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1750 sc->bge_cdata.bge_rx_jumbo_ring_map,
1751 BUS_DMASYNC_PREREAD);
1752 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1753 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1754 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1755 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1756 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1757 else
1758 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1759 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1760 rcb->bge_hostaddr.bge_addr_hi);
1761 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1762 rcb->bge_hostaddr.bge_addr_lo);
1763 /* Program the jumbo receive producer ring RCB parameters. */
1764 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1765 rcb->bge_maxlen_flags);
1766 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1767 /* Reset the jumbo receive producer ring producer index. */
1768 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1769 }
1770
1771 /* Disable the mini receive producer ring RCB. */
1772 if (BGE_IS_5700_FAMILY(sc)) {
1773 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1774 rcb->bge_maxlen_flags =
1775 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1776 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1777 rcb->bge_maxlen_flags);
1778 /* Reset the mini receive producer ring producer index. */
1779 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1780 }
1781
1782 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1783 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1784 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1785 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1786 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1787 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1788 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1789 }
1790 /*
1791 * The BD ring replenish thresholds control how often the
1792	 * hardware fetches new BDs from the producer rings in host
1793	 * memory.  Setting the value too low on a busy system can
1794	 * starve the hardware and reduce the throughput.
1795	 *
1796	 * Set the BD ring replenish thresholds.  The recommended
1797 * values are 1/8th the number of descriptors allocated to
1798 * each ring.
1799 * XXX The 5754 requires a lower threshold, so it might be a
1800 * requirement of all 575x family chips. The Linux driver sets
1801 * the lower threshold for all 5705 family chips as well, but there
1802 * are reports that it might not need to be so strict.
1803 *
1804 * XXX Linux does some extra fiddling here for the 5906 parts as
1805 * well.
1806 */
1807 if (BGE_IS_5705_PLUS(sc))
1808 val = 8;
1809 else
1810 val = BGE_STD_RX_RING_CNT / 8;
1811 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1812 if (BGE_IS_JUMBO_CAPABLE(sc))
1813 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1814 BGE_JUMBO_RX_RING_CNT/8);
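	/*
	 * 5717-class chips also have replenish low-water mark registers
	 * for the standard and jumbo producer rings; program them here.
	 */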
1815 if (BGE_IS_5717_PLUS(sc)) {
1816 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1817 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1818 }
1819
1820 /*
1821 * Disable all send rings by setting the 'ring disabled' bit
1822 * in the flags field of all the TX send ring control blocks,
1823 * located in NIC memory.
1824 */
1825 if (!BGE_IS_5705_PLUS(sc))
1826 /* 5700 to 5704 had 16 send rings. */
1827 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1828 else
1829 limit = 1;
1830 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1831 for (i = 0; i < limit; i++) {
1832 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1833 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1834 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1835 vrcb += sizeof(struct bge_rcb);
1836 }
1837
1838 /* Configure send ring RCB 0 (we use only the first ring) */
1839 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1840 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1841 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1842 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1843 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1844 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1845 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1846 else
1847 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1848 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1849 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1850 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1851
1852 /*
1853 * Disable all receive return rings by setting the
1854	 * 'ring disabled' bit in the flags field of all the receive
1855 * return ring control blocks, located in NIC memory.
1856 */
1857 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1858 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
1859 /* Should be 17, use 16 until we get an SRAM map. */
1860 limit = 16;
1861 } else if (!BGE_IS_5705_PLUS(sc))
1862 limit = BGE_RX_RINGS_MAX;
1863 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1864 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1865 limit = 4;
1866 else
1867 limit = 1;
1868 /* Disable all receive return rings. */
1869 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1870 for (i = 0; i < limit; i++) {
1871 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1872 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1873 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1874 BGE_RCB_FLAG_RING_DISABLED);
1875 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1876 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1877 (i * (sizeof(uint64_t))), 0);
1878 vrcb += sizeof(struct bge_rcb);
1879 }
1880
1881 /*
1882 * Set up receive return ring 0. Note that the NIC address
1883 * for RX return rings is 0x0. The return rings live entirely
1884 * within the host, so the nicaddr field in the RCB isn't used.
1885 */
1886 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1887 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1888 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1889 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1890 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1891 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1892 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1893
1894 /* Set random backoff seed for TX */
1895 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1896 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1897 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1898 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1899 BGE_TX_BACKOFF_SEED_MASK);
1900
1901 /* Set inter-packet gap */
1902 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1903
1904 /*
1905 * Specify which ring to use for packets that don't match
1906 * any RX rules.
1907 */
1908 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1909
1910 /*
1911 * Configure number of RX lists. One interrupt distribution
1912 * list, sixteen active lists, one bad frames class.
1913 */
1914 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1915
1916	/* Initialize RX list placement stats mask. */
1917 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1918 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1919
1920 /* Disable host coalescing until we get it set up */
1921 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1922
1923 /* Poll to make sure it's shut down. */
1924 for (i = 0; i < BGE_TIMEOUT; i++) {
1925 DELAY(10);
1926 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1927 break;
1928 }
1929
1930 if (i == BGE_TIMEOUT) {
1931 device_printf(sc->bge_dev,
1932 "host coalescing engine failed to idle\n");
1933 return (ENXIO);
1934 }
1935
1936 /* Set up host coalescing defaults */
1937 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1938 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1939 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1940 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1941 if (!(BGE_IS_5705_PLUS(sc))) {
1942 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1943 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1944 }
1945 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1946 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1947
1948 /* Set up address of statistics block */
1949 if (!(BGE_IS_5705_PLUS(sc))) {
1950 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1951 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1952 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1953 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1954 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1955 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1956 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1957 }
1958
1959 /* Set up address of status block */
1960 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1961 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1962 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1963 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1964
1965 /* Set up status block size. */
1966 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1967 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1968 val = BGE_STATBLKSZ_FULL;
1969 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1970 } else {
1971 val = BGE_STATBLKSZ_32BYTE;
1972 bzero(sc->bge_ldata.bge_status_block, 32);
1973 }
1974 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1975 sc->bge_cdata.bge_status_map,
1976 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1977
1978 /* Turn on host coalescing state machine */
1979 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1980
1981 /* Turn on RX BD completion state machine and enable attentions */
1982 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1983 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1984
1985 /* Turn on RX list placement state machine */
1986 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1987
1988 /* Turn on RX list selector state machine. */
1989 if (!(BGE_IS_5705_PLUS(sc)))
1990 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1991
1992 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1993 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1994 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1995 BGE_MACMODE_FRMHDR_DMA_ENB;
1996
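	/*
	 * Select the MAC port mode from the attached PHY type: TBI for
	 * fiber cards, GMII for MII-SerDes devices, and plain MII
	 * otherwise.
	 */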
1997 if (sc->bge_flags & BGE_FLAG_TBI)
1998 val |= BGE_PORTMODE_TBI;
1999 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2000 val |= BGE_PORTMODE_GMII;
2001 else
2002 val |= BGE_PORTMODE_MII;
2003
2004 /* Turn on DMA, clear stats */
2005 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2006
2007 /* Set misc. local control, enable interrupts on attentions */
2008 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2009
2010#ifdef notdef
2011 /* Assert GPIO pins for PHY reset */
2012 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2013 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2014 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2015 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2016#endif
2017
2018 /* Turn on DMA completion state machine */
2019 if (!(BGE_IS_5705_PLUS(sc)))
2020 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2021
2022 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2023
2024 /* Enable host coalescing bug fix. */
2025 if (BGE_IS_5755_PLUS(sc))
2026 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2027
2028 /* Request larger DMA burst size to get better performance. */
2029 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2030 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2031
2032 /* Turn on write DMA state machine */
2033 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2034 DELAY(40);
2035
2036 /* Turn on read DMA state machine */
2037 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2038
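	/* Disable multiple read DMA requests on the BCM5717. */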
2039 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2040 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2041
2042 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2043 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2044 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2045 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2046 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2047 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2048 if (sc->bge_flags & BGE_FLAG_PCIE)
2049 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2050 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2051 val |= BGE_RDMAMODE_TSO4_ENABLE;
2052 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2053 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2054 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2055 val |= BGE_RDMAMODE_TSO6_ENABLE;
2056 }
2057 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2058 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2059 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2060 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2061 BGE_IS_5717_PLUS(sc)) {
2062 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2063 /*
2064 * Adjust tx margin to prevent TX data corruption and
2065 * fix internal FIFO overflow.
2066 */
2067 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2068 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2069 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2070 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2071 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2072 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2073 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2074 }
2075 /*
2076 * Enable fix for read DMA FIFO overruns.
2077 * The fix is to limit the number of RX BDs
2078		 * the hardware would fetch at a time.
2079 */
2080 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2081 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2082 }
2083
2084 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2085 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2086 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2087 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2088 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2089 }
2090
2091 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2092 DELAY(40);
2093
2094 /* Turn on RX data completion state machine */
2095 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2096
2097 /* Turn on RX BD initiator state machine */
2098 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2099
2100 /* Turn on RX data and RX BD initiator state machine */
2101 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2102
2103 /* Turn on Mbuf cluster free state machine */
2104 if (!(BGE_IS_5705_PLUS(sc)))
2105 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2106
2107 /* Turn on send BD completion state machine */
2108 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2109
2110 /* Turn on send data completion state machine */
2111 val = BGE_SDCMODE_ENABLE;
2112 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2113 val |= BGE_SDCMODE_CDELAY;
2114 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2115
2116 /* Turn on send data initiator state machine */
2117 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2118 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2119 BGE_SDIMODE_HW_LSO_PRE_DMA);
2120 else
2121 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2122
2123 /* Turn on send BD initiator state machine */
2124 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2125
2126 /* Turn on send BD selector state machine */
2127 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2128
2129 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2130 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2131 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2132
2133 /* ack/clear link change events */
2134 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2135 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2136 BGE_MACSTAT_LINK_CHANGED);
2137 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2138
2139 /*
2140 * Enable attention when the link has changed state for
2141 * devices that use auto polling.
2142 */
2143 if (sc->bge_flags & BGE_FLAG_TBI) {
2144 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2145 } else {
2146 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2147 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2148 DELAY(80);
2149 }
2150 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2151 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2152 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2153 BGE_EVTENB_MI_INTERRUPT);
2154 }
2155
2156 /*
2157 * Clear any pending link state attention.
2158 * Otherwise some link state change events may be lost until attention
2159 * is cleared by bge_intr() -> bge_link_upd() sequence.
2160 * It's not necessary on newer BCM chips - perhaps enabling link
2161 * state change attentions implies clearing pending attention.
2162 */
2163 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2164 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2165 BGE_MACSTAT_LINK_CHANGED);
2166
2167 /* Enable link state change attentions. */
2168 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2169
2170 return (0);
2171}
2172
2173const struct bge_revision *
2174bge_lookup_rev(uint32_t chipid)
2175{
2176 const struct bge_revision *br;
2177
2178 for (br = bge_revisions; br->br_name != NULL; br++) {
2179 if (br->br_chipid == chipid)
2180 return (br);
2181 }
2182
2183 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2184 if (br->br_chipid == BGE_ASICREV(chipid))
2185 return (br);
2186 }
2187
2188 return (NULL);
2189}
2190
2191const struct bge_vendor *
2192bge_lookup_vendor(uint16_t vid)
2193{
2194 const struct bge_vendor *v;
2195
2196 for (v = bge_vendors; v->v_name != NULL; v++)
2197 if (v->v_id == vid)
2198 return (v);
2199
2200 panic("%s: unknown vendor %d", __func__, vid);
2201 return (NULL);
2202}
2203
2204/*
2205 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2206 * against our list and return its name if we find a match.
2207 *
2208 * Note that since the Broadcom controller contains VPD support, we
2209 * try to get the device name string from the controller itself instead
2210 * of the compiled-in string. It guarantees we'll always announce the
2211 * right product name. We fall back to the compiled-in string when
2212 * VPD is unavailable or corrupt.
2213 */
2214static int
2215bge_probe(device_t dev)
2216{
2217 char buf[96];
2218 char model[64];
2219 const struct bge_revision *br;
2220 const char *pname;
2221 struct bge_softc *sc = device_get_softc(dev);
2222 const struct bge_type *t = bge_devs;
2223 const struct bge_vendor *v;
2224 uint32_t id;
2225 uint16_t did, vid;
2226
2227 sc->bge_dev = dev;
2228 vid = pci_get_vendor(dev);
2229 did = pci_get_device(dev);
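	/* Walk our table of known vendor/device ID pairs. */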
2230	while (t->bge_vid != 0) {
2231 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2232 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2233 BGE_PCIMISCCTL_ASICREV_SHIFT;
2234 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2235 /*
2236				 * Find the ASIC revision.  Different chips
2237 * use different registers.
2238 */
2239 switch (pci_get_device(dev)) {
2240 case BCOM_DEVICEID_BCM5717:
2241 case BCOM_DEVICEID_BCM5718:
2242 case BCOM_DEVICEID_BCM5719:
2243 id = pci_read_config(dev,
2244 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2245 break;
2246 case BCOM_DEVICEID_BCM57761:
2247 case BCOM_DEVICEID_BCM57765:
2248 case BCOM_DEVICEID_BCM57781:
2249 case BCOM_DEVICEID_BCM57785:
2250 case BCOM_DEVICEID_BCM57791:
2251 case BCOM_DEVICEID_BCM57795:
2252 id = pci_read_config(dev,
2253 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2254 break;
2255 default:
2256 id = pci_read_config(dev,
2257 BGE_PCI_PRODID_ASICREV, 4);
2258 }
2259 }
2260 br = bge_lookup_rev(id);
2261 v = bge_lookup_vendor(vid);
2262 if (bge_has_eaddr(sc) &&
2263 pci_get_vpd_ident(dev, &pname) == 0)
2264 snprintf(model, 64, "%s", pname);
2265 else
2266 snprintf(model, 64, "%s %s", v->v_name,
2267 br != NULL ? br->br_name :
2268 "NetXtreme Ethernet Controller");
2269 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2270 br != NULL ? "" : "unknown ", id);
2271 device_set_desc_copy(dev, buf);
2272 return (0);
2273 }
2274 t++;
2275 }
2276
2277 return (ENXIO);
2278}
2279
2280static void
2281bge_dma_free(struct bge_softc *sc)
2282{
2283 int i;
2284
2285 /* Destroy DMA maps for RX buffers. */
2286 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2287 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2288 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2289 sc->bge_cdata.bge_rx_std_dmamap[i]);
2290 }
2291 if (sc->bge_cdata.bge_rx_std_sparemap)
2292 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2293 sc->bge_cdata.bge_rx_std_sparemap);
2294
2295 /* Destroy DMA maps for jumbo RX buffers. */
2296 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2297 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2298 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2299 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2300 }
2301 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2302 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2303 sc->bge_cdata.bge_rx_jumbo_sparemap);
2304
2305 /* Destroy DMA maps for TX buffers. */
2306 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2307 if (sc->bge_cdata.bge_tx_dmamap[i])
2308 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2309 sc->bge_cdata.bge_tx_dmamap[i]);
2310 }
2311
2312 if (sc->bge_cdata.bge_rx_mtag)
2313 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2314 if (sc->bge_cdata.bge_tx_mtag)
2315 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2316
2317
2318 /* Destroy standard RX ring. */
2319 if (sc->bge_cdata.bge_rx_std_ring_map)
2320 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2321 sc->bge_cdata.bge_rx_std_ring_map);
2322 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2323 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2324 sc->bge_ldata.bge_rx_std_ring,
2325 sc->bge_cdata.bge_rx_std_ring_map);
2326
2327 if (sc->bge_cdata.bge_rx_std_ring_tag)
2328 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2329
2330 /* Destroy jumbo RX ring. */
2331 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2332 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2333 sc->bge_cdata.bge_rx_jumbo_ring_map);
2334
2335 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2336 sc->bge_ldata.bge_rx_jumbo_ring)
2337 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2338 sc->bge_ldata.bge_rx_jumbo_ring,
2339 sc->bge_cdata.bge_rx_jumbo_ring_map);
2340
2341 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2342 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2343
2344 /* Destroy RX return ring. */
2345 if (sc->bge_cdata.bge_rx_return_ring_map)
2346 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2347 sc->bge_cdata.bge_rx_return_ring_map);
2348
2349 if (sc->bge_cdata.bge_rx_return_ring_map &&
2350 sc->bge_ldata.bge_rx_return_ring)
2351 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2352 sc->bge_ldata.bge_rx_return_ring,
2353 sc->bge_cdata.bge_rx_return_ring_map);
2354
2355 if (sc->bge_cdata.bge_rx_return_ring_tag)
2356 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2357
2358 /* Destroy TX ring. */
2359 if (sc->bge_cdata.bge_tx_ring_map)
2360 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2361 sc->bge_cdata.bge_tx_ring_map);
2362
2363 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2364 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2365 sc->bge_ldata.bge_tx_ring,
2366 sc->bge_cdata.bge_tx_ring_map);
2367
2368 if (sc->bge_cdata.bge_tx_ring_tag)
2369 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2370
2371 /* Destroy status block. */
2372 if (sc->bge_cdata.bge_status_map)
2373 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2374 sc->bge_cdata.bge_status_map);
2375
2376 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2377 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2378 sc->bge_ldata.bge_status_block,
2379 sc->bge_cdata.bge_status_map);
2380
2381 if (sc->bge_cdata.bge_status_tag)
2382 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2383
2384 /* Destroy statistics block. */
2385 if (sc->bge_cdata.bge_stats_map)
2386 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2387 sc->bge_cdata.bge_stats_map);
2388
2389 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2390 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2391 sc->bge_ldata.bge_stats,
2392 sc->bge_cdata.bge_stats_map);
2393
2394 if (sc->bge_cdata.bge_stats_tag)
2395 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2396
2397 if (sc->bge_cdata.bge_buffer_tag)
2398 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2399
2400 /* Destroy the parent tag. */
2401 if (sc->bge_cdata.bge_parent_tag)
2402 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2403}
2404
2405static int
2406bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2407 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2408 bus_addr_t *paddr, const char *msg)
2409{
2410 struct bge_dmamap_arg ctx;
2411 bus_addr_t lowaddr;
2412 bus_size_t ring_end;
2413 int error;
2414
2415 lowaddr = BUS_SPACE_MAXADDR;
2416again:
2417 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2418 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2419 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2420 if (error != 0) {
2421 device_printf(sc->bge_dev,
2422 "could not create %s dma tag\n", msg);
2423 return (ENOMEM);
2424 }
2425 /* Allocate DMA'able memory for ring. */
2426 error = bus_dmamem_alloc(*tag, (void **)ring,
2427 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2428 if (error != 0) {
2429 device_printf(sc->bge_dev,
2430 "could not allocate DMA'able memory for %s\n", msg);
2431 return (ENOMEM);
2432 }
2433 /* Load the address of the ring. */
2434 ctx.bge_busaddr = 0;
2435 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2436 &ctx, BUS_DMA_NOWAIT);
2437 if (error != 0) {
2438 device_printf(sc->bge_dev,
2439 "could not load DMA'able memory for %s\n", msg);
2440 return (ENOMEM);
2441 }
2442 *paddr = ctx.bge_busaddr;
2443 ring_end = *paddr + maxsize;
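	/*
	 * If the upper 32 bits of the ring's start and end addresses
	 * differ, the ring straddles a 4GB boundary.
	 */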
2444 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2445 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2446 /*
2447 * 4GB boundary crossed. Limit maximum allowable DMA
2448 * address space to 32bit and try again.
2449 */
2450 bus_dmamap_unload(*tag, *map);
2451 bus_dmamem_free(*tag, *ring, *map);
2452 bus_dma_tag_destroy(*tag);
2453 if (bootverbose)
2454 device_printf(sc->bge_dev, "4GB boundary crossed, "
2455 "limit DMA address space to 32bit for %s\n", msg);
2456 *ring = NULL;
2457 *tag = NULL;
2458 *map = NULL;
2459 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2460 goto again;
2461 }
2462 return (0);
2463}
2464
2465static int
2466bge_dma_alloc(struct bge_softc *sc)
2467{
2468 bus_addr_t lowaddr;
2469 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2470 int i, error;
2471
2472 lowaddr = BUS_SPACE_MAXADDR;
2473 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2474 lowaddr = BGE_DMA_MAXADDR;
2475 /*
2476 * Allocate the parent bus DMA tag appropriate for PCI.
2477 */
2478 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2479 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2480 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2481 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2482 if (error != 0) {
2483 device_printf(sc->bge_dev,
2484 "could not allocate parent dma tag\n");
2485 return (ENOMEM);
2486 }
2487
2488 /* Create tag for standard RX ring. */
2489 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2490 &sc->bge_cdata.bge_rx_std_ring_tag,
2491 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2492 &sc->bge_cdata.bge_rx_std_ring_map,
2493 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2494 if (error)
2495 return (error);
2496
2497 /* Create tag for RX return ring. */
2498 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2499 &sc->bge_cdata.bge_rx_return_ring_tag,
2500 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2501 &sc->bge_cdata.bge_rx_return_ring_map,
2502 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2503 if (error)
2504 return (error);
2505
2506 /* Create tag for TX ring. */
2507 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2508 &sc->bge_cdata.bge_tx_ring_tag,
2509 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2510 &sc->bge_cdata.bge_tx_ring_map,
2511 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2512 if (error)
2513 return (error);
2514
2515 /*
2516 * Create tag for status block.
2517	 * Because we use only a single TX/RX/RX return ring, use the
2518	 * minimum status block size, except for BCM5700 AX/BX, which
2519	 * seems to want to see the full status block size regardless
2520	 * of the configured number of rings.
2521 */
2522 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2523 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2524 sbsz = BGE_STATUS_BLK_SZ;
2525 else
2526 sbsz = 32;
2527 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2528 &sc->bge_cdata.bge_status_tag,
2529 (uint8_t **)&sc->bge_ldata.bge_status_block,
2530 &sc->bge_cdata.bge_status_map,
2531 &sc->bge_ldata.bge_status_block_paddr, "status block");
2532 if (error)
2533 return (error);
2534
2535 /* Create tag for statistics block. */
2536 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2537 &sc->bge_cdata.bge_stats_tag,
2538 (uint8_t **)&sc->bge_ldata.bge_stats,
2539 &sc->bge_cdata.bge_stats_map,
2540 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2541 if (error)
2542 return (error);
2543
2544 /* Create tag for jumbo RX ring. */
2545 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2546 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2547 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2548 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2549 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2550 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2551 if (error)
2552 return (error);
2553 }
2554
2555 /* Create parent tag for buffers. */
2556 boundary = 0;
2557 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2558 boundary = BGE_DMA_BNDRY;
2559 /*
2560 * XXX
2561		 * A watchdog timeout issue was observed on BCM5704 devices
2562		 * that live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
2563		 * bridge).  Limiting the DMA address space to 32 bits seems
2564		 * to address it.
2565 */
2566 if (sc->bge_flags & BGE_FLAG_PCIX)
2567 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2568 }
2569 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2570 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2571 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2572 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2573 if (error != 0) {
2574 device_printf(sc->bge_dev,
2575 "could not allocate buffer dma tag\n");
2576 return (ENOMEM);
2577 }
2578 /* Create tag for Tx mbufs. */
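	/*
	 * With TSO, a single mapping may span up to 64KB plus the VLAN
	 * header, split into segments of at most BGE_TSOSEG_SZ bytes;
	 * otherwise limit segments to mbuf cluster size.
	 */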
2579 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2580 txsegsz = BGE_TSOSEG_SZ;
2581 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2582 } else {
2583 txsegsz = MCLBYTES;
2584 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2585 }
2586 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2587 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2588 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2589 &sc->bge_cdata.bge_tx_mtag);
2590
2591 if (error) {
2592 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2593 return (ENOMEM);
2594 }
2595
2596 /* Create tag for Rx mbufs. */
2597 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2598 rxmaxsegsz = MJUM9BYTES;
2599 else
2600 rxmaxsegsz = MCLBYTES;
2601 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2602 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2603 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2604
2605 if (error) {
2606 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2607 return (ENOMEM);
2608 }
2609
2610 /* Create DMA maps for RX buffers. */
2611 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2612 &sc->bge_cdata.bge_rx_std_sparemap);
2613 if (error) {
2614 device_printf(sc->bge_dev,
2615 "can't create spare DMA map for RX\n");
2616 return (ENOMEM);
2617 }
2618 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2619 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2620 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2621 if (error) {
2622 device_printf(sc->bge_dev,
2623 "can't create DMA map for RX\n");
2624 return (ENOMEM);
2625 }
2626 }
2627
2628 /* Create DMA maps for TX buffers. */
2629 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2630 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2631 &sc->bge_cdata.bge_tx_dmamap[i]);
2632 if (error) {
2633 device_printf(sc->bge_dev,
2634 "can't create DMA map for TX\n");
2635 return (ENOMEM);
2636 }
2637 }
2638
2639 /* Create tags for jumbo RX buffers. */
2640 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2641 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2642 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2643 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2644 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2645 if (error) {
2646 device_printf(sc->bge_dev,
2647 "could not allocate jumbo dma tag\n");
2648 return (ENOMEM);
2649 }
2650 /* Create DMA maps for jumbo RX buffers. */
2651 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2652 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2653 if (error) {
2654 device_printf(sc->bge_dev,
2655 "can't create spare DMA map for jumbo RX\n");
2656 return (ENOMEM);
2657 }
2658 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2659 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2660 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2661 if (error) {
2662 device_printf(sc->bge_dev,
2663 "can't create DMA map for jumbo RX\n");
2664 return (ENOMEM);
2665 }
2666 }
2667 }
2668
2669 return (0);
2670}
2671
2672/*
2673 * Return true if this device has more than one port.
2674 */
2675static int
2676bge_has_multiple_ports(struct bge_softc *sc)
2677{
2678 device_t dev = sc->bge_dev;
2679 u_int b, d, f, fscan, s;
2680
2681 d = pci_get_domain(dev);
2682 b = pci_get_bus(dev);
2683 s = pci_get_slot(dev);
2684 f = pci_get_function(dev);
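	/*
	 * Scan the other function numbers at the same domain/bus/slot;
	 * if any of them responds, this device has more than one port.
	 */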
2685 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2686 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2687 return (1);
2688 return (0);
2689}
2690
2691/*
2692 * Return true if MSI can be used with this device.
2693 */
2694static int
2695bge_can_use_msi(struct bge_softc *sc)
2696{
2697 int can_use_msi = 0;
2698
2699 /* Disable MSI for polling(4). */
2700#ifdef DEVICE_POLLING
2701 return (0);
2702#endif
2703 switch (sc->bge_asicrev) {
2704 case BGE_ASICREV_BCM5714_A0:
2705 case BGE_ASICREV_BCM5714:
2706 /*
2707 * Apparently, MSI doesn't work when these chips are
2708 * configured in single-port mode.
2709 */
2710 if (bge_has_multiple_ports(sc))
2711 can_use_msi = 1;
2712 break;
2713 case BGE_ASICREV_BCM5750:
2714 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2715 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2716 can_use_msi = 1;
2717 break;
2718 default:
2719 if (BGE_IS_575X_PLUS(sc))
2720 can_use_msi = 1;
2721 }
2722 return (can_use_msi);
2723}
2724
2725static int
2726bge_attach(device_t dev)
2727{
2728 struct ifnet *ifp;
2729 struct bge_softc *sc;
2730 uint32_t hwcfg = 0, misccfg;
2731 u_char eaddr[ETHER_ADDR_LEN];
2732 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2733
2734 sc = device_get_softc(dev);
2735 sc->bge_dev = dev;
2736
2737 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2738
2739 /*
2740 * Map control/status registers.
2741 */
2742 pci_enable_busmaster(dev);
2743
2744 rid = PCIR_BAR(0);
2745 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2746 RF_ACTIVE);
2747
2748 if (sc->bge_res == NULL) {
2749 device_printf (sc->bge_dev, "couldn't map memory\n");
2750 error = ENXIO;
2751 goto fail;
2752 }
2753
2754 /* Save various chip information. */
2755 sc->bge_chipid =
2756 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2757 BGE_PCIMISCCTL_ASICREV_SHIFT;
2758 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2759 /*
2760		 * Find the ASIC revision.  Different chips use different
2761 * registers.
2762 */
2763 switch (pci_get_device(dev)) {
2764 case BCOM_DEVICEID_BCM5717:
2765 case BCOM_DEVICEID_BCM5718:
2766 case BCOM_DEVICEID_BCM5719:
2767 sc->bge_chipid = pci_read_config(dev,
2768 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2769 break;
2770 case BCOM_DEVICEID_BCM57761:
2771 case BCOM_DEVICEID_BCM57765:
2772 case BCOM_DEVICEID_BCM57781:
2773 case BCOM_DEVICEID_BCM57785:
2774 case BCOM_DEVICEID_BCM57791:
2775 case BCOM_DEVICEID_BCM57795:
2776 sc->bge_chipid = pci_read_config(dev,
2777 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2778 break;
2779 default:
2780 sc->bge_chipid = pci_read_config(dev,
2781 BGE_PCI_PRODID_ASICREV, 4);
2782 }
2783 }
2784 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2785 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2786
2787 /* Set default PHY address. */
2788 phy_addr = 1;
2789 /*
2790 * PHY address mapping for various devices.
2791 *
2792 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2793 * ---------+-------+-------+-------+-------+
2794 * BCM57XX | 1 | X | X | X |
2795 * BCM5704 | 1 | X | 1 | X |
2796 * BCM5717 | 1 | 8 | 2 | 9 |
2797 * BCM5719 | 1 | 8 | 2 | 9 |
2798 *
2799 * Other addresses may respond but they are not
2800 * IEEE compliant PHYs and should be ignored.
2801 */
2802 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2803 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2804 f = pci_get_function(dev);
2805 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2806 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2807 BGE_SGDIGSTS_IS_SERDES)
2808 phy_addr = f + 8;
2809 else
2810 phy_addr = f + 1;
2811 } else {
2812 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2813 BGE_CPMU_PHY_STRAP_IS_SERDES)
2814 phy_addr = f + 8;
2815 else
2816 phy_addr = f + 1;
2817 }
2818 }
2819
2820 /*
2821 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2822 * 5705 A0 and A1 chips.
2823 */
2824 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2825 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2826 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2827 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2828 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2829 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2830
2831 if (bge_has_eaddr(sc))
2832 sc->bge_flags |= BGE_FLAG_EADDR;
2833
2834 /* Save chipset family. */
2835 switch (sc->bge_asicrev) {
2836 case BGE_ASICREV_BCM5717:
2837 case BGE_ASICREV_BCM5719:
2838 case BGE_ASICREV_BCM57765:
2839 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2840 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2841 BGE_FLAG_JUMBO_FRAME;
2842 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2843 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2844 /* Jumbo frame on BCM5719 A0 does not work. */
2845 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2846 }
2847 break;
2848 case BGE_ASICREV_BCM5755:
2849 case BGE_ASICREV_BCM5761:
2850 case BGE_ASICREV_BCM5784:
2851 case BGE_ASICREV_BCM5785:
2852 case BGE_ASICREV_BCM5787:
2853 case BGE_ASICREV_BCM57780:
2854 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2855 BGE_FLAG_5705_PLUS;
2856 break;
2857 case BGE_ASICREV_BCM5700:
2858 case BGE_ASICREV_BCM5701:
2859 case BGE_ASICREV_BCM5703:
2860 case BGE_ASICREV_BCM5704:
2861 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2862 break;
2863 case BGE_ASICREV_BCM5714_A0:
2864 case BGE_ASICREV_BCM5780:
2865 case BGE_ASICREV_BCM5714:
2866 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2867 /* FALLTHROUGH */
2868 case BGE_ASICREV_BCM5750:
2869 case BGE_ASICREV_BCM5752:
2870 case BGE_ASICREV_BCM5906:
2871 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2872 /* FALLTHROUGH */
2873 case BGE_ASICREV_BCM5705:
2874 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2875 break;
2876 }
2877
2878 /* Set various PHY bug flags. */
2879 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2880 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2881 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2882 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2883 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2884 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2885 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2886 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2887 if (pci_get_subvendor(dev) == DELL_VENDORID)
2888 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2889 if ((BGE_IS_5705_PLUS(sc)) &&
2890 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2891 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2892 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2893 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2894 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2895 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2896 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2897 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2898 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2899 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2900 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2901 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2902 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2903 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2904 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2905 } else
2906 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2907 }
2908
2909	/* Identify the chips that use a CPMU. */
2910 if (BGE_IS_5717_PLUS(sc) ||
2911 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2912 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2913 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2914 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2915 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2916 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2917 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2918 else
2919 sc->bge_mi_mode = BGE_MIMODE_BASE;
2920 /* Enable auto polling for BCM570[0-5]. */
2921 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2922 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2923
2924 /*
2925	 * All Broadcom controllers have the 4GB boundary DMA bug.
2926	 * Whenever an address crosses a multiple of the 4GB boundary
2927	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2928	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2929	 * state machine will lock up and cause the device to hang.
2930 */
2931 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2932
2933 /* BCM5755 or higher and BCM5906 have short DMA bug. */
2934 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2935 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2936
2937 /*
2938	 * The BCM5719 cannot handle DMA requests for DMA segments that
2939	 * are larger than 4KB.  However, the maximum DMA segment size
2940	 * created in the DMA tag is 4KB for TSO, so we wouldn't
2941	 * encounter the issue here.
2942 */
2943 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2944 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2945
2946 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2947 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2948 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2949 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2950 sc->bge_flags |= BGE_FLAG_5788;
2951 }
2952
2953 capmask = BMSR_DEFCAPMASK;
2954 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2955 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2956 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2957 pci_get_vendor(dev) == BCOM_VENDORID &&
2958 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2959 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2960 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2961 (pci_get_vendor(dev) == BCOM_VENDORID &&
2962 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2963 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2964 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2965 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2966 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2967 /* These chips are 10/100 only. */
2968 capmask &= ~BMSR_EXTSTAT;
2969 }
2970
2971 /*
2972	 * Some controllers seem to require special firmware to use
2973	 * TSO.  But that firmware is not available to FreeBSD, and
2974	 * Linux claims that TSO performed by the firmware is slower
2975	 * than hardware-based TSO.  Moreover, the firmware-based TSO
2976	 * has one known bug: it can't handle TSO if the ethernet
2977	 * header + IP/TCP header is greater than 80 bytes.  A
2978	 * workaround for the TSO bug exists, but it seems more
2979	 * expensive than not using TSO at all.  Some hardware also has
2980	 * the TSO bug, so limit TSO to the controllers that are not
2981	 * affected by TSO issues (e.g. 5755 or higher).
2982 */
2983 if (BGE_IS_5717_PLUS(sc)) {
2984 /* BCM5717 requires different TSO configuration. */
2985 sc->bge_flags |= BGE_FLAG_TSO3;
2986 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2987 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2988 /* TSO on BCM5719 A0 does not work. */
2989 sc->bge_flags &= ~BGE_FLAG_TSO3;
2990 }
2991 } else if (BGE_IS_5755_PLUS(sc)) {
2992 /*
2993		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2994		 * explicit device ID check is required.
2995		 * For an unknown reason, TSO does not work on BCM5755M.
2996 */
2997 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2998 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2999 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3000 sc->bge_flags |= BGE_FLAG_TSO;
3001 }
3002
3003 /*
3004 * Check if this is a PCI-X or PCI Express device.
3005 */
3006 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3007 /*
3008 * Found a PCI Express capabilities register, this
3009 * must be a PCI Express device.
3010 */
3011 sc->bge_flags |= BGE_FLAG_PCIE;
3012 sc->bge_expcap = reg;
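		/*
		 * Cap the PCIe maximum read request size at 2048 bytes on
		 * the BCM5719; otherwise make sure it is set to 4096.
		 */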
3013 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3014 pci_set_max_read_req(dev, 2048);
3015 else if (pci_get_max_read_req(dev) != 4096)
3016 pci_set_max_read_req(dev, 4096);
3017 } else {
3018 /*
3019 * Check if the device is in PCI-X Mode.
3020 * (This bit is not valid on PCI Express controllers.)
3021 */
3022 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3023 sc->bge_pcixcap = reg;
3024 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3025 BGE_PCISTATE_PCI_BUSMODE) == 0)
3026 sc->bge_flags |= BGE_FLAG_PCIX;
3027 }
3028
3029 /*
3030 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3031 * not actually a MAC controller bug but an issue with the embedded
3032 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3033 */
3034 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3035 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3036 /*
3037 * Allocate the interrupt, using MSI if possible. These devices
3038 * support 8 MSI messages, but only the first one is used in
3039 * normal operation.
3040 */
3041 rid = 0;
3042 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3043 sc->bge_msicap = reg;
3044 if (bge_can_use_msi(sc)) {
3045 msicount = pci_msi_count(dev);
3046 if (msicount > 1)
3047 msicount = 1;
3048 } else
3049 msicount = 0;
3050 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3051 rid = 1;
3052 sc->bge_flags |= BGE_FLAG_MSI;
3053 }
3054 }
3055
3056 /*
3057	 * All controllers except the BCM5700 support tagged status, but
3058	 * we use tagged status only for the MSI case on BCM5717.
3059	 * Otherwise MSI on BCM5717 does not work.
3060 */
3061#ifndef DEVICE_POLLING
3062 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3063 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3064#endif
3065
3066 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3067 RF_SHAREABLE | RF_ACTIVE);
3068
3069 if (sc->bge_irq == NULL) {
3070 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3071 error = ENXIO;
3072 goto fail;
3073 }
3074
3075 device_printf(dev,
3076 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3077 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3078 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3079 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3080
3081 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3082
3083 /* Try to reset the chip. */
3084 if (bge_reset(sc)) {
3085 device_printf(sc->bge_dev, "chip reset failed\n");
3086 error = ENXIO;
3087 goto fail;
3088 }
3089
3090 sc->bge_asf_mode = 0;
3091 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3092 BGE_SRAM_DATA_SIG_MAGIC)) {
3093 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3094 & BGE_HWCFG_ASF) {
3095 sc->bge_asf_mode |= ASF_ENABLE;
3096 sc->bge_asf_mode |= ASF_STACKUP;
3097 if (BGE_IS_575X_PLUS(sc))
3098 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3099 }
3100 }
3101
3102 /* Try to reset the chip again the nice way. */
3103 bge_stop_fw(sc);
3104 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3105 if (bge_reset(sc)) {
3106 device_printf(sc->bge_dev, "chip reset failed\n");
3107 error = ENXIO;
3108 goto fail;
3109 }
3110
3111 bge_sig_legacy(sc, BGE_RESET_STOP);
3112 bge_sig_post_reset(sc, BGE_RESET_STOP);
3113
3114 if (bge_chipinit(sc)) {
3115 device_printf(sc->bge_dev, "chip initialization failed\n");
3116 error = ENXIO;
3117 goto fail;
3118 }
3119
3120 error = bge_get_eaddr(sc, eaddr);
3121 if (error) {
3122 device_printf(sc->bge_dev,
3123 "failed to read station address\n");
3124 error = ENXIO;
3125 goto fail;
3126 }
3127
3128 /* 5705 limits RX return ring to 512 entries. */
3129 if (BGE_IS_5717_PLUS(sc))
3130 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3131 else if (BGE_IS_5705_PLUS(sc))
3132 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3133 else
3134 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3135
3136 if (bge_dma_alloc(sc)) {
3137 device_printf(sc->bge_dev,
3138 "failed to allocate DMA resources\n");
3139 error = ENXIO;
3140 goto fail;
3141 }
3142
3143 bge_add_sysctls(sc);
3144
3145 /* Set default tuneable values. */
3146 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3147 sc->bge_rx_coal_ticks = 150;
3148 sc->bge_tx_coal_ticks = 150;
3149 sc->bge_rx_max_coal_bds = 10;
3150 sc->bge_tx_max_coal_bds = 10;
3151
3152 /* Initialize checksum features to use. */
3153 sc->bge_csum_features = BGE_CSUM_FEATURES;
3154 if (sc->bge_forced_udpcsum != 0)
3155 sc->bge_csum_features |= CSUM_UDP;
3156
3157 /* Set up ifnet structure */
3158 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3159 if (ifp == NULL) {
3160 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3161 error = ENXIO;
3162 goto fail;
3163 }
3164 ifp->if_softc = sc;
3165 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3166 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3167 ifp->if_ioctl = bge_ioctl;
3168 ifp->if_start = bge_start;
3169 ifp->if_init = bge_init;
3170 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3171 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3172 IFQ_SET_READY(&ifp->if_snd);
3173 ifp->if_hwassist = sc->bge_csum_features;
3174 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3175 IFCAP_VLAN_MTU;
3176 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3177 ifp->if_hwassist |= CSUM_TSO;
3178 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3179 }
3180#ifdef IFCAP_VLAN_HWCSUM
3181 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3182#endif
3183 ifp->if_capenable = ifp->if_capabilities;
3184#ifdef DEVICE_POLLING
3185 ifp->if_capabilities |= IFCAP_POLLING;
3186#endif
3187
3188 /*
3189 * 5700 B0 chips do not support checksumming correctly due
3190 * to hardware bugs.
3191 */
3192 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3193 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3194 ifp->if_capenable &= ~IFCAP_HWCSUM;
3195 ifp->if_hwassist = 0;
3196 }
3197
3198 /*
3199 * Figure out what sort of media we have by checking the
3200 * hardware config word in the first 32k of NIC internal memory,
3201 * or fall back to examining the EEPROM if necessary.
3202 * Note: on some BCM5700 cards, this value appears to be unset.
3203 * If that's the case, we have to rely on identifying the NIC
3204 * by its PCI subsystem ID, as we do below for the SysKonnect
3205 * SK-9D41.
3206 */
3207 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3208 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3209 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3210 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3211 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3212 sizeof(hwcfg))) {
3213 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3214 error = ENXIO;
3215 goto fail;
3216 }
3217 hwcfg = ntohl(hwcfg);
3218 }
3219
3220 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3221 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3222 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3223 if (BGE_IS_5714_FAMILY(sc))
3224 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3225 else
3226 sc->bge_flags |= BGE_FLAG_TBI;
3227 }
3228
3229 if (sc->bge_flags & BGE_FLAG_TBI) {
3230 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3231 bge_ifmedia_sts);
3232 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3233 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3234 0, NULL);
3235 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3236 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3237 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3238 } else {
3239 /*
3240		 * Do transceiver setup and tell the firmware the
3241		 * driver is down so we can try to get access to the
3242		 * PHY during the probe if ASF is running.  Retry a
3243		 * couple of times if we get a conflict with the ASF
3244		 * firmware accessing the PHY.
3245 */
3246 trys = 0;
3247 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3248again:
3249 bge_asf_driver_up(sc);
3250
3251 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3252 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3253 MIIF_DOPAUSE);
3254 if (error != 0) {
3255 if (trys++ < 4) {
3256 device_printf(sc->bge_dev, "Try again\n");
3257 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3258 BMCR_RESET);
3259 goto again;
3260 }
3261 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3262 goto fail;
3263 }
3264
3265 /*
3266 * Now tell the firmware we are going up after probing the PHY
3267 */
3268 if (sc->bge_asf_mode & ASF_STACKUP)
3269 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3270 }
3271
3272 /*
3273 * When using the BCM5701 in PCI-X mode, data corruption has
3274 * been observed in the first few bytes of some received packets.
3275 * Aligning the packet buffer in memory eliminates the corruption.
3276 * Unfortunately, this misaligns the packet payloads. On platforms
3277 * which do not support unaligned accesses, we will realign the
3278 * payloads by copying the received packets.
3279 */
3280 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3281 sc->bge_flags & BGE_FLAG_PCIX)
3282 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3283
3284 /*
3285 * Call MI attach routine.
3286 */
3287 ether_ifattach(ifp, eaddr);
3288 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3289
3290 /* Tell upper layer we support long frames. */
3291 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3292
3293 /*
3294 * Hookup IRQ last.
3295 */
3296 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3297 /* Take advantage of single-shot MSI. */
3298 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3299 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3300 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3301 taskqueue_thread_enqueue, &sc->bge_tq);
3302 if (sc->bge_tq == NULL) {
3303 device_printf(dev, "could not create taskqueue.\n");
3304 ether_ifdetach(ifp);
3305 error = ENXIO;
3306 goto fail;
3307 }
3308 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3309 device_get_nameunit(sc->bge_dev));
3310 error = bus_setup_intr(dev, sc->bge_irq,
3311 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3312 &sc->bge_intrhand);
3313 if (error)
3314 ether_ifdetach(ifp);
3315 } else
3316 error = bus_setup_intr(dev, sc->bge_irq,
3317 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3318 &sc->bge_intrhand);
3319
3320 if (error) {
3321 bge_detach(dev);
3322 device_printf(sc->bge_dev, "couldn't set up irq\n");
3323 }
3324
3325 return (0);
3326
3327fail:
3328 bge_release_resources(sc);
3329
3330 return (error);
3331}
3332
3333static int
3334bge_detach(device_t dev)
3335{
3336 struct bge_softc *sc;
3337 struct ifnet *ifp;
3338
3339 sc = device_get_softc(dev);
3340 ifp = sc->bge_ifp;
3341
3342#ifdef DEVICE_POLLING
3343 if (ifp->if_capenable & IFCAP_POLLING)
3344 ether_poll_deregister(ifp);
3345#endif
3346
3347 BGE_LOCK(sc);
3348 bge_stop(sc);
3349 bge_reset(sc);
3350 BGE_UNLOCK(sc);
3351
3352 callout_drain(&sc->bge_stat_ch);
3353
3354 if (sc->bge_tq)
3355 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3356 ether_ifdetach(ifp);
3357
3358 if (sc->bge_flags & BGE_FLAG_TBI) {
3359 ifmedia_removeall(&sc->bge_ifmedia);
3360 } else {
3361 bus_generic_detach(dev);
3362 device_delete_child(dev, sc->bge_miibus);
3363 }
3364
3365 bge_release_resources(sc);
3366
3367 return (0);
3368}
3369
3370static void
3371bge_release_resources(struct bge_softc *sc)
3372{
3373 device_t dev;
3374
3375 dev = sc->bge_dev;
3376
3377 if (sc->bge_tq != NULL)
3378 taskqueue_free(sc->bge_tq);
3379
3380 if (sc->bge_intrhand != NULL)
3381 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3382
3383 if (sc->bge_irq != NULL)
3384 bus_release_resource(dev, SYS_RES_IRQ,
3385 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3386
3387 if (sc->bge_flags & BGE_FLAG_MSI)
3388 pci_release_msi(dev);
3389
3390 if (sc->bge_res != NULL)
3391 bus_release_resource(dev, SYS_RES_MEMORY,
3392 PCIR_BAR(0), sc->bge_res);
3393
3394 if (sc->bge_ifp != NULL)
3395 if_free(sc->bge_ifp);
3396
3397 bge_dma_free(sc);
3398
3399 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3400 BGE_LOCK_DESTROY(sc);
3401}
3402
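/*
 * Reset the chip: save the essential PCI config registers, post the
 * BGE_SRAM_FW_MB_MAGIC handshake value, issue a core-clock reset via
 * BGE_MISC_CFG, wait for the bootcode to signal completion (VCPU status
 * on the BCM5906, the inverted magic value elsewhere), then restore the
 * saved PCI state and re-enable the memory arbiter and, where needed, MSI.
 */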
3403static int
3404bge_reset(struct bge_softc *sc)
3405{
3406 device_t dev;
3407 uint32_t cachesize, command, pcistate, reset, val;
3408 void (*write_op)(struct bge_softc *, int, int);
3409 uint16_t devctl;
3410 int i;
3411
3412 dev = sc->bge_dev;
3413
3414 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3415 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3416 if (sc->bge_flags & BGE_FLAG_PCIE)
3417 write_op = bge_writemem_direct;
3418 else
3419 write_op = bge_writemem_ind;
3420 } else
3421 write_op = bge_writereg_ind;
3422
3423 /* Save some important PCI state. */
3424 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3425 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3426 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3427
3428 pci_write_config(dev, BGE_PCI_MISC_CTL,
3429 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3430 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3431
3432 /* Disable fastboot on controllers that support it. */
3433 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3434 BGE_IS_5755_PLUS(sc)) {
3435 if (bootverbose)
3436 device_printf(dev, "Disabling fastboot\n");
3437 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3438 }
3439
3440 /*
3441 * Write the magic number to SRAM at offset 0xB50.
3442 * When the firmware finishes its initialization, it will
3443 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3444 */
3445 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3446
3447 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3448
3449 /* XXX: Broadcom Linux driver. */
3450 if (sc->bge_flags & BGE_FLAG_PCIE) {
3451 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3452 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3453 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3454 /* Prevent PCIE link training during global reset */
3455 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3456 reset |= 1 << 29;
3457 }
3458 }
3459
3460 /*
3461 * Set GPHY Power Down Override to leave GPHY
3462 * powered up in D0 uninitialized.
3463 */
3464 if (BGE_IS_5705_PLUS(sc) &&
3465 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3466 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3467
3468 /* Issue global reset */
3469 write_op(sc, BGE_MISC_CFG, reset);
3470
3471 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3472 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3473 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3474 val | BGE_VCPU_STATUS_DRV_RESET);
3475 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3476 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3477 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3478 }
3479
3480 DELAY(1000);
3481
3482 /* XXX: Broadcom Linux driver. */
3483 if (sc->bge_flags & BGE_FLAG_PCIE) {
3484 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3485 DELAY(500000); /* wait for link training to complete */
3486 val = pci_read_config(dev, 0xC4, 4);
3487 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3488 }
3489 devctl = pci_read_config(dev,
3490 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3491 /* Clear enable no snoop and disable relaxed ordering. */
3492 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3493 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3494 /* Set PCIE max payload size to 128. */
3495 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3496 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3497 devctl, 2);
3498 /* Clear error status. */
3499 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3500 PCIM_EXP_STA_CORRECTABLE_ERROR |
3501 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3502 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3503 }
3504
3505 /* Reset some of the PCI state that got zapped by reset. */
3506 pci_write_config(dev, BGE_PCI_MISC_CTL,
3507 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3508 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3509 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3510 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3511 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3512 /*
3513 * Disable PCI-X relaxed ordering so that the status block update
3514 * arrives before the packet buffer DMA. Otherwise the driver may
3515 * read a stale status block.
3516 */
3517 if (sc->bge_flags & BGE_FLAG_PCIX) {
3518 devctl = pci_read_config(dev,
3519 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3520 devctl &= ~PCIXM_COMMAND_ERO;
3521 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3522 devctl &= ~PCIXM_COMMAND_MAX_READ;
3523 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3524 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3525 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3526 PCIXM_COMMAND_MAX_READ);
3527 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3528 }
3529 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3530 devctl, 2);
3531 }
3532 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3533 if (BGE_IS_5714_FAMILY(sc)) {
3534 /* This chip disables MSI on reset. */
3535 if (sc->bge_flags & BGE_FLAG_MSI) {
3536 val = pci_read_config(dev,
3537 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3538 pci_write_config(dev,
3539 sc->bge_msicap + PCIR_MSI_CTRL,
3540 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3541 val = CSR_READ_4(sc, BGE_MSI_MODE);
3542 CSR_WRITE_4(sc, BGE_MSI_MODE,
3543 val | BGE_MSIMODE_ENABLE);
3544 }
3545 val = CSR_READ_4(sc, BGE_MARB_MODE);
3546 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3547 } else
3548 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3549
3550 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3551 for (i = 0; i < BGE_TIMEOUT; i++) {
3552 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3553 if (val & BGE_VCPU_STATUS_INIT_DONE)
3554 break;
3555 DELAY(100);
3556 }
3557 if (i == BGE_TIMEOUT) {
3558 device_printf(dev, "reset timed out\n");
3559 return (1);
3560 }
3561 } else {
3562 /*
3563 * Poll until we see the 1's complement of the magic number.
3564 * This indicates that the firmware initialization is complete.
3565 * We expect this to fail, though, if no chip containing the
3566 * Ethernet address is fitted.
3567 */
3568 for (i = 0; i < BGE_TIMEOUT; i++) {
3569 DELAY(10);
3570 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3571 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3572 break;
3573 }
3574
3575 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3576 device_printf(dev,
3577 "firmware handshake timed out, found 0x%08x\n",
3578 val);
3579 /* The BCM57765 A0 needs additional time before it can be accessed. */
3580 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3581 DELAY(10 * 1000); /* XXX */
3582 }
3583
3584 /*
3585 * XXX Wait for the value of the PCISTATE register to
3586 * return to its original pre-reset state. This is a
3587 * fairly good indicator of reset completion. If we don't
3588 * wait for the reset to fully complete, trying to read
3589 * from the device's non-PCI registers may yield garbage
3590 * results.
3591 */
3592 for (i = 0; i < BGE_TIMEOUT; i++) {
3593 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3594 break;
3595 DELAY(10);
3596 }
3597
3598 /* Fix up byte swapping. */
3599 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3600 BGE_MODECTL_BYTESWAP_DATA);
3601
3602 /* Tell the ASF firmware we are up */
3603 if (sc->bge_asf_mode & ASF_STACKUP)
3604 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3605
3606 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3607
3608 /*
3609 * The 5704 in TBI mode apparently needs some special
3610 * adjustment to ensure the SERDES drive level is set
3611 * to 1.2V.
3612 */
3613 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3614 sc->bge_flags & BGE_FLAG_TBI) {
3615 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3616 val = (val & ~0xFFF) | 0x880;
3617 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3618 }
3619
3620 /* XXX: Broadcom Linux driver. */
3621 if (sc->bge_flags & BGE_FLAG_PCIE &&
3622 !BGE_IS_5717_PLUS(sc) &&
3623 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3624 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3625 /* Enable Data FIFO protection. */
3626 val = CSR_READ_4(sc, 0x7C00);
3627 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3628 }
3629 DELAY(10000);
3630
3631 return (0);
3632}
3633
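/*
 * Re-queue the mbuf already attached to a standard RX slot by rewriting
 * its descriptor; used when a received frame had an error or a
 * replacement buffer could not be allocated. The jumbo variant below
 * does the same for the extended RX ring.
 */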
3634static __inline void
3635bge_rxreuse_std(struct bge_softc *sc, int i)
3636{
3637 struct bge_rx_bd *r;
3638
3639 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3640 r->bge_flags = BGE_RXBDFLAG_END;
3641 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3642 r->bge_idx = i;
3643 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3644}
3645
3646static __inline void
3647bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3648{
3649 struct bge_extrx_bd *r;
3650
3651 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3652 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3653 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3654 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3655 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3656 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3657 r->bge_idx = i;
3658 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3659}
3660
3661/*
3662 * Frame reception handling. This is called if there's a frame
3663 * on the receive return list.
3664 *
3665 * Note: we have to be able to handle two possibilities here:
3666 * 1) the frame is from the jumbo receive ring
3667 * 2) the frame is from the standard receive ring
3668 */
3669
3670static int
3671bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3672{
3673 struct ifnet *ifp;
3674 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3675 uint16_t rx_cons;
3676
3677 rx_cons = sc->bge_rx_saved_considx;
3678
3679 /* Nothing to do. */
3680 if (rx_cons == rx_prod)
3681 return (rx_npkts);
3682
3683 ifp = sc->bge_ifp;
3684
3685 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3686 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3687 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3688 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3689 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3690 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3691 (MCLBYTES - ETHER_ALIGN))
3692 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3693 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3694
3695 while (rx_cons != rx_prod) {
3696 struct bge_rx_bd *cur_rx;
3697 uint32_t rxidx;
3698 struct mbuf *m = NULL;
3699 uint16_t vlan_tag = 0;
3700 int have_tag = 0;
3701
3702#ifdef DEVICE_POLLING
3703 if (ifp->if_capenable & IFCAP_POLLING) {
3704 if (sc->rxcycles <= 0)
3705 break;
3706 sc->rxcycles--;
3707 }
3708#endif
3709
3710 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3711
3712 rxidx = cur_rx->bge_idx;
3713 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3714
3715 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3716 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3717 have_tag = 1;
3718 vlan_tag = cur_rx->bge_vlan_tag;
3719 }
3720
3721 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3722 jumbocnt++;
3723 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3724 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3725 bge_rxreuse_jumbo(sc, rxidx);
3726 continue;
3727 }
3728 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3729 bge_rxreuse_jumbo(sc, rxidx);
3730 ifp->if_iqdrops++;
3731 continue;
3732 }
3733 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3734 } else {
3735 stdcnt++;
3736 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3737 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3738 bge_rxreuse_std(sc, rxidx);
3739 continue;
3740 }
3741 if (bge_newbuf_std(sc, rxidx) != 0) {
3742 bge_rxreuse_std(sc, rxidx);
3743 ifp->if_iqdrops++;
3744 continue;
3745 }
3746 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3747 }
3748
3749 ifp->if_ipackets++;
3750#ifndef __NO_STRICT_ALIGNMENT
3751 /*
3752 * For architectures with strict alignment we must make sure
3753 * the payload is aligned.
3754 */
3755 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3756 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3757 cur_rx->bge_len);
3758 m->m_data += ETHER_ALIGN;
3759 }
3760#endif
3761 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3762 m->m_pkthdr.rcvif = ifp;
3763
3764 if (ifp->if_capenable & IFCAP_RXCSUM)
3765 bge_rxcsum(sc, cur_rx, m);
3766
3767 /*
3768 * If we received a packet with a vlan tag,
3769 * attach that information to the packet.
3770 */
3771 if (have_tag) {
3772 m->m_pkthdr.ether_vtag = vlan_tag;
3773 m->m_flags |= M_VLANTAG;
3774 }
3775
3776 if (holdlck != 0) {
3777 BGE_UNLOCK(sc);
3778 (*ifp->if_input)(ifp, m);
3779 BGE_LOCK(sc);
3780 } else
3781 (*ifp->if_input)(ifp, m);
3782 rx_npkts++;
3783
3784 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3785 return (rx_npkts);
3786 }
3787
3788 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3789 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3790 if (stdcnt > 0)
3791 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3792 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3793
3794 if (jumbocnt > 0)
3795 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3796 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3797
3798 sc->bge_rx_saved_considx = rx_cons;
3799 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3800 if (stdcnt)
3801 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3802 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3803 if (jumbocnt)
3804 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3805 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3806#ifdef notyet
3807 /*
3808 * This register wraps very quickly under heavy packet drops.
3809 * If you need correct statistics, you can enable this check.
3810 */
3811 if (BGE_IS_5705_PLUS(sc))
3812 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3813#endif
3814 return (rx_npkts);
3815}
3816
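/*
 * Translate the RX descriptor checksum bits into mbuf checksum flags.
 * 5717-class chips report an explicit IP checksum error flag, while
 * older chips expose the raw IP checksum that must compare to 0xFFFF.
 */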
3817static void
3818bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3819{
3820
3821 if (BGE_IS_5717_PLUS(sc)) {
3822 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3823 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3824 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3825 if ((cur_rx->bge_error_flag &
3826 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3827 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3828 }
3829 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3830 m->m_pkthdr.csum_data =
3831 cur_rx->bge_tcp_udp_csum;
3832 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3833 CSUM_PSEUDO_HDR;
3834 }
3835 }
3836 } else {
3837 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3838 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3839 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3840 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3841 }
3842 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3843 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3844 m->m_pkthdr.csum_data =
3845 cur_rx->bge_tcp_udp_csum;
3846 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3847 CSUM_PSEUDO_HDR;
3848 }
3849 }
3850}
3851
3852static void
3853bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3854{
3855 struct bge_tx_bd *cur_tx;
3856 struct ifnet *ifp;
3857
3858 BGE_LOCK_ASSERT(sc);
3859
3860 /* Nothing to do. */
3861 if (sc->bge_tx_saved_considx == tx_cons)
3862 return;
3863
3864 ifp = sc->bge_ifp;
3865
3866 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3867 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3868 /*
3869 * Go through our tx ring and free mbufs for those
3870 * frames that have been sent.
3871 */
3872 while (sc->bge_tx_saved_considx != tx_cons) {
3873 uint32_t idx;
3874
3875 idx = sc->bge_tx_saved_considx;
3876 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3877 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3878 ifp->if_opackets++;
3879 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3880 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3881 sc->bge_cdata.bge_tx_dmamap[idx],
3882 BUS_DMASYNC_POSTWRITE);
3883 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3884 sc->bge_cdata.bge_tx_dmamap[idx]);
3885 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3886 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3887 }
3888 sc->bge_txcnt--;
3889 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3890 }
3891
3892 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3893 if (sc->bge_txcnt == 0)
3894 sc->bge_timer = 0;
3895}
3896
3897#ifdef DEVICE_POLLING
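/*
 * Polled-mode handler: read the status block directly, note any link
 * event (processed on POLL_AND_CHECK_STATUS), then drain the RX return
 * and TX rings up to the polling budget.
 */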
3898static int
3899bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3900{
3901 struct bge_softc *sc = ifp->if_softc;
3902 uint16_t rx_prod, tx_cons;
3903 uint32_t statusword;
3904 int rx_npkts = 0;
3905
3906 BGE_LOCK(sc);
3907 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3908 BGE_UNLOCK(sc);
3909 return (rx_npkts);
3910 }
3911
3912 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3913 sc->bge_cdata.bge_status_map,
3914 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3915 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3916 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3917
3918 statusword = sc->bge_ldata.bge_status_block->bge_status;
3919 sc->bge_ldata.bge_status_block->bge_status = 0;
3920
3921 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3922 sc->bge_cdata.bge_status_map,
3923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3924
3925 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3926 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3927 sc->bge_link_evt++;
3928
3929 if (cmd == POLL_AND_CHECK_STATUS)
3930 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3931 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3932 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3933 bge_link_upd(sc);
3934
3935 sc->rxcycles = count;
3936 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3937 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3938 BGE_UNLOCK(sc);
3939 return (rx_npkts);
3940 }
3941 bge_txeof(sc, tx_cons);
3942 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3943 bge_start_locked(ifp);
3944
3945 BGE_UNLOCK(sc);
3946 return (rx_npkts);
3947}
3948#endif /* DEVICE_POLLING */
3949
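/*
 * MSI filter routine; it simply schedules the interrupt task, since in
 * single-shot MSI mode the controller has already suppressed further
 * interrupts.
 */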
3950static int
3951bge_msi_intr(void *arg)
3952{
3953 struct bge_softc *sc;
3954
3955 sc = (struct bge_softc *)arg;
3956 /*
3957 * This interrupt is not shared and the controller has
3958 * already disabled further interrupts.
3959 */
3960 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3961 return (FILTER_HANDLED);
3962}
3963
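/*
 * Taskqueue handler for the MSI path: snapshot the status block
 * (producer/consumer indexes and the status tag), handle link changes,
 * ack the interrupt by writing the tag to the IRQ0 mailbox, then run
 * the RX and TX completion paths.
 */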
3964static void
3965bge_intr_task(void *arg, int pending)
3966{
3967 struct bge_softc *sc;
3968 struct ifnet *ifp;
3969 uint32_t status, status_tag;
3970 uint16_t rx_prod, tx_cons;
3971
3972 sc = (struct bge_softc *)arg;
3973 ifp = sc->bge_ifp;
3974
3975 BGE_LOCK(sc);
3976 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3977 BGE_UNLOCK(sc);
3978 return;
3979 }
3980
3981 /* Get updated status block. */
3982 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3983 sc->bge_cdata.bge_status_map,
3984 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3985
3986 /* Save producer/consumer indexes. */
3987 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3988 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3989 status = sc->bge_ldata.bge_status_block->bge_status;
3990 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3991 sc->bge_ldata.bge_status_block->bge_status = 0;
3992 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3993 sc->bge_cdata.bge_status_map,
3994 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3995 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3996 status_tag = 0;
3997
3998 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3999 bge_link_upd(sc);
4000
4001 /* Let controller work. */
4002 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4003
4004 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4005 sc->bge_rx_saved_considx != rx_prod) {
4006 /* Check RX return ring producer/consumer. */
4007 BGE_UNLOCK(sc);
4008 bge_rxeof(sc, rx_prod, 0);
4009 BGE_LOCK(sc);
4010 }
4011 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4012 /* Check TX ring producer/consumer. */
4013 bge_txeof(sc, tx_cons);
4014 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4015 bge_start_locked(ifp);
4016 }
4017 BGE_UNLOCK(sc);
4018}
4019
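/*
 * Legacy interrupt handler, used whenever the single-shot MSI/taskqueue
 * path was not set up at attach time.
 */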
4020static void
4021bge_intr(void *xsc)
4022{
4023 struct bge_softc *sc;
4024 struct ifnet *ifp;
4025 uint32_t statusword;
4026 uint16_t rx_prod, tx_cons;
4027
4028 sc = xsc;
4029
4030 BGE_LOCK(sc);
4031
4032 ifp = sc->bge_ifp;
4033
4034#ifdef DEVICE_POLLING
4035 if (ifp->if_capenable & IFCAP_POLLING) {
4036 BGE_UNLOCK(sc);
4037 return;
4038 }
4039#endif
4040
4041 /*
4042 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4043 * disable interrupts by writing nonzero like we used to, since with
4044 * our current organization this just gives complications and
4045 * pessimizations for re-enabling interrupts. We used to have races
4046 * instead of the necessary complications. Disabling interrupts
4047 * would just reduce the chance of a status update while we are
4048 * running (by switching to the interrupt-mode coalescence
4049 * parameters), but this chance is already very low so it is more
4050 * efficient to get another interrupt than prevent it.
4051 *
4052 * We do the ack first to ensure another interrupt if there is a
4053 * status update after the ack. We don't check for the status
4054 * changing later because it is more efficient to get another
4055 * interrupt than prevent it, not quite as above (not checking is
4056 * a smaller optimization than not toggling the interrupt enable,
4057 * since checking doesn't involve PCI accesses and toggling requires
4058 * the status check). So toggling would probably be a pessimization
4059 * even with MSI. It would only be needed for using a task queue.
4060 */
4061 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4062
4063 /*
4064 * Do the mandatory PCI flush as well as get the link status.
4065 */
4066 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4067
4068 /* Make sure the descriptor ring indexes are coherent. */
4069 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4070 sc->bge_cdata.bge_status_map,
4071 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4072 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4073 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4074 sc->bge_ldata.bge_status_block->bge_status = 0;
4075 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4076 sc->bge_cdata.bge_status_map,
4077 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4078
4079 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4080 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4081 statusword || sc->bge_link_evt)
4082 bge_link_upd(sc);
4083
4084 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4085 /* Check RX return ring producer/consumer. */
4086 bge_rxeof(sc, rx_prod, 1);
4087 }
4088
4089 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4090 /* Check TX ring producer/consumer. */
4091 bge_txeof(sc, tx_cons);
4092 }
4093
4094 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4095 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4096 bge_start_locked(ifp);
4097
4098 BGE_UNLOCK(sc);
4099}
4100
4101static void
4102bge_asf_driver_up(struct bge_softc *sc)
4103{
4104 if (sc->bge_asf_mode & ASF_STACKUP) {
4105 /* Send an ASF heartbeat approx. every 2 seconds. */
4106 if (sc->bge_asf_count)
4107 sc->bge_asf_count--;
4108 else {
4109 sc->bge_asf_count = 2;
4110 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4111 BGE_FW_CMD_DRV_ALIVE);
4112 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
305 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
306 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
307 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
308 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
309 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
310 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
311 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
312 /* 5754 and 5787 share the same ASIC ID */
313 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
314 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
315 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
316 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
317 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
318 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
319 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
320 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
321 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
322
323 { 0, NULL }
324};
325
326/*
327 * Some defaults for major revisions, so that newer steppings
328 * that we don't know about have a shot at working.
329 */
330static const struct bge_revision const bge_majorrevs[] = {
331 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
332 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
333 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
334 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
335 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
336 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
337 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
338 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
339 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
340 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
341 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
342 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
343 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
344 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
345 /* 5754 and 5787 share the same ASIC ID */
346 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
347 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
348 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
349 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
350 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
351 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
352
353 { 0, NULL }
354};
355
356#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
357#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
358#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
359#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
360#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
361#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
362#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
363
364const struct bge_revision * bge_lookup_rev(uint32_t);
365const struct bge_vendor * bge_lookup_vendor(uint16_t);
366
367typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
368
369static int bge_probe(device_t);
370static int bge_attach(device_t);
371static int bge_detach(device_t);
372static int bge_suspend(device_t);
373static int bge_resume(device_t);
374static void bge_release_resources(struct bge_softc *);
375static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
376static int bge_dma_alloc(struct bge_softc *);
377static void bge_dma_free(struct bge_softc *);
378static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
379 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
380
381static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
382static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
383static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
384static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
385static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
386
387static void bge_txeof(struct bge_softc *, uint16_t);
388static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
389static int bge_rxeof(struct bge_softc *, uint16_t, int);
390
391static void bge_asf_driver_up (struct bge_softc *);
392static void bge_tick(void *);
393static void bge_stats_clear_regs(struct bge_softc *);
394static void bge_stats_update(struct bge_softc *);
395static void bge_stats_update_regs(struct bge_softc *);
396static struct mbuf *bge_check_short_dma(struct mbuf *);
397static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
398 uint16_t *, uint16_t *);
399static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
400
401static void bge_intr(void *);
402static int bge_msi_intr(void *);
403static void bge_intr_task(void *, int);
404static void bge_start_locked(struct ifnet *);
405static void bge_start(struct ifnet *);
406static int bge_ioctl(struct ifnet *, u_long, caddr_t);
407static void bge_init_locked(struct bge_softc *);
408static void bge_init(void *);
409static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
410static void bge_stop(struct bge_softc *);
411static void bge_watchdog(struct bge_softc *);
412static int bge_shutdown(device_t);
413static int bge_ifmedia_upd_locked(struct ifnet *);
414static int bge_ifmedia_upd(struct ifnet *);
415static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
416
417static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
418static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
419
420static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
421static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
422
423static void bge_setpromisc(struct bge_softc *);
424static void bge_setmulti(struct bge_softc *);
425static void bge_setvlan(struct bge_softc *);
426
427static __inline void bge_rxreuse_std(struct bge_softc *, int);
428static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
429static int bge_newbuf_std(struct bge_softc *, int);
430static int bge_newbuf_jumbo(struct bge_softc *, int);
431static int bge_init_rx_ring_std(struct bge_softc *);
432static void bge_free_rx_ring_std(struct bge_softc *);
433static int bge_init_rx_ring_jumbo(struct bge_softc *);
434static void bge_free_rx_ring_jumbo(struct bge_softc *);
435static void bge_free_tx_ring(struct bge_softc *);
436static int bge_init_tx_ring(struct bge_softc *);
437
438static int bge_chipinit(struct bge_softc *);
439static int bge_blockinit(struct bge_softc *);
440
441static int bge_has_eaddr(struct bge_softc *);
442static uint32_t bge_readmem_ind(struct bge_softc *, int);
443static void bge_writemem_ind(struct bge_softc *, int, int);
444static void bge_writembx(struct bge_softc *, int, int);
445#ifdef notdef
446static uint32_t bge_readreg_ind(struct bge_softc *, int);
447#endif
448static void bge_writemem_direct(struct bge_softc *, int, int);
449static void bge_writereg_ind(struct bge_softc *, int, int);
450
451static int bge_miibus_readreg(device_t, int, int);
452static int bge_miibus_writereg(device_t, int, int, int);
453static void bge_miibus_statchg(device_t);
454#ifdef DEVICE_POLLING
455static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
456#endif
457
458#define BGE_RESET_START 1
459#define BGE_RESET_STOP 2
460static void bge_sig_post_reset(struct bge_softc *, int);
461static void bge_sig_legacy(struct bge_softc *, int);
462static void bge_sig_pre_reset(struct bge_softc *, int);
463static void bge_stop_fw(struct bge_softc *);
464static int bge_reset(struct bge_softc *);
465static void bge_link_upd(struct bge_softc *);
466
467/*
468 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
469 * leak information to untrusted users. It is also known to cause alignment
470 * traps on certain architectures.
471 */
472#ifdef BGE_REGISTER_DEBUG
473static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
474static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
475static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
476#endif
477static void bge_add_sysctls(struct bge_softc *);
478static void bge_add_sysctl_stats_regs(struct bge_softc *,
479 struct sysctl_ctx_list *, struct sysctl_oid_list *);
480static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
481 struct sysctl_oid_list *);
482static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
483
484static device_method_t bge_methods[] = {
485 /* Device interface */
486 DEVMETHOD(device_probe, bge_probe),
487 DEVMETHOD(device_attach, bge_attach),
488 DEVMETHOD(device_detach, bge_detach),
489 DEVMETHOD(device_shutdown, bge_shutdown),
490 DEVMETHOD(device_suspend, bge_suspend),
491 DEVMETHOD(device_resume, bge_resume),
492
493 /* bus interface */
494 DEVMETHOD(bus_print_child, bus_generic_print_child),
495 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
496
497 /* MII interface */
498 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
499 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
500 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
501
502 { 0, 0 }
503};
504
505static driver_t bge_driver = {
506 "bge",
507 bge_methods,
508 sizeof(struct bge_softc)
509};
510
511static devclass_t bge_devclass;
512
513DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
514DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
515
516static int bge_allow_asf = 1;
517
518TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
519
520SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
521SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
522 "Allow ASF mode if available");
523
524#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
525#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
526#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
527#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
528#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
529
530static int
531bge_has_eaddr(struct bge_softc *sc)
532{
533#ifdef __sparc64__
534 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
535 device_t dev;
536 uint32_t subvendor;
537
538 dev = sc->bge_dev;
539
540 /*
541 * The on-board BGEs found in sun4u machines aren't fitted with
542 * an EEPROM, which means that we have to obtain the MAC address
543 * via OFW and that some tests will always fail. We distinguish
544 * such BGEs by the subvendor ID, which also has to be obtained
545 * from OFW instead of the PCI configuration space as the latter
546 * indicates Broadcom as the subvendor of the netboot interface.
547 * For early Blade 1500 and 2500 we even have to check the OFW
548 * device path as the subvendor ID always defaults to Broadcom
549 * there.
550 */
551 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
552 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
553 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
554 return (0);
555 memset(buf, 0, sizeof(buf));
556 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
557 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
558 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
559 return (0);
560 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
562 return (0);
563 }
564#endif
565 return (1);
566}
567
568static uint32_t
569bge_readmem_ind(struct bge_softc *sc, int off)
570{
571 device_t dev;
572 uint32_t val;
573
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
575 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
576 return (0);
577
578 dev = sc->bge_dev;
579
580 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
581 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
583 return (val);
584}
585
586static void
587bge_writemem_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
592 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
593 return;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
600}
601
602#ifdef notdef
603static uint32_t
604bge_readreg_ind(struct bge_softc *sc, int off)
605{
606 device_t dev;
607
608 dev = sc->bge_dev;
609
610 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
611 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
612}
613#endif
614
615static void
616bge_writereg_ind(struct bge_softc *sc, int off, int val)
617{
618 device_t dev;
619
620 dev = sc->bge_dev;
621
622 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
623 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
624}
625
626static void
627bge_writemem_direct(struct bge_softc *sc, int off, int val)
628{
629 CSR_WRITE_4(sc, off, val);
630}
631
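/*
 * Write a host mailbox register. The BCM5906 uses the low-priority
 * mailbox range, so the offset is remapped there. A typical use is
 * acking interrupts by writing to BGE_MBX_IRQ0_LO.
 */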
632static void
633bge_writembx(struct bge_softc *sc, int off, int val)
634{
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
636 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
637
638 CSR_WRITE_4(sc, off, val);
639}
640
641/*
642 * Map a single buffer address.
643 */
644
645static void
646bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
647{
648 struct bge_dmamap_arg *ctx;
649
650 if (error)
651 return;
652
653 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
654
655 ctx = arg;
656 ctx->bge_busaddr = segs->ds_addr;
657}
658
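/*
 * Read one byte from the BCM5906 on-chip NVRAM: acquire the software
 * arbitration lock, enable NVRAM access, issue a word read and extract
 * the requested byte, then disable access and release the lock.
 */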
659static uint8_t
660bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
661{
662 uint32_t access, byte = 0;
663 int i;
664
665 /* Lock. */
666 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
667 for (i = 0; i < 8000; i++) {
668 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
669 break;
670 DELAY(20);
671 }
672 if (i == 8000)
673 return (1);
674
675 /* Enable access. */
676 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
677 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
678
679 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
680 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
681 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
682 DELAY(10);
683 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
684 DELAY(10);
685 break;
686 }
687 }
688
689 if (i == BGE_TIMEOUT * 10) {
690 if_printf(sc->bge_ifp, "nvram read timed out\n");
691 return (1);
692 }
693
694 /* Get result. */
695 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
696
697 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
698
699 /* Disable access. */
700 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
701
702 /* Unlock. */
703 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
704 CSR_READ_4(sc, BGE_NVRAM_SWARB);
705
706 return (0);
707}
708
709/*
710 * Read a sequence of bytes from NVRAM.
711 */
712static int
713bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
714{
715 int err = 0, i;
716 uint8_t byte = 0;
717
718 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
719 return (1);
720
721 for (i = 0; i < cnt; i++) {
722 err = bge_nvram_getbyte(sc, off + i, &byte);
723 if (err)
724 break;
725 *(dest + i) = byte;
726 }
727
728 return (err ? 1 : 0);
729}
730
731/*
732 * Read a byte of data stored in the EEPROM at address 'addr.' The
733 * BCM570x supports both the traditional bitbang interface and an
734 * auto access interface for reading the EEPROM. We use the auto
735 * access method.
736 */
737static uint8_t
738bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
739{
740 int i;
741 uint32_t byte = 0;
742
743 /*
744 * Enable use of auto EEPROM access so we can avoid
745 * having to use the bitbang method.
746 */
747 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
748
749 /* Reset the EEPROM, load the clock period. */
750 CSR_WRITE_4(sc, BGE_EE_ADDR,
751 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
752 DELAY(20);
753
754 /* Issue the read EEPROM command. */
755 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
756
757 /* Wait for completion */
758 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
759 DELAY(10);
760 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
761 break;
762 }
763
764 if (i == BGE_TIMEOUT * 10) {
765 device_printf(sc->bge_dev, "EEPROM read timed out\n");
766 return (1);
767 }
768
769 /* Get result. */
770 byte = CSR_READ_4(sc, BGE_EE_DATA);
771
772 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
773
774 return (0);
775}
776
777/*
778 * Read a sequence of bytes from the EEPROM.
779 */
780static int
781bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
782{
783 int i, error = 0;
784 uint8_t byte = 0;
785
786 for (i = 0; i < cnt; i++) {
787 error = bge_eeprom_getbyte(sc, off + i, &byte);
788 if (error)
789 break;
790 *(dest + i) = byte;
791 }
792
793 return (error ? 1 : 0);
794}
795
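/*
 * Read a PHY register through the MI communication interface.
 * Autopolling is turned off while the access is in progress and
 * restored afterwards; a failed read returns 0.
 */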
796static int
797bge_miibus_readreg(device_t dev, int phy, int reg)
798{
799 struct bge_softc *sc;
800 uint32_t val;
801 int i;
802
803 sc = device_get_softc(dev);
804
805 /* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
806 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
807 CSR_WRITE_4(sc, BGE_MI_MODE,
808 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
809 DELAY(80);
810 }
811
812 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
813 BGE_MIPHY(phy) | BGE_MIREG(reg));
814
815 /* Poll for the PHY register access to complete. */
816 for (i = 0; i < BGE_TIMEOUT; i++) {
817 DELAY(10);
818 val = CSR_READ_4(sc, BGE_MI_COMM);
819 if ((val & BGE_MICOMM_BUSY) == 0) {
820 DELAY(5);
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 break;
823 }
824 }
825
826 if (i == BGE_TIMEOUT) {
827 device_printf(sc->bge_dev,
828 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
829 phy, reg, val);
830 val = 0;
831 }
832
833 /* Restore the autopoll bit if necessary. */
834 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
835 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
836 DELAY(80);
837 }
838
839 if (val & BGE_MICOMM_READFAIL)
840 return (0);
841
842 return (val & 0xFFFF);
843}
844
845static int
846bge_miibus_writereg(device_t dev, int phy, int reg, int val)
847{
848 struct bge_softc *sc;
849 int i;
850
851 sc = device_get_softc(dev);
852
853 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
854 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
855 return (0);
856
857 /* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
858 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
859 CSR_WRITE_4(sc, BGE_MI_MODE,
860 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
861 DELAY(80);
862 }
863
864 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
865 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
866
867 for (i = 0; i < BGE_TIMEOUT; i++) {
868 DELAY(10);
869 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
870 DELAY(5);
871 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
872 break;
873 }
874 }
875
876 /* Restore the autopoll bit if necessary. */
877 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
878 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
879 DELAY(80);
880 }
881
882 if (i == BGE_TIMEOUT)
883 device_printf(sc->bge_dev,
884 "PHY write timed out (phy %d, reg %d, val %d)\n",
885 phy, reg, val);
886
887 return (0);
888}
889
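/*
 * MII status change callback: track link state and reprogram the MAC
 * port mode, duplex and flow-control settings to match the negotiated
 * media.
 */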
890static void
891bge_miibus_statchg(device_t dev)
892{
893 struct bge_softc *sc;
894 struct mii_data *mii;
895 sc = device_get_softc(dev);
896 mii = device_get_softc(sc->bge_miibus);
897
898 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
899 (IFM_ACTIVE | IFM_AVALID)) {
900 switch (IFM_SUBTYPE(mii->mii_media_active)) {
901 case IFM_10_T:
902 case IFM_100_TX:
903 sc->bge_link = 1;
904 break;
905 case IFM_1000_T:
906 case IFM_1000_SX:
907 case IFM_2500_SX:
908 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
909 sc->bge_link = 1;
910 else
911 sc->bge_link = 0;
912 break;
913 default:
914 sc->bge_link = 0;
915 break;
916 }
917 } else
918 sc->bge_link = 0;
919 if (sc->bge_link == 0)
920 return;
921 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
922 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
923 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
924 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
925 else
926 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
927
928 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
929 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
930 if ((IFM_OPTIONS(mii->mii_media_active) &
931 IFM_ETH_TXPAUSE) != 0)
932 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
933 else
934 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
935 if ((IFM_OPTIONS(mii->mii_media_active) &
936 IFM_ETH_RXPAUSE) != 0)
937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
938 else
939 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
940 } else {
941 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
942 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
943 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
944 }
945}
946
947/*
948 * Initialize a standard receive ring descriptor.
949 */
950static int
951bge_newbuf_std(struct bge_softc *sc, int i)
952{
953 struct mbuf *m;
954 struct bge_rx_bd *r;
955 bus_dma_segment_t segs[1];
956 bus_dmamap_t map;
957 int error, nsegs;
958
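	/*
	 * Buffer sizing sketch: when jumbo frames are handled on the
	 * standard ring (BGE_FLAG_JUMBO_STD) and the configured MTU no
	 * longer fits in a standard mbuf cluster (MCLBYTES, normally 2KB),
	 * allocate a 9KB jumbo cluster; otherwise a regular cluster is
	 * sufficient.
	 */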
959 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
960 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
961 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
962 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
963 if (m == NULL)
964 return (ENOBUFS);
965 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
966 } else {
967 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
968 if (m == NULL)
969 return (ENOBUFS);
970 m->m_len = m->m_pkthdr.len = MCLBYTES;
971 }
972 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
973 m_adj(m, ETHER_ALIGN);
974
975 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
976 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
977 if (error != 0) {
978 m_freem(m);
979 return (error);
980 }
981 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
982 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
983 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
984 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
985 sc->bge_cdata.bge_rx_std_dmamap[i]);
986 }
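	/*
	 * The new mbuf was loaded into the pre-created spare map above,
	 * so swap that map into this ring slot and keep the slot's old
	 * map as the new spare.  A load failure earlier in this function
	 * therefore never disturbs the buffer currently in the slot.
	 */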
987 map = sc->bge_cdata.bge_rx_std_dmamap[i];
988 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
989 sc->bge_cdata.bge_rx_std_sparemap = map;
990 sc->bge_cdata.bge_rx_std_chain[i] = m;
991 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
992 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
993 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
994 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
995 r->bge_flags = BGE_RXBDFLAG_END;
996 r->bge_len = segs[0].ds_len;
997 r->bge_idx = i;
998
999 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1000 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Initialize a jumbo receive ring descriptor. This allocates
1007 * a jumbo buffer from the pool managed internally by the driver.
1008 */
1009static int
1010bge_newbuf_jumbo(struct bge_softc *sc, int i)
1011{
1012 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1013 bus_dmamap_t map;
1014 struct bge_extrx_bd *r;
1015 struct mbuf *m;
1016 int error, nsegs;
1017
1018 MGETHDR(m, M_DONTWAIT, MT_DATA);
1019 if (m == NULL)
1020 return (ENOBUFS);
1021
1022 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1023 if (!(m->m_flags & M_EXT)) {
1024 m_freem(m);
1025 return (ENOBUFS);
1026 }
1027 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1028 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1029 m_adj(m, ETHER_ALIGN);
1030
1031 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1032 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1033 if (error != 0) {
1034 m_freem(m);
1035 return (error);
1036 }
1037
1038 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1039 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1040 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1041 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1042 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1043 }
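	/* Same spare-map swap scheme as in bge_newbuf_std(). */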
1044 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1046 sc->bge_cdata.bge_rx_jumbo_sparemap;
1047 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1048 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1053
1054 /*
1055 * Fill in the extended RX buffer descriptor.
1056 */
1057 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1058 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1059 r->bge_idx = i;
1060 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
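	/*
	 * The cases below intentionally fall through, filling the
	 * descriptor's address/length slots from the highest DMA segment
	 * down to segment 0.
	 */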
1061 switch (nsegs) {
1062 case 4:
1063 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1064 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1065 r->bge_len3 = segs[3].ds_len;
1066 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
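		/* FALLTHROUGH */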
1067 case 3:
1068 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1069 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1070 r->bge_len2 = segs[2].ds_len;
1071 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
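		/* FALLTHROUGH */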
1072 case 2:
1073 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1074 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1075 r->bge_len1 = segs[1].ds_len;
1076 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
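		/* FALLTHROUGH */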
1077 case 1:
1078 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1079 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1080 r->bge_len0 = segs[0].ds_len;
1081 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1082 break;
1083 default:
1084 panic("%s: %d segments\n", __func__, nsegs);
1085 }
1086
1087 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1088 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1089
1090 return (0);
1091}
1092
1093static int
1094bge_init_rx_ring_std(struct bge_softc *sc)
1095{
1096 int error, i;
1097
1098 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1099 sc->bge_std = 0;
1100 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1101 if ((error = bge_newbuf_std(sc, i)) != 0)
1102 return (error);
1103 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1104 }
1105
1106 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1107 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1108
1109 sc->bge_std = 0;
1110 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_std(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1123 sc->bge_cdata.bge_rx_std_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1128 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1131 sizeof(struct bge_rx_bd));
1132 }
1133}
1134
1135static int
1136bge_init_rx_ring_jumbo(struct bge_softc *sc)
1137{
1138 struct bge_rcb *rcb;
1139 int error, i;
1140
1141 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1142 sc->bge_jumbo = 0;
1143 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1144 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1145 return (error);
1146 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1147 }
1148
1149 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1150 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1151
1152 sc->bge_jumbo = 0;
1153
1154 /* Enable the jumbo receive producer ring. */
1155 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1156 rcb->bge_maxlen_flags =
1157 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1158 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1159
1160 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1161
1162 return (0);
1163}
1164
1165static void
1166bge_free_rx_ring_jumbo(struct bge_softc *sc)
1167{
1168 int i;
1169
1170 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1171 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1172 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1173 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1174 BUS_DMASYNC_POSTREAD);
1175 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1177 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1178 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1179 }
1180 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1181 sizeof(struct bge_extrx_bd));
1182 }
1183}
1184
1185static void
1186bge_free_tx_ring(struct bge_softc *sc)
1187{
1188 int i;
1189
1190 if (sc->bge_ldata.bge_tx_ring == NULL)
1191 return;
1192
1193 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1194 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1195 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1196 sc->bge_cdata.bge_tx_dmamap[i],
1197 BUS_DMASYNC_POSTWRITE);
1198 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i]);
1200 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1201 sc->bge_cdata.bge_tx_chain[i] = NULL;
1202 }
1203 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1204 sizeof(struct bge_tx_bd));
1205 }
1206}
1207
1208static int
1209bge_init_tx_ring(struct bge_softc *sc)
1210{
1211 sc->bge_txcnt = 0;
1212 sc->bge_tx_saved_considx = 0;
1213
1214 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1215 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1216 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1217
1218 /* Initialize transmit producer index for host-memory send ring. */
1219 sc->bge_tx_prodidx = 0;
1220 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1221
1222 /* 5700 b2 errata */
1223 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1224 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1225
1226 /* NIC-memory send ring not used; initialize to zero. */
1227 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1228 /* 5700 b2 errata */
1229 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231
1232 return (0);
1233}
1234
1235static void
1236bge_setpromisc(struct bge_softc *sc)
1237{
1238 struct ifnet *ifp;
1239
1240 BGE_LOCK_ASSERT(sc);
1241
1242 ifp = sc->bge_ifp;
1243
1244 /* Enable or disable promiscuous mode as needed. */
1245 if (ifp->if_flags & IFF_PROMISC)
1246 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1247 else
1248 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1249}
1250
1251static void
1252bge_setmulti(struct bge_softc *sc)
1253{
1254 struct ifnet *ifp;
1255 struct ifmultiaddr *ifma;
1256 uint32_t hashes[4] = { 0, 0, 0, 0 };
1257 int h, i;
1258
1259 BGE_LOCK_ASSERT(sc);
1260
1261 ifp = sc->bge_ifp;
1262
1263 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1264 for (i = 0; i < 4; i++)
1265 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1266 return;
1267 }
1268
1269 /* First, zot all the existing filters. */
1270 for (i = 0; i < 4; i++)
1271 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1272
1273 /* Now program new ones. */
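	/*
	 * Hash scheme, as implemented below: the low 7 bits of the
	 * little-endian CRC32 of each multicast address select one of 128
	 * hash bins; bits 6-5 pick one of the four 32-bit BGE_MAR
	 * registers and bits 4-0 pick the bit within that register.
	 */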
1274 if_maddr_rlock(ifp);
1275 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1276 if (ifma->ifma_addr->sa_family != AF_LINK)
1277 continue;
1278 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1279 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1280 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1281 }
1282 if_maddr_runlock(ifp);
1283
1284 for (i = 0; i < 4; i++)
1285 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1286}
1287
1288static void
1289bge_setvlan(struct bge_softc *sc)
1290{
1291 struct ifnet *ifp;
1292
1293 BGE_LOCK_ASSERT(sc);
1294
1295 ifp = sc->bge_ifp;
1296
1297 /* Enable or disable VLAN tag stripping as needed. */
1298 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1299 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1300 else
1301 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1302}
1303
1304static void
1305bge_sig_pre_reset(struct bge_softc *sc, int type)
1306{
1307
1308 /*
1309 * Some chips don't like this, so only do it if ASF is enabled.
1310 */
1311 if (sc->bge_asf_mode)
1312 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1313
1314 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1315 switch (type) {
1316 case BGE_RESET_START:
1317 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1318 BGE_FW_DRV_STATE_START);
1319 break;
1320 case BGE_RESET_STOP:
1321 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1322 BGE_FW_DRV_STATE_UNLOAD);
1323 break;
1324 }
1325 }
1326}
1327
1328static void
1329bge_sig_post_reset(struct bge_softc *sc, int type)
1330{
1331
1332 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1333 switch (type) {
1334 case BGE_RESET_START:
1335 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1336 BGE_FW_DRV_STATE_START_DONE);
1337 /* START DONE */
1338 break;
1339 case BGE_RESET_STOP:
1340 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1341 BGE_FW_DRV_STATE_UNLOAD_DONE);
1342 break;
1343 }
1344 }
1345}
1346
1347static void
1348bge_sig_legacy(struct bge_softc *sc, int type)
1349{
1350
1351 if (sc->bge_asf_mode) {
1352 switch (type) {
1353 case BGE_RESET_START:
1354 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1355 BGE_FW_DRV_STATE_START);
1356 break;
1357 case BGE_RESET_STOP:
1358 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1359 BGE_FW_DRV_STATE_UNLOAD);
1360 break;
1361 }
1362 }
1363}
1364
1365static void
1366bge_stop_fw(struct bge_softc *sc)
1367{
1368 int i;
1369
1370 if (sc->bge_asf_mode) {
1371 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1372 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1373 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1374
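		/* Wait up to ~1ms (100 * 10us) for firmware to ack the event. */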
1375 for (i = 0; i < 100; i++) {
1376 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1377 BGE_RX_CPU_DRV_EVENT))
1378 break;
1379 DELAY(10);
1380 }
1381 }
1382}
1383
1384/*
1385 * Do endian, PCI and DMA initialization.
1386 */
1387static int
1388bge_chipinit(struct bge_softc *sc)
1389{
1390 uint32_t dma_rw_ctl, misc_ctl;
1391 uint16_t val;
1392 int i;
1393
1394 /* Set endianness before we access any non-PCI registers. */
1395 misc_ctl = BGE_INIT;
1396 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1397 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1398 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1399
1400 /* Clear the MAC control register */
1401 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1402
1403 /*
1404 * Clear the MAC statistics block in the NIC's
1405 * internal memory.
1406 */
1407 for (i = BGE_STATS_BLOCK;
1408 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1409 BGE_MEMWIN_WRITE(sc, i, 0);
1410
1411 for (i = BGE_STATUS_BLOCK;
1412 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1413 BGE_MEMWIN_WRITE(sc, i, 0);
1414
1415 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1416 /*
1417 * Fix data corruption caused by non-qword write with WB.
1418 * Fix master abort in PCI mode.
1419 * Fix PCI latency timer.
1420 */
1421 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1422 val |= (1 << 10) | (1 << 12) | (1 << 13);
1423 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1424 }
1425
1426 /*
1427 * Set up the PCI DMA control register.
1428 */
1429 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1430 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1431 if (sc->bge_flags & BGE_FLAG_PCIE) {
1432 /* Read watermark not used, 128 bytes for write. */
1433 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1434 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1435 if (BGE_IS_5714_FAMILY(sc)) {
1436 /* 256 bytes for read and write. */
1437 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1438 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1439 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1440 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1441 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1442 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1443 /*
1444 * In the BCM5703, the DMA read watermark should
1445 * be set to less than or equal to the maximum
1446 * memory read byte count of the PCI-X command
1447 * register.
1448 */
1449 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1450 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1451 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1452 /* 1536 bytes for read, 384 bytes for write. */
1453 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1454 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1455 } else {
1456 /* 384 bytes for read and write. */
1457 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1458 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1459 0x0F;
1460 }
1461 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1462 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1463 uint32_t tmp;
1464
1465 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1466 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1467 if (tmp == 6 || tmp == 7)
1468 dma_rw_ctl |=
1469 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1470
1471 /* Set PCI-X DMA write workaround. */
1472 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1473 }
1474 } else {
1475 /* Conventional PCI bus: 256 bytes for read and write. */
1476 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1477 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1478
1479 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1480 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1481 dma_rw_ctl |= 0x0F;
1482 }
1483 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1484 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1485 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1486 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1487 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1488 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1489 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1490 if (BGE_IS_5717_PLUS(sc)) {
1491 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1492 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1493 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1494 /*
1495 * Enable HW workaround for controllers that misinterpret
1496 * a status tag update and leave interrupts permanently
1497 * disabled.
1498 */
1499 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1500 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1501 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1502 }
1503 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1504
1505 /*
1506 * Set up general mode register.
1507 */
1508 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1509 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1510 BGE_MODECTL_TX_NO_PHDR_CSUM);
1511
1512 /*
1513 * The BCM5701 B5 has a bug that causes data corruption when using
1514 * 64-bit DMA reads, which can be terminated early and then
1515 * completed later as 32-bit accesses, in combination with
1516 * certain bridges.
1517 */
1518 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1519 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1520 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1521
1522 /*
1523 * Tell the firmware the driver is running
1524 */
1525 if (sc->bge_asf_mode & ASF_STACKUP)
1526 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1527
1528 /*
1529 * Disable memory write invalidate. Apparently it is not supported
1530 * properly by these devices. Also ensure that INTx isn't disabled,
1531 * as these chips need it even when using MSI.
1532 */
1533 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1534 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1535
1536 /* Set the timer prescaler (always 66 MHz). */
1537 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1538
1539 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1540 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1541 DELAY(40); /* XXX */
1542
1543 /* Put PHY into ready state */
1544 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1545 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1546 DELAY(40);
1547 }
1548
1549 return (0);
1550}
1551
1552static int
1553bge_blockinit(struct bge_softc *sc)
1554{
1555 struct bge_rcb *rcb;
1556 bus_size_t vrcb;
1557 bge_hostaddr taddr;
1558 uint32_t dmactl, val;
1559 int i, limit;
1560
1561 /*
1562 * Initialize the memory window pointer register so that
1563 * we can access the first 32K of internal NIC RAM. This will
1564 * allow us to set up the TX send ring RCBs and the RX return
1565 * ring RCBs, plus other things which live in NIC memory.
1566 */
1567 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1568
1569 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1570
1571 if (!(BGE_IS_5705_PLUS(sc))) {
1572 /* Configure mbuf memory pool */
1573 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1574 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1575 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1576 else
1577 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1578
1579 /* Configure DMA resource pool */
1580 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1581 BGE_DMA_DESCRIPTORS);
1582 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1583 }
1584
1585 /* Configure mbuf pool watermarks */
1586 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1587 sc->bge_asicrev == BGE_ASICREV_BCM57765) {
1588 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1589 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1590 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1591 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1592 } else {
1593 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1594 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1595 }
1596 } else if (!BGE_IS_5705_PLUS(sc)) {
1597 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1598 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1599 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1600 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1601 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1602 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1603 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1604 } else {
1605 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1606 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1607 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1608 }
1609
1610 /* Configure DMA resource watermarks */
1611 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1612 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1613
1614 /* Enable buffer manager */
1615 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1616 /*
1617 * Change the arbitration algorithm of TXMBUF read request to
1618 * round-robin instead of priority based for BCM5719. When
1619 * TXFIFO is almost empty, RDMA will hold its request until
1620 * TXFIFO is not almost empty.
1621 */
1622 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1623 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1624 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1625
1626 /* Poll for buffer manager start indication */
1627 for (i = 0; i < BGE_TIMEOUT; i++) {
1628 DELAY(10);
1629 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1630 break;
1631 }
1632
1633 if (i == BGE_TIMEOUT) {
1634 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1635 return (ENXIO);
1636 }
1637
1638 /* Enable flow-through queues */
1639 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1640 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1641
1642 /* Wait until queue initialization is complete */
1643 for (i = 0; i < BGE_TIMEOUT; i++) {
1644 DELAY(10);
1645 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1646 break;
1647 }
1648
1649 if (i == BGE_TIMEOUT) {
1650 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1651 return (ENXIO);
1652 }
1653
1654 /*
1655 * Summary of rings supported by the controller:
1656 *
1657 * Standard Receive Producer Ring
1658 * - This ring is used to feed receive buffers for "standard"
1659 * sized frames (typically 1536 bytes) to the controller.
1660 *
1661 * Jumbo Receive Producer Ring
1662 * - This ring is used to feed receive buffers for jumbo sized
1663 * frames (i.e. anything bigger than the "standard" frames)
1664 * to the controller.
1665 *
1666 * Mini Receive Producer Ring
1667 * - This ring is used to feed receive buffers for "mini"
1668 * sized frames to the controller.
1669 * - This feature required external memory for the controller
1670 * but was never used in a production system. Should always
1671 * be disabled.
1672 *
1673 * Receive Return Ring
1674 * - After the controller has placed an incoming frame into a
1675 * receive buffer that buffer is moved into a receive return
1676 * ring. The driver is then responsible for passing the
1677 * buffer up to the stack. Many versions of the controller
1678 * support multiple RR rings.
1679 *
1680 * Send Ring
1681 * - This ring is used for outgoing frames. Many versions of
1682 * the controller support multiple send rings.
1683 */
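
	/*
	 * Rough shape of each ring control block (RCB) programmed below:
	 * a 64-bit host ring address, a packed maxlen/flags word (the
	 * per-chip bit layouts are spelled out in the standard ring setup
	 * below), and a NIC-memory address for rings kept in controller
	 * SRAM.
	 */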
1684
1685 /* Initialize the standard receive producer ring control block. */
1686 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1687 rcb->bge_hostaddr.bge_addr_lo =
1688 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1689 rcb->bge_hostaddr.bge_addr_hi =
1690 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1691 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1692 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1693 if (BGE_IS_5717_PLUS(sc)) {
1694 /*
1695 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1696 * Bits 15-2 : Maximum RX frame size
1697 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1698 * Bit 0 : Reserved
1699 */
1700 rcb->bge_maxlen_flags =
1701 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1702 } else if (BGE_IS_5705_PLUS(sc)) {
1703 /*
1704 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1705 * Bits 15-2 : Reserved (should be 0)
1706 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1707 * Bit 0 : Reserved
1708 */
1709 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1710 } else {
1711 /*
1712 * Ring size is always XXX entries
1713 * Bits 31-16: Maximum RX frame size
1714 * Bits 15-2 : Reserved (should be 0)
1715 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1716 * Bit 0 : Reserved
1717 */
1718 rcb->bge_maxlen_flags =
1719 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1720 }
1721 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1722 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1723 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1724 else
1725 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1726 /* Write the standard receive producer ring control block. */
1727 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1728 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1729 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1730 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1731
1732 /* Reset the standard receive producer ring producer index. */
1733 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1734
1735 /*
1736 * Initialize the jumbo RX producer ring control
1737 * block. We set the 'ring disabled' bit in the
1738 * flags field until we're actually ready to start
1739 * using this ring (i.e. once we set the MTU
1740 * high enough to require it).
1741 */
1742 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1743 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1744 /* Get the jumbo receive producer ring RCB parameters. */
1745 rcb->bge_hostaddr.bge_addr_lo =
1746 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1747 rcb->bge_hostaddr.bge_addr_hi =
1748 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1749 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1750 sc->bge_cdata.bge_rx_jumbo_ring_map,
1751 BUS_DMASYNC_PREREAD);
1752 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1753 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1754 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1755 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1756 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1757 else
1758 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1759 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1760 rcb->bge_hostaddr.bge_addr_hi);
1761 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1762 rcb->bge_hostaddr.bge_addr_lo);
1763 /* Program the jumbo receive producer ring RCB parameters. */
1764 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1765 rcb->bge_maxlen_flags);
1766 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1767 /* Reset the jumbo receive producer ring producer index. */
1768 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1769 }
1770
1771 /* Disable the mini receive producer ring RCB. */
1772 if (BGE_IS_5700_FAMILY(sc)) {
1773 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1774 rcb->bge_maxlen_flags =
1775 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1776 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1777 rcb->bge_maxlen_flags);
1778 /* Reset the mini receive producer ring producer index. */
1779 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1780 }
1781
1782 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1783 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1784 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1785 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1786 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1787 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1788 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1789 }
1790 /*
1791 * The BD ring replenish thresholds control how often the
1792 * hardware fetches new BDs from the producer rings in host
1793 * memory. Setting the value too low on a busy system can
1794 * starve the hardware and reduce the throughput.
1795 *
1796 * Set the BD ring replenish thresholds. The recommended
1797 * values are 1/8th the number of descriptors allocated to
1798 * each ring.
1799 * XXX The 5754 requires a lower threshold, so it might be a
1800 * requirement of all 575x family chips. The Linux driver sets
1801 * the lower threshold for all 5705 family chips as well, but there
1802 * are reports that it might not need to be so strict.
1803 *
1804 * XXX Linux does some extra fiddling here for the 5906 parts as
1805 * well.
1806 */
1807 if (BGE_IS_5705_PLUS(sc))
1808 val = 8;
1809 else
1810 val = BGE_STD_RX_RING_CNT / 8;
1811 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1812 if (BGE_IS_JUMBO_CAPABLE(sc))
1813 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1814 BGE_JUMBO_RX_RING_CNT/8);
1815 if (BGE_IS_5717_PLUS(sc)) {
1816 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1817 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1818 }
1819
1820 /*
1821 * Disable all send rings by setting the 'ring disabled' bit
1822 * in the flags field of all the TX send ring control blocks,
1823 * located in NIC memory.
1824 */
1825 if (!BGE_IS_5705_PLUS(sc))
1826 /* 5700 to 5704 had 16 send rings. */
1827 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1828 else
1829 limit = 1;
1830 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1831 for (i = 0; i < limit; i++) {
1832 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1833 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1834 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1835 vrcb += sizeof(struct bge_rcb);
1836 }
1837
1838 /* Configure send ring RCB 0 (we use only the first ring) */
1839 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1840 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1841 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1842 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1843 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1844 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1845 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1846 else
1847 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1848 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1849 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1850 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1851
1852 /*
1853 * Disable all receive return rings by setting the
1854 * 'ring disabled' bit in the flags field of all the receive
1855 * return ring control blocks, located in NIC memory.
1856 */
1857 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1858 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
1859 /* Should be 17, use 16 until we get an SRAM map. */
1860 limit = 16;
1861 } else if (!BGE_IS_5705_PLUS(sc))
1862 limit = BGE_RX_RINGS_MAX;
1863 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1864 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1865 limit = 4;
1866 else
1867 limit = 1;
1868 /* Disable all receive return rings. */
1869 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1870 for (i = 0; i < limit; i++) {
1871 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1872 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1873 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1874 BGE_RCB_FLAG_RING_DISABLED);
1875 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1876 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1877 (i * (sizeof(uint64_t))), 0);
1878 vrcb += sizeof(struct bge_rcb);
1879 }
1880
1881 /*
1882 * Set up receive return ring 0. Note that the NIC address
1883 * for RX return rings is 0x0. The return rings live entirely
1884 * within the host, so the nicaddr field in the RCB isn't used.
1885 */
1886 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1887 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1888 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1889 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1890 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1891 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1892 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1893
1894 /* Set random backoff seed for TX */
1895 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1896 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1897 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1898 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1899 BGE_TX_BACKOFF_SEED_MASK);
1900
1901 /* Set inter-packet gap */
1902 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1903
1904 /*
1905 * Specify which ring to use for packets that don't match
1906 * any RX rules.
1907 */
1908 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1909
1910 /*
1911 * Configure number of RX lists. One interrupt distribution
1912 * list, sixteen active lists, one bad frames class.
1913 */
1914 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1915
1916 /* Initialize RX list placement stats mask. */
1917 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1918 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1919
1920 /* Disable host coalescing until we get it set up */
1921 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1922
1923 /* Poll to make sure it's shut down. */
1924 for (i = 0; i < BGE_TIMEOUT; i++) {
1925 DELAY(10);
1926 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1927 break;
1928 }
1929
1930 if (i == BGE_TIMEOUT) {
1931 device_printf(sc->bge_dev,
1932 "host coalescing engine failed to idle\n");
1933 return (ENXIO);
1934 }
1935
1936 /* Set up host coalescing defaults */
1937 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1938 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1939 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1940 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1941 if (!(BGE_IS_5705_PLUS(sc))) {
1942 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1943 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1944 }
1945 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1946 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1947
1948 /* Set up address of statistics block */
1949 if (!(BGE_IS_5705_PLUS(sc))) {
1950 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1951 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1952 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1953 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1954 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1955 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1956 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1957 }
1958
1959 /* Set up address of status block */
1960 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1961 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1962 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1963 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1964
1965 /* Set up status block size. */
1966 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1967 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1968 val = BGE_STATBLKSZ_FULL;
1969 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1970 } else {
1971 val = BGE_STATBLKSZ_32BYTE;
1972 bzero(sc->bge_ldata.bge_status_block, 32);
1973 }
1974 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1975 sc->bge_cdata.bge_status_map,
1976 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1977
1978 /* Turn on host coalescing state machine */
1979 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1980
1981 /* Turn on RX BD completion state machine and enable attentions */
1982 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1983 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1984
1985 /* Turn on RX list placement state machine */
1986 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1987
1988 /* Turn on RX list selector state machine. */
1989 if (!(BGE_IS_5705_PLUS(sc)))
1990 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1991
1992 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1993 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1994 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1995 BGE_MACMODE_FRMHDR_DMA_ENB;
1996
1997 if (sc->bge_flags & BGE_FLAG_TBI)
1998 val |= BGE_PORTMODE_TBI;
1999 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2000 val |= BGE_PORTMODE_GMII;
2001 else
2002 val |= BGE_PORTMODE_MII;
2003
2004 /* Turn on DMA, clear stats */
2005 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2006
2007 /* Set misc. local control, enable interrupts on attentions */
2008 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2009
2010#ifdef notdef
2011 /* Assert GPIO pins for PHY reset */
2012 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2013 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2014 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2015 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2016#endif
2017
2018 /* Turn on DMA completion state machine */
2019 if (!(BGE_IS_5705_PLUS(sc)))
2020 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2021
2022 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2023
2024 /* Enable host coalescing bug fix. */
2025 if (BGE_IS_5755_PLUS(sc))
2026 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2027
2028 /* Request larger DMA burst size to get better performance. */
2029 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2030 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2031
2032 /* Turn on write DMA state machine */
2033 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2034 DELAY(40);
2035
2036 /* Turn on read DMA state machine */
2037 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2038
2039 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2040 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2041
2042 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2043 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2044 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2045 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2046 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2047 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2048 if (sc->bge_flags & BGE_FLAG_PCIE)
2049 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2050 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2051 val |= BGE_RDMAMODE_TSO4_ENABLE;
2052 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2053 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2054 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2055 val |= BGE_RDMAMODE_TSO6_ENABLE;
2056 }
2057 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2058 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2059 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2060 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2061 BGE_IS_5717_PLUS(sc)) {
2062 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2063 /*
2064 * Adjust tx margin to prevent TX data corruption and
2065 * fix internal FIFO overflow.
2066 */
2067 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2068 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2069 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2070 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2071 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2072 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2073 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2074 }
2075 /*
2076 * Enable fix for read DMA FIFO overruns.
2077 * The fix is to limit the number of RX BDs
2078 * the hardware would fetch at a time.
2079 */
2080 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2081 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2082 }
2083
2084 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2085 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2086 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2087 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2088 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2089 }
2090
2091 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2092 DELAY(40);
2093
2094 /* Turn on RX data completion state machine */
2095 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2096
2097 /* Turn on RX BD initiator state machine */
2098 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2099
2100 /* Turn on RX data and RX BD initiator state machine */
2101 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2102
2103 /* Turn on Mbuf cluster free state machine */
2104 if (!(BGE_IS_5705_PLUS(sc)))
2105 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2106
2107 /* Turn on send BD completion state machine */
2108 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2109
2110 /* Turn on send data completion state machine */
2111 val = BGE_SDCMODE_ENABLE;
2112 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2113 val |= BGE_SDCMODE_CDELAY;
2114 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2115
2116 /* Turn on send data initiator state machine */
2117 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2118 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2119 BGE_SDIMODE_HW_LSO_PRE_DMA);
2120 else
2121 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2122
2123 /* Turn on send BD initiator state machine */
2124 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2125
2126 /* Turn on send BD selector state machine */
2127 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2128
2129 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2130 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2131 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2132
2133 /* ack/clear link change events */
2134 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2135 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2136 BGE_MACSTAT_LINK_CHANGED);
2137 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2138
2139 /*
2140 * Enable attention when the link has changed state for
2141 * devices that use auto polling.
2142 */
2143 if (sc->bge_flags & BGE_FLAG_TBI) {
2144 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2145 } else {
2146 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2147 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2148 DELAY(80);
2149 }
2150 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2151 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2152 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2153 BGE_EVTENB_MI_INTERRUPT);
2154 }
2155
2156 /*
2157 * Clear any pending link state attention.
2158 * Otherwise some link state change events may be lost until attention
2159 * is cleared by bge_intr() -> bge_link_upd() sequence.
2160 * It's not necessary on newer BCM chips - perhaps enabling link
2161 * state change attentions implies clearing pending attention.
2162 */
2163 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2164 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2165 BGE_MACSTAT_LINK_CHANGED);
2166
2167 /* Enable link state change attentions. */
2168 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2169
2170 return (0);
2171}
2172
2173const struct bge_revision *
2174bge_lookup_rev(uint32_t chipid)
2175{
2176 const struct bge_revision *br;
2177
2178 for (br = bge_revisions; br->br_name != NULL; br++) {
2179 if (br->br_chipid == chipid)
2180 return (br);
2181 }
2182
2183 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2184 if (br->br_chipid == BGE_ASICREV(chipid))
2185 return (br);
2186 }
2187
2188 return (NULL);
2189}
2190
2191const struct bge_vendor *
2192bge_lookup_vendor(uint16_t vid)
2193{
2194 const struct bge_vendor *v;
2195
2196 for (v = bge_vendors; v->v_name != NULL; v++)
2197 if (v->v_id == vid)
2198 return (v);
2199
2200 panic("%s: unknown vendor %d", __func__, vid);
2201 return (NULL);
2202}
2203
2204/*
2205 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2206 * against our list and return its name if we find a match.
2207 *
2208 * Note that since the Broadcom controller contains VPD support, we
2209 * try to get the device name string from the controller itself instead
2210 * of the compiled-in string. It guarantees we'll always announce the
2211 * right product name. We fall back to the compiled-in string when
2212 * VPD is unavailable or corrupt.
2213 */
2214static int
2215bge_probe(device_t dev)
2216{
2217 char buf[96];
2218 char model[64];
2219 const struct bge_revision *br;
2220 const char *pname;
2221 struct bge_softc *sc = device_get_softc(dev);
2222 const struct bge_type *t = bge_devs;
2223 const struct bge_vendor *v;
2224 uint32_t id;
2225 uint16_t did, vid;
2226
2227 sc->bge_dev = dev;
2228 vid = pci_get_vendor(dev);
2229 did = pci_get_device(dev);
2230 while (t->bge_vid != 0) {
2231 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2232 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2233 BGE_PCIMISCCTL_ASICREV_SHIFT;
2234 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2235 /*
2236 * Find the ASIC revision. Different chips
2237 * use different registers.
2238 */
2239 switch (pci_get_device(dev)) {
2240 case BCOM_DEVICEID_BCM5717:
2241 case BCOM_DEVICEID_BCM5718:
2242 case BCOM_DEVICEID_BCM5719:
2243 id = pci_read_config(dev,
2244 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2245 break;
2246 case BCOM_DEVICEID_BCM57761:
2247 case BCOM_DEVICEID_BCM57765:
2248 case BCOM_DEVICEID_BCM57781:
2249 case BCOM_DEVICEID_BCM57785:
2250 case BCOM_DEVICEID_BCM57791:
2251 case BCOM_DEVICEID_BCM57795:
2252 id = pci_read_config(dev,
2253 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2254 break;
2255 default:
2256 id = pci_read_config(dev,
2257 BGE_PCI_PRODID_ASICREV, 4);
2258 }
2259 }
2260 br = bge_lookup_rev(id);
2261 v = bge_lookup_vendor(vid);
2262 if (bge_has_eaddr(sc) &&
2263 pci_get_vpd_ident(dev, &pname) == 0)
2264 snprintf(model, 64, "%s", pname);
2265 else
2266 snprintf(model, 64, "%s %s", v->v_name,
2267 br != NULL ? br->br_name :
2268 "NetXtreme Ethernet Controller");
2269 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2270 br != NULL ? "" : "unknown ", id);
2271 device_set_desc_copy(dev, buf);
2272 return (0);
2273 }
2274 t++;
2275 }
2276
2277 return (ENXIO);
2278}
2279
2280static void
2281bge_dma_free(struct bge_softc *sc)
2282{
2283 int i;
2284
2285 /* Destroy DMA maps for RX buffers. */
2286 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2287 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2288 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2289 sc->bge_cdata.bge_rx_std_dmamap[i]);
2290 }
2291 if (sc->bge_cdata.bge_rx_std_sparemap)
2292 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2293 sc->bge_cdata.bge_rx_std_sparemap);
2294
2295 /* Destroy DMA maps for jumbo RX buffers. */
2296 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2297 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2298 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2299 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2300 }
2301 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2302 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2303 sc->bge_cdata.bge_rx_jumbo_sparemap);
2304
2305 /* Destroy DMA maps for TX buffers. */
2306 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2307 if (sc->bge_cdata.bge_tx_dmamap[i])
2308 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2309 sc->bge_cdata.bge_tx_dmamap[i]);
2310 }
2311
2312 if (sc->bge_cdata.bge_rx_mtag)
2313 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2314 if (sc->bge_cdata.bge_tx_mtag)
2315 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2316
2317
2318 /* Destroy standard RX ring. */
2319 if (sc->bge_cdata.bge_rx_std_ring_map)
2320 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2321 sc->bge_cdata.bge_rx_std_ring_map);
2322 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2323 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2324 sc->bge_ldata.bge_rx_std_ring,
2325 sc->bge_cdata.bge_rx_std_ring_map);
2326
2327 if (sc->bge_cdata.bge_rx_std_ring_tag)
2328 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2329
2330 /* Destroy jumbo RX ring. */
2331 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2332 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2333 sc->bge_cdata.bge_rx_jumbo_ring_map);
2334
2335 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2336 sc->bge_ldata.bge_rx_jumbo_ring)
2337 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2338 sc->bge_ldata.bge_rx_jumbo_ring,
2339 sc->bge_cdata.bge_rx_jumbo_ring_map);
2340
2341 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2342 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2343
2344 /* Destroy RX return ring. */
2345 if (sc->bge_cdata.bge_rx_return_ring_map)
2346 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2347 sc->bge_cdata.bge_rx_return_ring_map);
2348
2349 if (sc->bge_cdata.bge_rx_return_ring_map &&
2350 sc->bge_ldata.bge_rx_return_ring)
2351 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2352 sc->bge_ldata.bge_rx_return_ring,
2353 sc->bge_cdata.bge_rx_return_ring_map);
2354
2355 if (sc->bge_cdata.bge_rx_return_ring_tag)
2356 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2357
2358 /* Destroy TX ring. */
2359 if (sc->bge_cdata.bge_tx_ring_map)
2360 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2361 sc->bge_cdata.bge_tx_ring_map);
2362
2363 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2364 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2365 sc->bge_ldata.bge_tx_ring,
2366 sc->bge_cdata.bge_tx_ring_map);
2367
2368 if (sc->bge_cdata.bge_tx_ring_tag)
2369 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2370
2371 /* Destroy status block. */
2372 if (sc->bge_cdata.bge_status_map)
2373 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2374 sc->bge_cdata.bge_status_map);
2375
2376 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2377 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2378 sc->bge_ldata.bge_status_block,
2379 sc->bge_cdata.bge_status_map);
2380
2381 if (sc->bge_cdata.bge_status_tag)
2382 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2383
2384 /* Destroy statistics block. */
2385 if (sc->bge_cdata.bge_stats_map)
2386 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2387 sc->bge_cdata.bge_stats_map);
2388
2389 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2390 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2391 sc->bge_ldata.bge_stats,
2392 sc->bge_cdata.bge_stats_map);
2393
2394 if (sc->bge_cdata.bge_stats_tag)
2395 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2396
2397 if (sc->bge_cdata.bge_buffer_tag)
2398 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2399
2400 /* Destroy the parent tag. */
2401 if (sc->bge_cdata.bge_parent_tag)
2402 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2403}
2404
2405static int
2406bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2407 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2408 bus_addr_t *paddr, const char *msg)
2409{
2410 struct bge_dmamap_arg ctx;
2411 bus_addr_t lowaddr;
2412 bus_size_t ring_end;
2413 int error;
2414
2415 lowaddr = BUS_SPACE_MAXADDR;
2416again:
2417 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2418 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2419 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2420 if (error != 0) {
2421 device_printf(sc->bge_dev,
2422 "could not create %s dma tag\n", msg);
2423 return (ENOMEM);
2424 }
2425 /* Allocate DMA'able memory for ring. */
2426 error = bus_dmamem_alloc(*tag, (void **)ring,
2427 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2428 if (error != 0) {
2429 device_printf(sc->bge_dev,
2430 "could not allocate DMA'able memory for %s\n", msg);
2431 return (ENOMEM);
2432 }
2433 /* Load the address of the ring. */
2434 ctx.bge_busaddr = 0;
2435 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2436 &ctx, BUS_DMA_NOWAIT);
2437 if (error != 0) {
2438 device_printf(sc->bge_dev,
2439 "could not load DMA'able memory for %s\n", msg);
2440 return (ENOMEM);
2441 }
2442 *paddr = ctx.bge_busaddr;
2443 ring_end = *paddr + maxsize;
2444 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2445 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2446 /*
2447 * 4GB boundary crossed. Limit maximum allowable DMA
2448 * address space to 32bit and try again.
2449 */
2450 bus_dmamap_unload(*tag, *map);
2451 bus_dmamem_free(*tag, *ring, *map);
2452 bus_dma_tag_destroy(*tag);
2453 if (bootverbose)
2454 device_printf(sc->bge_dev, "4GB boundary crossed, "
2455 "limit DMA address space to 32bit for %s\n", msg);
2456 *ring = NULL;
2457 *tag = NULL;
2458 *map = NULL;
2459 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2460 goto again;
2461 }
2462 return (0);
2463}
2464
2465static int
2466bge_dma_alloc(struct bge_softc *sc)
2467{
2468 bus_addr_t lowaddr;
2469 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2470 int i, error;
2471
2472 lowaddr = BUS_SPACE_MAXADDR;
2473 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2474 lowaddr = BGE_DMA_MAXADDR;
2475 /*
2476 * Allocate the parent bus DMA tag appropriate for PCI.
2477 */
2478 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2479 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2480 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2481 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2482 if (error != 0) {
2483 device_printf(sc->bge_dev,
2484 "could not allocate parent dma tag\n");
2485 return (ENOMEM);
2486 }
2487
2488 /* Create tag for standard RX ring. */
2489 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2490 &sc->bge_cdata.bge_rx_std_ring_tag,
2491 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2492 &sc->bge_cdata.bge_rx_std_ring_map,
2493 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2494 if (error)
2495 return (error);
2496
2497 /* Create tag for RX return ring. */
2498 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2499 &sc->bge_cdata.bge_rx_return_ring_tag,
2500 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2501 &sc->bge_cdata.bge_rx_return_ring_map,
2502 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2503 if (error)
2504 return (error);
2505
2506 /* Create tag for TX ring. */
2507 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2508 &sc->bge_cdata.bge_tx_ring_tag,
2509 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2510 &sc->bge_cdata.bge_tx_ring_map,
2511 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2512 if (error)
2513 return (error);
2514
2515 /*
2516 * Create tag for status block.
2517 * Because we use only a single TX/RX/RX return ring, use the
2518 * minimum status block size, except on BCM5700 AX/BX, which
2519 * seems to want to see the full status block size regardless
2520 * of the configured number of rings.
2521 */
2522 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2523 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2524 sbsz = BGE_STATUS_BLK_SZ;
2525 else
2526 sbsz = 32;
2527 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2528 &sc->bge_cdata.bge_status_tag,
2529 (uint8_t **)&sc->bge_ldata.bge_status_block,
2530 &sc->bge_cdata.bge_status_map,
2531 &sc->bge_ldata.bge_status_block_paddr, "status block");
2532 if (error)
2533 return (error);
2534
2535 /* Create tag for statistics block. */
2536 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2537 &sc->bge_cdata.bge_stats_tag,
2538 (uint8_t **)&sc->bge_ldata.bge_stats,
2539 &sc->bge_cdata.bge_stats_map,
2540 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2541 if (error)
2542 return (error);
2543
2544 /* Create tag for jumbo RX ring. */
2545 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2546 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2547 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2548 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2549 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2550 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2551 if (error)
2552 return (error);
2553 }
2554
2555 /* Create parent tag for buffers. */
2556 boundary = 0;
2557 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2558 boundary = BGE_DMA_BNDRY;
2559 /*
2560 * XXX
2561 * A watchdog timeout issue was observed on the BCM5704 when it
2562 * lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2563 * Limiting the DMA address space to 32 bits seems to address
2564 * it.
2565 */
2566 if (sc->bge_flags & BGE_FLAG_PCIX)
2567 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2568 }
2569 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2570 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2571 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2572 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2573 if (error != 0) {
2574 device_printf(sc->bge_dev,
2575 "could not allocate buffer dma tag\n");
2576 return (ENOMEM);
2577 }
2578 /* Create tag for Tx mbufs. */
2579 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2580 txsegsz = BGE_TSOSEG_SZ;
2581 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2582 } else {
2583 txsegsz = MCLBYTES;
2584 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2585 }
2586 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2587 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2588 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2589 &sc->bge_cdata.bge_tx_mtag);
2590
2591 if (error) {
2592 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2593 return (ENOMEM);
2594 }
2595
2596 /* Create tag for Rx mbufs. */
2597 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2598 rxmaxsegsz = MJUM9BYTES;
2599 else
2600 rxmaxsegsz = MCLBYTES;
2601 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2602 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2603 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2604
2605 if (error) {
2606 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2607 return (ENOMEM);
2608 }
2609
2610 /* Create DMA maps for RX buffers. */
2611 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2612 &sc->bge_cdata.bge_rx_std_sparemap);
2613 if (error) {
2614 device_printf(sc->bge_dev,
2615 "can't create spare DMA map for RX\n");
2616 return (ENOMEM);
2617 }
2618 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2619 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2620 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2621 if (error) {
2622 device_printf(sc->bge_dev,
2623 "can't create DMA map for RX\n");
2624 return (ENOMEM);
2625 }
2626 }
2627
2628 /* Create DMA maps for TX buffers. */
2629 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2630 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2631 &sc->bge_cdata.bge_tx_dmamap[i]);
2632 if (error) {
2633 device_printf(sc->bge_dev,
2634 "can't create DMA map for TX\n");
2635 return (ENOMEM);
2636 }
2637 }
2638
2639 /* Create tags for jumbo RX buffers. */
2640 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2641 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2642 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2643 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2644 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2645 if (error) {
2646 device_printf(sc->bge_dev,
2647 "could not allocate jumbo dma tag\n");
2648 return (ENOMEM);
2649 }
2650 /* Create DMA maps for jumbo RX buffers. */
2651 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2652 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2653 if (error) {
2654 device_printf(sc->bge_dev,
2655 "can't create spare DMA map for jumbo RX\n");
2656 return (ENOMEM);
2657 }
2658 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2659 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2660 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2661 if (error) {
2662 device_printf(sc->bge_dev,
2663 "can't create DMA map for jumbo RX\n");
2664 return (ENOMEM);
2665 }
2666 }
2667 }
2668
2669 return (0);
2670}
2671
2672/*
2673 * Return true if this device has more than one port.
2674 */
2675static int
2676bge_has_multiple_ports(struct bge_softc *sc)
2677{
2678 device_t dev = sc->bge_dev;
2679 u_int b, d, f, fscan, s;
2680
2681 d = pci_get_domain(dev);
2682 b = pci_get_bus(dev);
2683 s = pci_get_slot(dev);
2684 f = pci_get_function(dev);
2685 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2686 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2687 return (1);
2688 return (0);
2689}
2690
2691/*
2692 * Return true if MSI can be used with this device.
2693 */
2694static int
2695bge_can_use_msi(struct bge_softc *sc)
2696{
2697 int can_use_msi = 0;
2698
2699 /* Disable MSI for polling(4). */
2700#ifdef DEVICE_POLLING
2701 return (0);
2702#endif
2703 switch (sc->bge_asicrev) {
2704 case BGE_ASICREV_BCM5714_A0:
2705 case BGE_ASICREV_BCM5714:
2706 /*
2707 * Apparently, MSI doesn't work when these chips are
2708 * configured in single-port mode.
2709 */
2710 if (bge_has_multiple_ports(sc))
2711 can_use_msi = 1;
2712 break;
2713 case BGE_ASICREV_BCM5750:
2714 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2715 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2716 can_use_msi = 1;
2717 break;
2718 default:
2719 if (BGE_IS_575X_PLUS(sc))
2720 can_use_msi = 1;
2721 }
2722 return (can_use_msi);
2723}
2724
2725static int
2726bge_attach(device_t dev)
2727{
2728 struct ifnet *ifp;
2729 struct bge_softc *sc;
2730 uint32_t hwcfg = 0, misccfg;
2731 u_char eaddr[ETHER_ADDR_LEN];
2732 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2733
2734 sc = device_get_softc(dev);
2735 sc->bge_dev = dev;
2736
2737 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2738
2739 /*
2740 * Map control/status registers.
2741 */
2742 pci_enable_busmaster(dev);
2743
2744 rid = PCIR_BAR(0);
2745 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2746 RF_ACTIVE);
2747
2748 if (sc->bge_res == NULL) {
2749 device_printf (sc->bge_dev, "couldn't map memory\n");
2750 error = ENXIO;
2751 goto fail;
2752 }
2753
2754 /* Save various chip information. */
2755 sc->bge_chipid =
2756 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2757 BGE_PCIMISCCTL_ASICREV_SHIFT;
2758 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2759 /*
2760		 * Find the ASIC revision. Different chips use different
2761 * registers.
2762 */
2763 switch (pci_get_device(dev)) {
2764 case BCOM_DEVICEID_BCM5717:
2765 case BCOM_DEVICEID_BCM5718:
2766 case BCOM_DEVICEID_BCM5719:
2767 sc->bge_chipid = pci_read_config(dev,
2768 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2769 break;
2770 case BCOM_DEVICEID_BCM57761:
2771 case BCOM_DEVICEID_BCM57765:
2772 case BCOM_DEVICEID_BCM57781:
2773 case BCOM_DEVICEID_BCM57785:
2774 case BCOM_DEVICEID_BCM57791:
2775 case BCOM_DEVICEID_BCM57795:
2776 sc->bge_chipid = pci_read_config(dev,
2777 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2778 break;
2779 default:
2780 sc->bge_chipid = pci_read_config(dev,
2781 BGE_PCI_PRODID_ASICREV, 4);
2782 }
2783 }
2784 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2785 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2786
2787 /* Set default PHY address. */
2788 phy_addr = 1;
2789 /*
2790 * PHY address mapping for various devices.
2791 *
2792 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2793 * ---------+-------+-------+-------+-------+
2794 * BCM57XX | 1 | X | X | X |
2795 * BCM5704 | 1 | X | 1 | X |
2796 * BCM5717 | 1 | 8 | 2 | 9 |
2797 * BCM5719 | 1 | 8 | 2 | 9 |
2798 *
2799 * Other addresses may respond but they are not
2800 * IEEE compliant PHYs and should be ignored.
2801 */
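	/*
	 * For example, on a BCM5717 whose PCI function is 1 and whose
	 * SerDes strap is set, the code below picks phy_addr = 1 + 8 = 9,
	 * matching the "F1 Sr" column of the table above; a copper
	 * function 0 would get phy_addr = 0 + 1 = 1.
	 */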
2802 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2803 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2804 f = pci_get_function(dev);
2805 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2806 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2807 BGE_SGDIGSTS_IS_SERDES)
2808 phy_addr = f + 8;
2809 else
2810 phy_addr = f + 1;
2811 } else {
2812 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2813 BGE_CPMU_PHY_STRAP_IS_SERDES)
2814 phy_addr = f + 8;
2815 else
2816 phy_addr = f + 1;
2817 }
2818 }
2819
2820 /*
2821 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2822 * 5705 A0 and A1 chips.
2823 */
2824 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2825 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2826 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2827 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2828 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2829 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2830
2831 if (bge_has_eaddr(sc))
2832 sc->bge_flags |= BGE_FLAG_EADDR;
2833
2834 /* Save chipset family. */
2835 switch (sc->bge_asicrev) {
2836 case BGE_ASICREV_BCM5717:
2837 case BGE_ASICREV_BCM5719:
2838 case BGE_ASICREV_BCM57765:
2839 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2840 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2841 BGE_FLAG_JUMBO_FRAME;
2842 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2843 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2844 /* Jumbo frame on BCM5719 A0 does not work. */
2845 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2846 }
2847 break;
2848 case BGE_ASICREV_BCM5755:
2849 case BGE_ASICREV_BCM5761:
2850 case BGE_ASICREV_BCM5784:
2851 case BGE_ASICREV_BCM5785:
2852 case BGE_ASICREV_BCM5787:
2853 case BGE_ASICREV_BCM57780:
2854 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2855 BGE_FLAG_5705_PLUS;
2856 break;
2857 case BGE_ASICREV_BCM5700:
2858 case BGE_ASICREV_BCM5701:
2859 case BGE_ASICREV_BCM5703:
2860 case BGE_ASICREV_BCM5704:
2861 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2862 break;
2863 case BGE_ASICREV_BCM5714_A0:
2864 case BGE_ASICREV_BCM5780:
2865 case BGE_ASICREV_BCM5714:
2866 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2867 /* FALLTHROUGH */
2868 case BGE_ASICREV_BCM5750:
2869 case BGE_ASICREV_BCM5752:
2870 case BGE_ASICREV_BCM5906:
2871 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2872 /* FALLTHROUGH */
2873 case BGE_ASICREV_BCM5705:
2874 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2875 break;
2876 }
2877
2878 /* Set various PHY bug flags. */
2879 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2880 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2881 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2882 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2883 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2884 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2885 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2886 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2887 if (pci_get_subvendor(dev) == DELL_VENDORID)
2888 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2889 if ((BGE_IS_5705_PLUS(sc)) &&
2890 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2891 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2892 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2893 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2894 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2895 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2896 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2897 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2898 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2899 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2900 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2901 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2902 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2903 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2904 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2905 } else
2906 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2907 }
2908
2909	/* Identify the chips that use a CPMU. */
2910 if (BGE_IS_5717_PLUS(sc) ||
2911 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2912 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2913 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2914 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2915 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2916 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2917 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2918 else
2919 sc->bge_mi_mode = BGE_MIMODE_BASE;
2920 /* Enable auto polling for BCM570[0-5]. */
2921 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2922 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2923
2924 /*
2925	 * All Broadcom controllers have the 4GB boundary DMA bug.
2926	 * Whenever a DMA transfer crosses a multiple of the 4GB boundary
2927	 * (4GB, 8GB, 12GB, etc.), i.e. makes the transition from
2928	 * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2929	 * state machine will lock up and cause the device to hang.
2930 */
2931 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
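	/*
	 * For example (hypothetical addresses): a 2KB buffer mapped at
	 * bus address 0xFFFFF800 would span 0x0_FFFFF800-0x1_000007FF
	 * and cross the first 4GB line; the boundary constraint placed
	 * on the parent buffer DMA tag below keeps busdma from handing
	 * the chip such a segment.
	 */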
2932
2933	/* BCM5755 or higher and BCM5906 have the short DMA bug. */
2934 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2935 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2936
2937 /*
2938	 * BCM5719 cannot handle DMA requests for DMA segments that
2939	 * are larger than 4KB. However, the maximum DMA segment
2940	 * size created in the DMA tag is 4KB for TSO, so we
2941	 * shouldn't encounter the issue here.
2942 */
2943 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2944 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2945
2946 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2947 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2948 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2949 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2950 sc->bge_flags |= BGE_FLAG_5788;
2951 }
2952
2953 capmask = BMSR_DEFCAPMASK;
2954 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2955 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2956 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2957 pci_get_vendor(dev) == BCOM_VENDORID &&
2958 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2959 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2960 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2961 (pci_get_vendor(dev) == BCOM_VENDORID &&
2962 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2963 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2964 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2965 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2966 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2967 /* These chips are 10/100 only. */
2968 capmask &= ~BMSR_EXTSTAT;
2969 }
2970
2971 /*
2972	 * Some controllers seem to require special firmware to use
2973	 * TSO. But that firmware is not available to FreeBSD, and Linux
2974	 * claims that the TSO performed by the firmware is slower than
2975	 * hardware based TSO. Moreover, the firmware based TSO has a
2976	 * known bug: it can't handle TSO if the ethernet header plus
2977	 * IP/TCP header is greater than 80 bytes. A workaround for the
2978	 * TSO bug exists, but it seems more expensive than not using
2979	 * TSO at all. Some hardware also has the TSO bug, so limit
2980	 * TSO to the controllers that are not affected by TSO issues
2981	 * (e.g. 5755 or higher).
2982 */
2983 if (BGE_IS_5717_PLUS(sc)) {
2984 /* BCM5717 requires different TSO configuration. */
2985 sc->bge_flags |= BGE_FLAG_TSO3;
2986 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2987 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2988 /* TSO on BCM5719 A0 does not work. */
2989 sc->bge_flags &= ~BGE_FLAG_TSO3;
2990 }
2991 } else if (BGE_IS_5755_PLUS(sc)) {
2992 /*
2993		 * BCM5754 and BCM5787 share the same ASIC id, so an
2994		 * explicit device id check is required.
2995		 * For unknown reasons, TSO does not work on BCM5755M.
2996 */
2997 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2998 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2999 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3000 sc->bge_flags |= BGE_FLAG_TSO;
3001 }
3002
3003 /*
3004 * Check if this is a PCI-X or PCI Express device.
3005 */
3006 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3007 /*
3008		 * Found a PCI Express capabilities register; this
3009 * must be a PCI Express device.
3010 */
3011 sc->bge_flags |= BGE_FLAG_PCIE;
3012 sc->bge_expcap = reg;
3013 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3014 pci_set_max_read_req(dev, 2048);
3015 else if (pci_get_max_read_req(dev) != 4096)
3016 pci_set_max_read_req(dev, 4096);
3017 } else {
3018 /*
3019 * Check if the device is in PCI-X Mode.
3020 * (This bit is not valid on PCI Express controllers.)
3021 */
3022 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3023 sc->bge_pcixcap = reg;
3024 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3025 BGE_PCISTATE_PCI_BUSMODE) == 0)
3026 sc->bge_flags |= BGE_FLAG_PCIX;
3027 }
3028
3029 /*
3030 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3031 * not actually a MAC controller bug but an issue with the embedded
3032 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3033 */
3034 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3035 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3036 /*
3037 * Allocate the interrupt, using MSI if possible. These devices
3038 * support 8 MSI messages, but only the first one is used in
3039 * normal operation.
3040 */
3041 rid = 0;
3042 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3043 sc->bge_msicap = reg;
3044 if (bge_can_use_msi(sc)) {
3045 msicount = pci_msi_count(dev);
3046 if (msicount > 1)
3047 msicount = 1;
3048 } else
3049 msicount = 0;
3050 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3051 rid = 1;
3052 sc->bge_flags |= BGE_FLAG_MSI;
3053 }
3054 }
3055
3056 /*
3057	 * All controllers except BCM5700 support tagged status, but
3058	 * we use tagged status only in the MSI case on BCM5717.
3059	 * Otherwise MSI on BCM5717 does not work.
3060 */
3061#ifndef DEVICE_POLLING
3062 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3063 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3064#endif
3065
3066 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3067 RF_SHAREABLE | RF_ACTIVE);
3068
3069 if (sc->bge_irq == NULL) {
3070 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3071 error = ENXIO;
3072 goto fail;
3073 }
3074
3075 device_printf(dev,
3076 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3077 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3078 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3079 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3080
3081 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3082
3083 /* Try to reset the chip. */
3084 if (bge_reset(sc)) {
3085 device_printf(sc->bge_dev, "chip reset failed\n");
3086 error = ENXIO;
3087 goto fail;
3088 }
3089
3090 sc->bge_asf_mode = 0;
3091 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3092 BGE_SRAM_DATA_SIG_MAGIC)) {
3093 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3094 & BGE_HWCFG_ASF) {
3095 sc->bge_asf_mode |= ASF_ENABLE;
3096 sc->bge_asf_mode |= ASF_STACKUP;
3097 if (BGE_IS_575X_PLUS(sc))
3098 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3099 }
3100 }
3101
3102 /* Try to reset the chip again the nice way. */
3103 bge_stop_fw(sc);
3104 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3105 if (bge_reset(sc)) {
3106 device_printf(sc->bge_dev, "chip reset failed\n");
3107 error = ENXIO;
3108 goto fail;
3109 }
3110
3111 bge_sig_legacy(sc, BGE_RESET_STOP);
3112 bge_sig_post_reset(sc, BGE_RESET_STOP);
3113
3114 if (bge_chipinit(sc)) {
3115 device_printf(sc->bge_dev, "chip initialization failed\n");
3116 error = ENXIO;
3117 goto fail;
3118 }
3119
3120 error = bge_get_eaddr(sc, eaddr);
3121 if (error) {
3122 device_printf(sc->bge_dev,
3123 "failed to read station address\n");
3124 error = ENXIO;
3125 goto fail;
3126 }
3127
3128 /* 5705 limits RX return ring to 512 entries. */
3129 if (BGE_IS_5717_PLUS(sc))
3130 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3131 else if (BGE_IS_5705_PLUS(sc))
3132 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3133 else
3134 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3135
3136 if (bge_dma_alloc(sc)) {
3137 device_printf(sc->bge_dev,
3138 "failed to allocate DMA resources\n");
3139 error = ENXIO;
3140 goto fail;
3141 }
3142
3143 bge_add_sysctls(sc);
3144
3145 /* Set default tuneable values. */
3146 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3147 sc->bge_rx_coal_ticks = 150;
3148 sc->bge_tx_coal_ticks = 150;
3149 sc->bge_rx_max_coal_bds = 10;
3150 sc->bge_tx_max_coal_bds = 10;
3151
3152 /* Initialize checksum features to use. */
3153 sc->bge_csum_features = BGE_CSUM_FEATURES;
3154 if (sc->bge_forced_udpcsum != 0)
3155 sc->bge_csum_features |= CSUM_UDP;
3156
3157 /* Set up ifnet structure */
3158 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3159 if (ifp == NULL) {
3160 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3161 error = ENXIO;
3162 goto fail;
3163 }
3164 ifp->if_softc = sc;
3165 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3166 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3167 ifp->if_ioctl = bge_ioctl;
3168 ifp->if_start = bge_start;
3169 ifp->if_init = bge_init;
3170 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3171 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3172 IFQ_SET_READY(&ifp->if_snd);
3173 ifp->if_hwassist = sc->bge_csum_features;
3174 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3175 IFCAP_VLAN_MTU;
3176 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3177 ifp->if_hwassist |= CSUM_TSO;
3178 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3179 }
3180#ifdef IFCAP_VLAN_HWCSUM
3181 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3182#endif
3183 ifp->if_capenable = ifp->if_capabilities;
3184#ifdef DEVICE_POLLING
3185 ifp->if_capabilities |= IFCAP_POLLING;
3186#endif
3187
3188 /*
3189 * 5700 B0 chips do not support checksumming correctly due
3190 * to hardware bugs.
3191 */
3192 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3193 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3194 ifp->if_capenable &= ~IFCAP_HWCSUM;
3195 ifp->if_hwassist = 0;
3196 }
3197
3198 /*
3199 * Figure out what sort of media we have by checking the
3200 * hardware config word in the first 32k of NIC internal memory,
3201 * or fall back to examining the EEPROM if necessary.
3202 * Note: on some BCM5700 cards, this value appears to be unset.
3203 * If that's the case, we have to rely on identifying the NIC
3204 * by its PCI subsystem ID, as we do below for the SysKonnect
3205 * SK-9D41.
3206 */
3207 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3208 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3209 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3210 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3211 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3212 sizeof(hwcfg))) {
3213 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3214 error = ENXIO;
3215 goto fail;
3216 }
3217 hwcfg = ntohl(hwcfg);
3218 }
3219
3220 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3221 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3222 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3223 if (BGE_IS_5714_FAMILY(sc))
3224 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3225 else
3226 sc->bge_flags |= BGE_FLAG_TBI;
3227 }
3228
3229 if (sc->bge_flags & BGE_FLAG_TBI) {
3230 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3231 bge_ifmedia_sts);
3232 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3233 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3234 0, NULL);
3235 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3236 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3237 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3238 } else {
3239 /*
3240		 * Do transceiver setup and tell the firmware the
3241		 * driver is down so we can try to get access for the
3242		 * probe if ASF is running. Retry a couple of times
3243		 * if we get a conflict with the ASF firmware accessing
3244		 * the PHY.
3245 */
3246 trys = 0;
3247 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3248again:
3249 bge_asf_driver_up(sc);
3250
3251 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3252 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3253 MIIF_DOPAUSE);
3254 if (error != 0) {
3255 if (trys++ < 4) {
3256 device_printf(sc->bge_dev, "Try again\n");
3257 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3258 BMCR_RESET);
3259 goto again;
3260 }
3261 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3262 goto fail;
3263 }
3264
3265 /*
3266 * Now tell the firmware we are going up after probing the PHY
3267 */
3268 if (sc->bge_asf_mode & ASF_STACKUP)
3269 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3270 }
3271
3272 /*
3273 * When using the BCM5701 in PCI-X mode, data corruption has
3274 * been observed in the first few bytes of some received packets.
3275 * Aligning the packet buffer in memory eliminates the corruption.
3276 * Unfortunately, this misaligns the packet payloads. On platforms
3277 * which do not support unaligned accesses, we will realign the
3278 * payloads by copying the received packets.
3279 */
3280 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3281 sc->bge_flags & BGE_FLAG_PCIX)
3282 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3283
3284 /*
3285 * Call MI attach routine.
3286 */
3287 ether_ifattach(ifp, eaddr);
3288 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3289
3290 /* Tell upper layer we support long frames. */
3291 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3292
3293 /*
3294 * Hookup IRQ last.
3295 */
3296 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3297 /* Take advantage of single-shot MSI. */
3298 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3299 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3300 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3301 taskqueue_thread_enqueue, &sc->bge_tq);
3302 if (sc->bge_tq == NULL) {
3303 device_printf(dev, "could not create taskqueue.\n");
3304 ether_ifdetach(ifp);
3305 error = ENXIO;
3306 goto fail;
3307 }
3308 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3309 device_get_nameunit(sc->bge_dev));
3310 error = bus_setup_intr(dev, sc->bge_irq,
3311 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3312 &sc->bge_intrhand);
3313 if (error)
3314 ether_ifdetach(ifp);
3315 } else
3316 error = bus_setup_intr(dev, sc->bge_irq,
3317 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3318 &sc->bge_intrhand);
3319
3320 if (error) {
3321 bge_detach(dev);
3322 device_printf(sc->bge_dev, "couldn't set up irq\n");
3323 }
3324
3325 return (0);
3326
3327fail:
3328 bge_release_resources(sc);
3329
3330 return (error);
3331}
3332
3333static int
3334bge_detach(device_t dev)
3335{
3336 struct bge_softc *sc;
3337 struct ifnet *ifp;
3338
3339 sc = device_get_softc(dev);
3340 ifp = sc->bge_ifp;
3341
3342#ifdef DEVICE_POLLING
3343 if (ifp->if_capenable & IFCAP_POLLING)
3344 ether_poll_deregister(ifp);
3345#endif
3346
3347 BGE_LOCK(sc);
3348 bge_stop(sc);
3349 bge_reset(sc);
3350 BGE_UNLOCK(sc);
3351
3352 callout_drain(&sc->bge_stat_ch);
3353
3354 if (sc->bge_tq)
3355 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3356 ether_ifdetach(ifp);
3357
3358 if (sc->bge_flags & BGE_FLAG_TBI) {
3359 ifmedia_removeall(&sc->bge_ifmedia);
3360 } else {
3361 bus_generic_detach(dev);
3362 device_delete_child(dev, sc->bge_miibus);
3363 }
3364
3365 bge_release_resources(sc);
3366
3367 return (0);
3368}
3369
3370static void
3371bge_release_resources(struct bge_softc *sc)
3372{
3373 device_t dev;
3374
3375 dev = sc->bge_dev;
3376
3377 if (sc->bge_tq != NULL)
3378 taskqueue_free(sc->bge_tq);
3379
3380 if (sc->bge_intrhand != NULL)
3381 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3382
3383 if (sc->bge_irq != NULL)
3384 bus_release_resource(dev, SYS_RES_IRQ,
3385 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3386
3387 if (sc->bge_flags & BGE_FLAG_MSI)
3388 pci_release_msi(dev);
3389
3390 if (sc->bge_res != NULL)
3391 bus_release_resource(dev, SYS_RES_MEMORY,
3392 PCIR_BAR(0), sc->bge_res);
3393
3394 if (sc->bge_ifp != NULL)
3395 if_free(sc->bge_ifp);
3396
3397 bge_dma_free(sc);
3398
3399 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3400 BGE_LOCK_DESTROY(sc);
3401}
3402
3403static int
3404bge_reset(struct bge_softc *sc)
3405{
3406 device_t dev;
3407 uint32_t cachesize, command, pcistate, reset, val;
3408 void (*write_op)(struct bge_softc *, int, int);
3409 uint16_t devctl;
3410 int i;
3411
3412 dev = sc->bge_dev;
3413
3414 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3415 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3416 if (sc->bge_flags & BGE_FLAG_PCIE)
3417 write_op = bge_writemem_direct;
3418 else
3419 write_op = bge_writemem_ind;
3420 } else
3421 write_op = bge_writereg_ind;
3422
3423 /* Save some important PCI state. */
3424 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3425 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3426 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3427
3428 pci_write_config(dev, BGE_PCI_MISC_CTL,
3429 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3430 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3431
3432 /* Disable fastboot on controllers that support it. */
3433 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3434 BGE_IS_5755_PLUS(sc)) {
3435 if (bootverbose)
3436 device_printf(dev, "Disabling fastboot\n");
3437 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3438 }
3439
3440 /*
3441 * Write the magic number to SRAM at offset 0xB50.
3442 * When firmware finishes its initialization it will
3443 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3444 */
3445 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3446
3447 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3448
3449 /* XXX: Broadcom Linux driver. */
3450 if (sc->bge_flags & BGE_FLAG_PCIE) {
3451 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3452 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3453 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3454 /* Prevent PCIE link training during global reset */
3455 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3456 reset |= 1 << 29;
3457 }
3458 }
3459
3460 /*
3461 * Set GPHY Power Down Override to leave GPHY
3462 * powered up in D0 uninitialized.
3463 */
3464 if (BGE_IS_5705_PLUS(sc) &&
3465 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3466 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3467
3468 /* Issue global reset */
3469 write_op(sc, BGE_MISC_CFG, reset);
3470
3471 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3472 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3473 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3474 val | BGE_VCPU_STATUS_DRV_RESET);
3475 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3476 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3477 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3478 }
3479
3480 DELAY(1000);
3481
3482 /* XXX: Broadcom Linux driver. */
3483 if (sc->bge_flags & BGE_FLAG_PCIE) {
3484 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3485 DELAY(500000); /* wait for link training to complete */
3486 val = pci_read_config(dev, 0xC4, 4);
3487 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3488 }
3489 devctl = pci_read_config(dev,
3490 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3491 /* Clear enable no snoop and disable relaxed ordering. */
3492 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3493 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3494 /* Set PCIE max payload size to 128. */
3495 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3496 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3497 devctl, 2);
3498 /* Clear error status. */
3499 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3500 PCIM_EXP_STA_CORRECTABLE_ERROR |
3501 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3502 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3503 }
3504
3505 /* Reset some of the PCI state that got zapped by reset. */
3506 pci_write_config(dev, BGE_PCI_MISC_CTL,
3507 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3508 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3509 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3510 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3511 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3512 /*
3513	 * Disable PCI-X relaxed ordering to ensure the status block update
3514	 * comes before the packet buffer DMA. Otherwise the driver may
3515	 * read a stale status block.
3516 */
3517 if (sc->bge_flags & BGE_FLAG_PCIX) {
3518 devctl = pci_read_config(dev,
3519 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3520 devctl &= ~PCIXM_COMMAND_ERO;
3521 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3522 devctl &= ~PCIXM_COMMAND_MAX_READ;
3523 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3524 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3525 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3526 PCIXM_COMMAND_MAX_READ);
3527 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3528 }
3529 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3530 devctl, 2);
3531 }
3532 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3533 if (BGE_IS_5714_FAMILY(sc)) {
3534 /* This chip disables MSI on reset. */
3535 if (sc->bge_flags & BGE_FLAG_MSI) {
3536 val = pci_read_config(dev,
3537 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3538 pci_write_config(dev,
3539 sc->bge_msicap + PCIR_MSI_CTRL,
3540 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3541 val = CSR_READ_4(sc, BGE_MSI_MODE);
3542 CSR_WRITE_4(sc, BGE_MSI_MODE,
3543 val | BGE_MSIMODE_ENABLE);
3544 }
3545 val = CSR_READ_4(sc, BGE_MARB_MODE);
3546 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3547 } else
3548 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3549
3550 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3551 for (i = 0; i < BGE_TIMEOUT; i++) {
3552 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3553 if (val & BGE_VCPU_STATUS_INIT_DONE)
3554 break;
3555 DELAY(100);
3556 }
3557 if (i == BGE_TIMEOUT) {
3558 device_printf(dev, "reset timed out\n");
3559 return (1);
3560 }
3561 } else {
3562 /*
3563 * Poll until we see the 1's complement of the magic number.
3564 * This indicates that the firmware initialization is complete.
3565 * We expect this to fail if no chip containing the Ethernet
3566 * address is fitted though.
3567 */
3568 for (i = 0; i < BGE_TIMEOUT; i++) {
3569 DELAY(10);
3570 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3571 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3572 break;
3573 }
3574
3575 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3576 device_printf(dev,
3577 "firmware handshake timed out, found 0x%08x\n",
3578 val);
3579 /* BCM57765 A0 needs additional time before accessing. */
3580 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3581 DELAY(10 * 1000); /* XXX */
3582 }
3583
3584 /*
3585 * XXX Wait for the value of the PCISTATE register to
3586 * return to its original pre-reset state. This is a
3587 * fairly good indicator of reset completion. If we don't
3588 * wait for the reset to fully complete, trying to read
3589 * from the device's non-PCI registers may yield garbage
3590 * results.
3591 */
3592 for (i = 0; i < BGE_TIMEOUT; i++) {
3593 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3594 break;
3595 DELAY(10);
3596 }
3597
3598 /* Fix up byte swapping. */
3599 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3600 BGE_MODECTL_BYTESWAP_DATA);
3601
3602 /* Tell the ASF firmware we are up */
3603 if (sc->bge_asf_mode & ASF_STACKUP)
3604 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3605
3606 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3607
3608 /*
3609 * The 5704 in TBI mode apparently needs some special
3610	 * adjustment to ensure the SERDES drive level is set
3611 * to 1.2V.
3612 */
3613 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3614 sc->bge_flags & BGE_FLAG_TBI) {
3615 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3616 val = (val & ~0xFFF) | 0x880;
3617 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3618 }
3619
3620 /* XXX: Broadcom Linux driver. */
3621 if (sc->bge_flags & BGE_FLAG_PCIE &&
3622 !BGE_IS_5717_PLUS(sc) &&
3623 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3624 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3625 /* Enable Data FIFO protection. */
3626 val = CSR_READ_4(sc, 0x7C00);
3627 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3628 }
3629 DELAY(10000);
3630
3631 return (0);
3632}
3633
3634static __inline void
3635bge_rxreuse_std(struct bge_softc *sc, int i)
3636{
3637 struct bge_rx_bd *r;
3638
3639 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3640 r->bge_flags = BGE_RXBDFLAG_END;
3641 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3642 r->bge_idx = i;
3643 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3644}
3645
3646static __inline void
3647bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3648{
3649 struct bge_extrx_bd *r;
3650
3651 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3652 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3653 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3654 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3655 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3656 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3657 r->bge_idx = i;
3658 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3659}
3660
3661/*
3662 * Frame reception handling. This is called if there's a frame
3663 * on the receive return list.
3664 *
3665 * Note: we have to be able to handle two possibilities here:
3666 * 1) the frame is from the jumbo receive ring
3667 * 2) the frame is from the standard receive ring
3668 */
3669
3670static int
3671bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3672{
3673 struct ifnet *ifp;
3674 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3675 uint16_t rx_cons;
3676
3677 rx_cons = sc->bge_rx_saved_considx;
3678
3679 /* Nothing to do. */
3680 if (rx_cons == rx_prod)
3681 return (rx_npkts);
3682
3683 ifp = sc->bge_ifp;
3684
3685 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3686 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3687 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3688 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3689 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3690 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3691 (MCLBYTES - ETHER_ALIGN))
3692 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3693 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3694
3695 while (rx_cons != rx_prod) {
3696 struct bge_rx_bd *cur_rx;
3697 uint32_t rxidx;
3698 struct mbuf *m = NULL;
3699 uint16_t vlan_tag = 0;
3700 int have_tag = 0;
3701
3702#ifdef DEVICE_POLLING
3703 if (ifp->if_capenable & IFCAP_POLLING) {
3704 if (sc->rxcycles <= 0)
3705 break;
3706 sc->rxcycles--;
3707 }
3708#endif
3709
3710 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3711
3712 rxidx = cur_rx->bge_idx;
3713 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3714
3715 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3716 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3717 have_tag = 1;
3718 vlan_tag = cur_rx->bge_vlan_tag;
3719 }
3720
3721 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3722 jumbocnt++;
3723 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3724 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3725 bge_rxreuse_jumbo(sc, rxidx);
3726 continue;
3727 }
3728 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3729 bge_rxreuse_jumbo(sc, rxidx);
3730 ifp->if_iqdrops++;
3731 continue;
3732 }
3733 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3734 } else {
3735 stdcnt++;
3736 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3737 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3738 bge_rxreuse_std(sc, rxidx);
3739 continue;
3740 }
3741 if (bge_newbuf_std(sc, rxidx) != 0) {
3742 bge_rxreuse_std(sc, rxidx);
3743 ifp->if_iqdrops++;
3744 continue;
3745 }
3746 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3747 }
3748
3749 ifp->if_ipackets++;
3750#ifndef __NO_STRICT_ALIGNMENT
3751 /*
3752 * For architectures with strict alignment we must make sure
3753 * the payload is aligned.
3754 */
3755 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3756 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3757 cur_rx->bge_len);
3758 m->m_data += ETHER_ALIGN;
3759 }
3760#endif
3761 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3762 m->m_pkthdr.rcvif = ifp;
3763
3764 if (ifp->if_capenable & IFCAP_RXCSUM)
3765 bge_rxcsum(sc, cur_rx, m);
3766
3767 /*
3768 * If we received a packet with a vlan tag,
3769 * attach that information to the packet.
3770 */
3771 if (have_tag) {
3772 m->m_pkthdr.ether_vtag = vlan_tag;
3773 m->m_flags |= M_VLANTAG;
3774 }
3775
3776 if (holdlck != 0) {
3777 BGE_UNLOCK(sc);
3778 (*ifp->if_input)(ifp, m);
3779 BGE_LOCK(sc);
3780 } else
3781 (*ifp->if_input)(ifp, m);
3782 rx_npkts++;
3783
3784 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3785 return (rx_npkts);
3786 }
3787
3788 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3789 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3790 if (stdcnt > 0)
3791 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3792 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3793
3794 if (jumbocnt > 0)
3795 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3796 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3797
3798 sc->bge_rx_saved_considx = rx_cons;
3799 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3800 if (stdcnt)
3801 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3802 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3803 if (jumbocnt)
3804 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3805 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3806#ifdef notyet
3807 /*
3808 * This register wraps very quickly under heavy packet drops.
3809 * If you need correct statistics, you can enable this check.
3810 */
3811 if (BGE_IS_5705_PLUS(sc))
3812 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3813#endif
3814 return (rx_npkts);
3815}
3816
3817static void
3818bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3819{
3820
3821 if (BGE_IS_5717_PLUS(sc)) {
3822 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3823 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3824 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3825 if ((cur_rx->bge_error_flag &
3826 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3827 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3828 }
3829 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3830 m->m_pkthdr.csum_data =
3831 cur_rx->bge_tcp_udp_csum;
3832 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3833 CSUM_PSEUDO_HDR;
3834 }
3835 }
3836 } else {
3837 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3838 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3839 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3840 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3841 }
3842 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3843 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3844 m->m_pkthdr.csum_data =
3845 cur_rx->bge_tcp_udp_csum;
3846 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3847 CSUM_PSEUDO_HDR;
3848 }
3849 }
3850}
3851
3852static void
3853bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3854{
3855 struct bge_tx_bd *cur_tx;
3856 struct ifnet *ifp;
3857
3858 BGE_LOCK_ASSERT(sc);
3859
3860 /* Nothing to do. */
3861 if (sc->bge_tx_saved_considx == tx_cons)
3862 return;
3863
3864 ifp = sc->bge_ifp;
3865
3866 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3867 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3868 /*
3869 * Go through our tx ring and free mbufs for those
3870 * frames that have been sent.
3871 */
3872 while (sc->bge_tx_saved_considx != tx_cons) {
3873 uint32_t idx;
3874
3875 idx = sc->bge_tx_saved_considx;
3876 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3877 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3878 ifp->if_opackets++;
3879 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3880 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3881 sc->bge_cdata.bge_tx_dmamap[idx],
3882 BUS_DMASYNC_POSTWRITE);
3883 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3884 sc->bge_cdata.bge_tx_dmamap[idx]);
3885 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3886 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3887 }
3888 sc->bge_txcnt--;
3889 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3890 }
3891
3892 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3893 if (sc->bge_txcnt == 0)
3894 sc->bge_timer = 0;
3895}
3896
3897#ifdef DEVICE_POLLING
3898static int
3899bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3900{
3901 struct bge_softc *sc = ifp->if_softc;
3902 uint16_t rx_prod, tx_cons;
3903 uint32_t statusword;
3904 int rx_npkts = 0;
3905
3906 BGE_LOCK(sc);
3907 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3908 BGE_UNLOCK(sc);
3909 return (rx_npkts);
3910 }
3911
3912 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3913 sc->bge_cdata.bge_status_map,
3914 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3915 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3916 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3917
3918 statusword = sc->bge_ldata.bge_status_block->bge_status;
3919 sc->bge_ldata.bge_status_block->bge_status = 0;
3920
3921 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3922 sc->bge_cdata.bge_status_map,
3923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3924
3925 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3926 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3927 sc->bge_link_evt++;
3928
3929 if (cmd == POLL_AND_CHECK_STATUS)
3930 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3931 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3932 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3933 bge_link_upd(sc);
3934
3935 sc->rxcycles = count;
3936 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3937 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3938 BGE_UNLOCK(sc);
3939 return (rx_npkts);
3940 }
3941 bge_txeof(sc, tx_cons);
3942 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3943 bge_start_locked(ifp);
3944
3945 BGE_UNLOCK(sc);
3946 return (rx_npkts);
3947}
3948#endif /* DEVICE_POLLING */
3949
3950static int
3951bge_msi_intr(void *arg)
3952{
3953 struct bge_softc *sc;
3954
3955 sc = (struct bge_softc *)arg;
3956 /*
3957	 * This interrupt is not shared and the controller has already
3958	 * disabled further interrupts.
3959 */
3960 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3961 return (FILTER_HANDLED);
3962}
3963
3964static void
3965bge_intr_task(void *arg, int pending)
3966{
3967 struct bge_softc *sc;
3968 struct ifnet *ifp;
3969 uint32_t status, status_tag;
3970 uint16_t rx_prod, tx_cons;
3971
3972 sc = (struct bge_softc *)arg;
3973 ifp = sc->bge_ifp;
3974
3975 BGE_LOCK(sc);
3976 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3977 BGE_UNLOCK(sc);
3978 return;
3979 }
3980
3981 /* Get updated status block. */
3982 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3983 sc->bge_cdata.bge_status_map,
3984 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3985
3986	/* Save producer/consumer indexes. */
3987 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3988 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3989 status = sc->bge_ldata.bge_status_block->bge_status;
3990 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3991 sc->bge_ldata.bge_status_block->bge_status = 0;
3992 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3993 sc->bge_cdata.bge_status_map,
3994 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3995 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3996 status_tag = 0;
3997
3998 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3999 bge_link_upd(sc);
4000
4001 /* Let controller work. */
4002 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4003
4004 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4005 sc->bge_rx_saved_considx != rx_prod) {
4006 /* Check RX return ring producer/consumer. */
4007 BGE_UNLOCK(sc);
4008 bge_rxeof(sc, rx_prod, 0);
4009 BGE_LOCK(sc);
4010 }
4011 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4012 /* Check TX ring producer/consumer. */
4013 bge_txeof(sc, tx_cons);
4014 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4015 bge_start_locked(ifp);
4016 }
4017 BGE_UNLOCK(sc);
4018}
4019
4020static void
4021bge_intr(void *xsc)
4022{
4023 struct bge_softc *sc;
4024 struct ifnet *ifp;
4025 uint32_t statusword;
4026 uint16_t rx_prod, tx_cons;
4027
4028 sc = xsc;
4029
4030 BGE_LOCK(sc);
4031
4032 ifp = sc->bge_ifp;
4033
4034#ifdef DEVICE_POLLING
4035 if (ifp->if_capenable & IFCAP_POLLING) {
4036 BGE_UNLOCK(sc);
4037 return;
4038 }
4039#endif
4040
4041 /*
4042 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4043 * disable interrupts by writing nonzero like we used to, since with
4044 * our current organization this just gives complications and
4045 * pessimizations for re-enabling interrupts. We used to have races
4046 * instead of the necessary complications. Disabling interrupts
4047 * would just reduce the chance of a status update while we are
4048 * running (by switching to the interrupt-mode coalescence
4049 * parameters), but this chance is already very low so it is more
4050 * efficient to get another interrupt than prevent it.
4051 *
4052 * We do the ack first to ensure another interrupt if there is a
4053 * status update after the ack. We don't check for the status
4054 * changing later because it is more efficient to get another
4055 * interrupt than prevent it, not quite as above (not checking is
4056 * a smaller optimization than not toggling the interrupt enable,
4057	 * since checking doesn't involve PCI accesses and toggling requires
4058 * the status check). So toggling would probably be a pessimization
4059 * even with MSI. It would only be needed for using a task queue.
4060 */
4061 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4062
4063 /*
4064 * Do the mandatory PCI flush as well as get the link status.
4065 */
4066 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4067
4068 /* Make sure the descriptor ring indexes are coherent. */
4069 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4070 sc->bge_cdata.bge_status_map,
4071 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4072 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4073 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4074 sc->bge_ldata.bge_status_block->bge_status = 0;
4075 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4076 sc->bge_cdata.bge_status_map,
4077 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4078
4079 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4080 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4081 statusword || sc->bge_link_evt)
4082 bge_link_upd(sc);
4083
4084 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4085 /* Check RX return ring producer/consumer. */
4086 bge_rxeof(sc, rx_prod, 1);
4087 }
4088
4089 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4090 /* Check TX ring producer/consumer. */
4091 bge_txeof(sc, tx_cons);
4092 }
4093
4094 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4095 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4096 bge_start_locked(ifp);
4097
4098 BGE_UNLOCK(sc);
4099}
4100
4101static void
4102bge_asf_driver_up(struct bge_softc *sc)
4103{
4104 if (sc->bge_asf_mode & ASF_STACKUP) {
4105		/* Send ASF heartbeat approx. every 2s */
4106 if (sc->bge_asf_count)
4107			sc->bge_asf_count--;
4108 else {
4109 sc->bge_asf_count = 2;
4110 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4111 BGE_FW_CMD_DRV_ALIVE);
4112 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4113 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 3);
4113 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4114 BGE_FW_HB_TIMEOUT_SEC);
4114 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4115 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4116 BGE_RX_CPU_DRV_EVENT);
4117 }
4118 }
4119}
4120
4121static void
4122bge_tick(void *xsc)
4123{
4124 struct bge_softc *sc = xsc;
4125 struct mii_data *mii = NULL;
4126
4127 BGE_LOCK_ASSERT(sc);
4128
4129 /* Synchronize with possible callout reset/stop. */
4130 if (callout_pending(&sc->bge_stat_ch) ||
4131 !callout_active(&sc->bge_stat_ch))
4132 return;
4133
4134 if (BGE_IS_5705_PLUS(sc))
4135 bge_stats_update_regs(sc);
4136 else
4137 bge_stats_update(sc);
4138
4139 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4140 mii = device_get_softc(sc->bge_miibus);
4141 /*
4142		 * Do not touch the PHY if we have link up. This could break
4143		 * IPMI/ASF mode or produce extra input errors
4144		 * (extra errors were reported for bcm5701 & bcm5704).
4145 */
4146 if (!sc->bge_link)
4147 mii_tick(mii);
4148 } else {
4149 /*
4150		 * Since auto-polling can't be used in TBI mode, we poll the
4151		 * link status manually. Here we register a pending link event
4152		 * and trigger an interrupt.
4153 */
4154#ifdef DEVICE_POLLING
4155 /* In polling mode we poll link state in bge_poll(). */
4156 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4157#endif
4158 {
4159 sc->bge_link_evt++;
4160 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4161 sc->bge_flags & BGE_FLAG_5788)
4162 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4163 else
4164 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4165 }
4166 }
4167
4168 bge_asf_driver_up(sc);
4169 bge_watchdog(sc);
4170
4171 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4172}
4173
4174static void
4175bge_stats_update_regs(struct bge_softc *sc)
4176{
4177 struct ifnet *ifp;
4178 struct bge_mac_stats *stats;
4179
4180 ifp = sc->bge_ifp;
4181 stats = &sc->bge_mac_stats;
4182
4183 stats->ifHCOutOctets +=
4184 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4185 stats->etherStatsCollisions +=
4186 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4187 stats->outXonSent +=
4188 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4189 stats->outXoffSent +=
4190 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4191 stats->dot3StatsInternalMacTransmitErrors +=
4192 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4193 stats->dot3StatsSingleCollisionFrames +=
4194 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4195 stats->dot3StatsMultipleCollisionFrames +=
4196 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4197 stats->dot3StatsDeferredTransmissions +=
4198 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4199 stats->dot3StatsExcessiveCollisions +=
4200 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4201 stats->dot3StatsLateCollisions +=
4202 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4203 stats->ifHCOutUcastPkts +=
4204 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4205 stats->ifHCOutMulticastPkts +=
4206 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4207 stats->ifHCOutBroadcastPkts +=
4208 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4209
4210 stats->ifHCInOctets +=
4211 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4212 stats->etherStatsFragments +=
4213 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4214 stats->ifHCInUcastPkts +=
4215 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4216 stats->ifHCInMulticastPkts +=
4217 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4218 stats->ifHCInBroadcastPkts +=
4219 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4220 stats->dot3StatsFCSErrors +=
4221 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4222 stats->dot3StatsAlignmentErrors +=
4223 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4224 stats->xonPauseFramesReceived +=
4225 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4226 stats->xoffPauseFramesReceived +=
4227 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4228 stats->macControlFramesReceived +=
4229 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4230 stats->xoffStateEntered +=
4231 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4232 stats->dot3StatsFramesTooLong +=
4233 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4234 stats->etherStatsJabbers +=
4235 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4236 stats->etherStatsUndersizePkts +=
4237 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4238
4239 stats->FramesDroppedDueToFilters +=
4240 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4241 stats->DmaWriteQueueFull +=
4242 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4243 stats->DmaWriteHighPriQueueFull +=
4244 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4245 stats->NoMoreRxBDs +=
4246 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4247 stats->InputDiscards +=
4248 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4249 stats->InputErrors +=
4250 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4251 stats->RecvThresholdHit +=
4252 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4253
4254 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4255 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4256 stats->InputErrors);
4257}
4258
4259static void
4260bge_stats_clear_regs(struct bge_softc *sc)
4261{
4262
4263 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4265 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4267 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4268 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4269 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4270 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4271 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4272 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4273 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4274 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4275 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4276
4277 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4283 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4284 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4285 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4286 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4287 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4288 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4289 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4290 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4291
4292 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4293 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4294 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4295 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4296 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4297 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4298 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4299}
4300
4301static void
4302bge_stats_update(struct bge_softc *sc)
4303{
4304 struct ifnet *ifp;
4305 bus_size_t stats;
4306 uint32_t cnt; /* current register value */
4307
4308 ifp = sc->bge_ifp;
4309
4310 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4311
4312#define READ_STAT(sc, stats, stat) \
4313 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4314
4315 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4316 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4317 sc->bge_tx_collisions = cnt;
4318
4319 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4320 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4321 sc->bge_rx_discards = cnt;
4322
4323 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4324 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4325 sc->bge_tx_discards = cnt;
4326
4327#undef READ_STAT
4328}
4329
4330/*
4331 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4332 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4333 * but when such padded frames employ the bge IP/TCP checksum offload,
4334 * the hardware checksum assist gives incorrect results (possibly
4335 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4336 * If we pad such runts with zeros, the onboard checksum comes out correct.
4337 */
4338static __inline int
4339bge_cksum_pad(struct mbuf *m)
4340{
4341 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4342 struct mbuf *last;
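	/*
	 * For example, assuming ETHER_MIN_NOPAD is 60 (the 64-byte
	 * Ethernet minimum frame size minus the 4-byte FCS), a 50-byte
	 * frame gets padlen = 60 - 50 = 10 zero bytes appended below, so
	 * the checksum offload never sees uninitialized pad bytes.
	 */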
4343
4344 /* If there's only the packet-header and we can pad there, use it. */
4345 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4346 M_TRAILINGSPACE(m) >= padlen) {
4347 last = m;
4348 } else {
4349 /*
4350 * Walk packet chain to find last mbuf. We will either
4351 * pad there, or append a new mbuf and pad it.
4352 */
4353 for (last = m; last->m_next != NULL; last = last->m_next);
4354 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4355 /* Allocate new empty mbuf, pad it. Compact later. */
4356 struct mbuf *n;
4357
4358 MGET(n, M_DONTWAIT, MT_DATA);
4359 if (n == NULL)
4360 return (ENOBUFS);
4361 n->m_len = 0;
4362 last->m_next = n;
4363 last = n;
4364 }
4365 }
4366
4367 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4368 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4369 last->m_len += padlen;
4370 m->m_pkthdr.len += padlen;
4371
4372 return (0);
4373}
4374
4375static struct mbuf *
4376bge_check_short_dma(struct mbuf *m)
4377{
4378 struct mbuf *n;
4379 int found;
4380
4381 /*
 4382	 * If the device receives two back-to-back send BDs with less than
 4383	 * or equal to 8 total bytes, it may hang. The two back-to-back send
 4384	 * BDs must be in the same frame for this failure to occur. Scan the
 4385	 * mbuf chain and check whether two such back-to-back send BDs are
 4386	 * present. If they are, allocate a new mbuf and copy the frame into
 4387	 * it to work around the silicon bug.
4388 */
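	/*
	 * For example, a chain whose first two mbufs hold 6 and 2 bytes
	 * (8 bytes total across two consecutive send BDs) would trip the
	 * erratum, so the whole frame is defragmented below instead.
	 */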
4389 for (n = m, found = 0; n != NULL; n = n->m_next) {
4390 if (n->m_len < 8) {
4391 found++;
4392 if (found > 1)
4393 break;
4394 continue;
4395 }
4396 found = 0;
4397 }
4398
4399 if (found > 1) {
4400 n = m_defrag(m, M_DONTWAIT);
4401 if (n == NULL)
4402 m_freem(m);
4403 } else
4404 n = m;
4405 return (n);
4406}
4407
4408static struct mbuf *
4409bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4410 uint16_t *flags)
4411{
4412 struct ip *ip;
4413 struct tcphdr *tcp;
4414 struct mbuf *n;
4415 uint16_t hlen;
4416 uint32_t poff;
4417
4418 if (M_WRITABLE(m) == 0) {
4419 /* Get a writable copy. */
4420 n = m_dup(m, M_DONTWAIT);
4421 m_freem(m);
4422 if (n == NULL)
4423 return (NULL);
4424 m = n;
4425 }
4426 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4427 if (m == NULL)
4428 return (NULL);
4429 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4430 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4431 m = m_pullup(m, poff + sizeof(struct tcphdr));
4432 if (m == NULL)
4433 return (NULL);
4434 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4435 m = m_pullup(m, poff + (tcp->th_off << 2));
4436 if (m == NULL)
4437 return (NULL);
4438 /*
 4439	 * It seems the controller doesn't modify the IP length and TCP pseudo
 4440	 * checksum. These checksums, computed by the upper stack, should be 0.
4441 */
4442 *mss = m->m_pkthdr.tso_segsz;
4443 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4444 ip->ip_sum = 0;
4445 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4446 /* Clear pseudo checksum computed by TCP stack. */
4447 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4448 tcp->th_sum = 0;
4449 /*
 4450	 * Broadcom controllers use different descriptor formats for
 4451	 * TSO depending on the ASIC revision. Due to TSO-capable firmware
 4452	 * licensing issues and the lower performance of firmware-based TSO,
 4453	 * we only support hardware-based TSO.
4454 */
4455 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4456 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4457 if (sc->bge_flags & BGE_FLAG_TSO3) {
4458 /*
4459 * For BCM5717 and newer controllers, hardware based TSO
4460 * uses the 14 lower bits of the bge_mss field to store the
4461 * MSS and the upper 2 bits to store the lowest 2 bits of
4462 * the IP/TCP header length. The upper 6 bits of the header
4463 * length are stored in the bge_flags[14:10,4] field. Jumbo
4464 * frames are supported.
4465 */
4466 *mss |= ((hlen & 0x3) << 14);
4467 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4468 } else {
4469 /*
4470 * For BCM5755 and newer controllers, hardware based TSO uses
4471 * the lower 11 bits to store the MSS and the upper 5 bits to
4472 * store the IP/TCP header length. Jumbo frames are not
4473 * supported.
4474 */
4475 *mss |= (hlen << 11);
4476 }
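	/*
	 * Worked example: with 20-byte IP and 20-byte TCP headers, hlen is
	 * (20 + 20) >> 2 = 10 words.  On TSO3 parts the low 2 bits of hlen
	 * go into mss[15:14] and the remaining bits into the flags word as
	 * encoded above; on parts without TSO3 the mss field simply becomes
	 * tso_segsz | (10 << 11).
	 */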
4477 return (m);
4478}
4479
4480/*
4481 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4482 * pointers to descriptors.
4483 */
4484static int
4485bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4486{
4487 bus_dma_segment_t segs[BGE_NSEG_NEW];
4488 bus_dmamap_t map;
4489 struct bge_tx_bd *d;
4490 struct mbuf *m = *m_head;
4491 uint32_t idx = *txidx;
4492 uint16_t csum_flags, mss, vlan_tag;
4493 int nsegs, i, error;
4494
4495 csum_flags = 0;
4496 mss = 0;
4497 vlan_tag = 0;
4498 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4499 m->m_next != NULL) {
4500 *m_head = bge_check_short_dma(m);
4501 if (*m_head == NULL)
4502 return (ENOBUFS);
4503 m = *m_head;
4504 }
4505 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4506 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4507 if (*m_head == NULL)
4508 return (ENOBUFS);
4509 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4510 BGE_TXBDFLAG_CPU_POST_DMA;
4511 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4512 if (m->m_pkthdr.csum_flags & CSUM_IP)
4513 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4514 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4515 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4516 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4517 (error = bge_cksum_pad(m)) != 0) {
4518 m_freem(m);
4519 *m_head = NULL;
4520 return (error);
4521 }
4522 }
4523 if (m->m_flags & M_LASTFRAG)
4524 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4525 else if (m->m_flags & M_FRAG)
4526 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4527 }
4528
4529 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4530 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4531 m->m_pkthdr.len > ETHER_MAX_LEN)
4532 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4533 if (sc->bge_forced_collapse > 0 &&
4534 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4535 /*
 4536			 * Forcibly collapse mbuf chains to overcome a hardware
 4537			 * limitation which only supports a single outstanding
4538 * DMA read operation.
4539 */
4540 if (sc->bge_forced_collapse == 1)
4541 m = m_defrag(m, M_DONTWAIT);
4542 else
4543 m = m_collapse(m, M_DONTWAIT,
4544 sc->bge_forced_collapse);
4545 if (m == NULL)
4546 m = *m_head;
4547 *m_head = m;
4548 }
4549 }
4550
4551 map = sc->bge_cdata.bge_tx_dmamap[idx];
4552 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4553 &nsegs, BUS_DMA_NOWAIT);
4554 if (error == EFBIG) {
4555 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4556 if (m == NULL) {
4557 m_freem(*m_head);
4558 *m_head = NULL;
4559 return (ENOBUFS);
4560 }
4561 *m_head = m;
4562 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4563 m, segs, &nsegs, BUS_DMA_NOWAIT);
4564 if (error) {
4565 m_freem(m);
4566 *m_head = NULL;
4567 return (error);
4568 }
4569 } else if (error != 0)
4570 return (error);
4571
4572 /* Check if we have enough free send BDs. */
4573 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4574 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4575 return (ENOBUFS);
4576 }
4577
4578 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4579
4580 if (m->m_flags & M_VLANTAG) {
4581 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4582 vlan_tag = m->m_pkthdr.ether_vtag;
4583 }
4584 for (i = 0; ; i++) {
4585 d = &sc->bge_ldata.bge_tx_ring[idx];
4586 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4587 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4588 d->bge_len = segs[i].ds_len;
4589 d->bge_flags = csum_flags;
4590 d->bge_vlan_tag = vlan_tag;
4591 d->bge_mss = mss;
4592 if (i == nsegs - 1)
4593 break;
4594 BGE_INC(idx, BGE_TX_RING_CNT);
4595 }
4596
4597 /* Mark the last segment as end of packet... */
4598 d->bge_flags |= BGE_TXBDFLAG_END;
4599
4600 /*
 4601	 * Ensure that the map for this transmission
4602 * is placed at the array index of the last descriptor
4603 * in this chain.
4604 */
4605 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4606 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4607 sc->bge_cdata.bge_tx_chain[idx] = m;
4608 sc->bge_txcnt += nsegs;
4609
4610 BGE_INC(idx, BGE_TX_RING_CNT);
4611 *txidx = idx;
4612
4613 return (0);
4614}
4615
4616/*
4617 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4618 * to the mbuf data regions directly in the transmit descriptors.
4619 */
4620static void
4621bge_start_locked(struct ifnet *ifp)
4622{
4623 struct bge_softc *sc;
4624 struct mbuf *m_head;
4625 uint32_t prodidx;
4626 int count;
4627
4628 sc = ifp->if_softc;
4629 BGE_LOCK_ASSERT(sc);
4630
4631 if (!sc->bge_link ||
4632 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4633 IFF_DRV_RUNNING)
4634 return;
4635
4636 prodidx = sc->bge_tx_prodidx;
4637
4638 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4639 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4640 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4641 break;
4642 }
4643 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4644 if (m_head == NULL)
4645 break;
4646
4647 /*
4648 * XXX
4649 * The code inside the if() block is never reached since we
4650 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4651 * requests to checksum TCP/UDP in a fragmented packet.
4652 *
4653 * XXX
4654 * safety overkill. If this is a fragmented packet chain
4655 * with delayed TCP/UDP checksums, then only encapsulate
4656 * it if we have enough descriptors to handle the entire
4657 * chain at once.
4658 * (paranoia -- may not actually be needed)
4659 */
4660 if (m_head->m_flags & M_FIRSTFRAG &&
4661 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4662 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4663 m_head->m_pkthdr.csum_data + 16) {
4664 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4665 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4666 break;
4667 }
4668 }
4669
4670 /*
4671 * Pack the data into the transmit ring. If we
4672 * don't have room, set the OACTIVE flag and wait
4673 * for the NIC to drain the ring.
4674 */
4675 if (bge_encap(sc, &m_head, &prodidx)) {
4676 if (m_head == NULL)
4677 break;
4678 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4679 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4680 break;
4681 }
4682 ++count;
4683
4684 /*
4685 * If there's a BPF listener, bounce a copy of this frame
4686 * to him.
4687 */
4688#ifdef ETHER_BPF_MTAP
4689 ETHER_BPF_MTAP(ifp, m_head);
4690#else
4691 BPF_MTAP(ifp, m_head);
4692#endif
4693 }
4694
4695 if (count > 0) {
4696 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4697 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4698 /* Transmit. */
4699 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4700 /* 5700 b2 errata */
4701 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4702 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4703
4704 sc->bge_tx_prodidx = prodidx;
4705
4706 /*
4707 * Set a timeout in case the chip goes out to lunch.
4708 */
4709 sc->bge_timer = 5;
4710 }
4711}
4712
4713/*
4714 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4715 * to the mbuf data regions directly in the transmit descriptors.
4716 */
4717static void
4718bge_start(struct ifnet *ifp)
4719{
4720 struct bge_softc *sc;
4721
4722 sc = ifp->if_softc;
4723 BGE_LOCK(sc);
4724 bge_start_locked(ifp);
4725 BGE_UNLOCK(sc);
4726}
4727
4728static void
4729bge_init_locked(struct bge_softc *sc)
4730{
4731 struct ifnet *ifp;
4732 uint16_t *m;
4733 uint32_t mode;
4734
4735 BGE_LOCK_ASSERT(sc);
4736
4737 ifp = sc->bge_ifp;
4738
4739 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4740 return;
4741
4742 /* Cancel pending I/O and flush buffers. */
4743 bge_stop(sc);
4744
4745 bge_stop_fw(sc);
4746 bge_sig_pre_reset(sc, BGE_RESET_START);
4747 bge_reset(sc);
4748 bge_sig_legacy(sc, BGE_RESET_START);
4749 bge_sig_post_reset(sc, BGE_RESET_START);
4750
4751 bge_chipinit(sc);
4752
4753 /*
4754 * Init the various state machines, ring
4755 * control blocks and firmware.
4756 */
4757 if (bge_blockinit(sc)) {
4758 device_printf(sc->bge_dev, "initialization failure\n");
4759 return;
4760 }
4761
4762 ifp = sc->bge_ifp;
4763
4764 /* Specify MTU. */
4765 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4766 ETHER_HDR_LEN + ETHER_CRC_LEN +
4767 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
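	/*
	 * E.g. with the default 1500-byte MTU and IFCAP_VLAN_MTU enabled,
	 * this programs 1500 + 14 + 4 + 4 = 1522 bytes as the largest
	 * frame the RX MAC will accept.
	 */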
4768
4769 /* Load our MAC address. */
4770 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4771 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4772 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4773
4774 /* Program promiscuous mode. */
4775 bge_setpromisc(sc);
4776
4777 /* Program multicast filter. */
4778 bge_setmulti(sc);
4779
4780 /* Program VLAN tag stripping. */
4781 bge_setvlan(sc);
4782
4783 /* Override UDP checksum offloading. */
4784 if (sc->bge_forced_udpcsum == 0)
4785 sc->bge_csum_features &= ~CSUM_UDP;
4786 else
4787 sc->bge_csum_features |= CSUM_UDP;
4788 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4789 ifp->if_capenable & IFCAP_TXCSUM) {
4790 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4791 ifp->if_hwassist |= sc->bge_csum_features;
4792 }
4793
4794 /* Init RX ring. */
4795 if (bge_init_rx_ring_std(sc) != 0) {
4796 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4797 bge_stop(sc);
4798 return;
4799 }
4800
4801 /*
4802 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 4803	 * memory to ensure that the chip has in fact read the first
4804 * entry of the ring.
4805 */
4806 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4807 uint32_t v, i;
4808 for (i = 0; i < 10; i++) {
4809 DELAY(20);
4810 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4811 if (v == (MCLBYTES - ETHER_ALIGN))
4812 break;
4813 }
4814 if (i == 10)
4815 device_printf (sc->bge_dev,
4816 "5705 A0 chip failed to load RX ring\n");
4817 }
4818
4819 /* Init jumbo RX ring. */
4820 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4821 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4822 (MCLBYTES - ETHER_ALIGN)) {
4823 if (bge_init_rx_ring_jumbo(sc) != 0) {
4824 device_printf(sc->bge_dev,
4825 "no memory for jumbo Rx buffers.\n");
4826 bge_stop(sc);
4827 return;
4828 }
4829 }
4830
4831 /* Init our RX return ring index. */
4832 sc->bge_rx_saved_considx = 0;
4833
4834 /* Init our RX/TX stat counters. */
4835 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4836
4837 /* Init TX ring. */
4838 bge_init_tx_ring(sc);
4839
4840 /* Enable TX MAC state machine lockup fix. */
4841 mode = CSR_READ_4(sc, BGE_TX_MODE);
4842 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4843 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4844 /* Turn on transmitter. */
4845 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4846
4847 /* Turn on receiver. */
4848 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4849
4850 /*
4851 * Set the number of good frames to receive after RX MBUF
4852 * Low Watermark has been reached. After the RX MAC receives
4853 * this number of frames, it will drop subsequent incoming
4854 * frames until the MBUF High Watermark is reached.
4855 */
4856 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4857 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4858 else
4859 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4860
4861 /* Clear MAC statistics. */
4862 if (BGE_IS_5705_PLUS(sc))
4863 bge_stats_clear_regs(sc);
4864
4865 /* Tell firmware we're alive. */
4866 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4867
4868#ifdef DEVICE_POLLING
4869 /* Disable interrupts if we are polling. */
4870 if (ifp->if_capenable & IFCAP_POLLING) {
4871 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4872 BGE_PCIMISCCTL_MASK_PCI_INTR);
4873 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4874 } else
4875#endif
4876
4877 /* Enable host interrupts. */
4878 {
4879 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4880 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4881 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4882 }
4883
4884 bge_ifmedia_upd_locked(ifp);
4885
4886 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4887 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4888
4889 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4890}
4891
4892static void
4893bge_init(void *xsc)
4894{
4895 struct bge_softc *sc = xsc;
4896
4897 BGE_LOCK(sc);
4898 bge_init_locked(sc);
4899 BGE_UNLOCK(sc);
4900}
4901
4902/*
4903 * Set media options.
4904 */
4905static int
4906bge_ifmedia_upd(struct ifnet *ifp)
4907{
4908 struct bge_softc *sc = ifp->if_softc;
4909 int res;
4910
4911 BGE_LOCK(sc);
4912 res = bge_ifmedia_upd_locked(ifp);
4913 BGE_UNLOCK(sc);
4914
4915 return (res);
4916}
4917
4918static int
4919bge_ifmedia_upd_locked(struct ifnet *ifp)
4920{
4921 struct bge_softc *sc = ifp->if_softc;
4922 struct mii_data *mii;
4923 struct mii_softc *miisc;
4924 struct ifmedia *ifm;
4925
4926 BGE_LOCK_ASSERT(sc);
4927
4928 ifm = &sc->bge_ifmedia;
4929
4930 /* If this is a 1000baseX NIC, enable the TBI port. */
4931 if (sc->bge_flags & BGE_FLAG_TBI) {
4932 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4933 return (EINVAL);
4934 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4935 case IFM_AUTO:
4936 /*
4937 * The BCM5704 ASIC appears to have a special
4938 * mechanism for programming the autoneg
4939 * advertisement registers in TBI mode.
4940 */
4941 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4942 uint32_t sgdig;
4943 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4944 if (sgdig & BGE_SGDIGSTS_DONE) {
4945 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4946 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4947 sgdig |= BGE_SGDIGCFG_AUTO |
4948 BGE_SGDIGCFG_PAUSE_CAP |
4949 BGE_SGDIGCFG_ASYM_PAUSE;
4950 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4951 sgdig | BGE_SGDIGCFG_SEND);
4952 DELAY(5);
4953 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4954 }
4955 }
4956 break;
4957 case IFM_1000_SX:
4958 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4959 BGE_CLRBIT(sc, BGE_MAC_MODE,
4960 BGE_MACMODE_HALF_DUPLEX);
4961 } else {
4962 BGE_SETBIT(sc, BGE_MAC_MODE,
4963 BGE_MACMODE_HALF_DUPLEX);
4964 }
4965 break;
4966 default:
4967 return (EINVAL);
4968 }
4969 return (0);
4970 }
4971
4972 sc->bge_link_evt++;
4973 mii = device_get_softc(sc->bge_miibus);
4974 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4975 PHY_RESET(miisc);
4976 mii_mediachg(mii);
4977
4978 /*
4979 * Force an interrupt so that we will call bge_link_upd
4980 * if needed and clear any pending link state attention.
 4981	 * Without this we do not get any further interrupts for
 4982	 * link state changes and thus will never bring the link UP
 4983	 * or be able to send in bge_start_locked. The only
 4984	 * way to get things working was to receive a packet and
 4985	 * get an RX intr.
 4986	 * bge_tick should help for fiber cards and we might not
 4987	 * need to do this here if BGE_FLAG_TBI is set, but as
 4988	 * we poll for fiber anyway it should not harm.
4989 */
4990 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4991 sc->bge_flags & BGE_FLAG_5788)
4992 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4993 else
4994 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4995
4996 return (0);
4997}
4998
4999/*
5000 * Report current media status.
5001 */
5002static void
5003bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5004{
5005 struct bge_softc *sc = ifp->if_softc;
5006 struct mii_data *mii;
5007
5008 BGE_LOCK(sc);
5009
5010 if (sc->bge_flags & BGE_FLAG_TBI) {
5011 ifmr->ifm_status = IFM_AVALID;
5012 ifmr->ifm_active = IFM_ETHER;
5013 if (CSR_READ_4(sc, BGE_MAC_STS) &
5014 BGE_MACSTAT_TBI_PCS_SYNCHED)
5015 ifmr->ifm_status |= IFM_ACTIVE;
5016 else {
5017 ifmr->ifm_active |= IFM_NONE;
5018 BGE_UNLOCK(sc);
5019 return;
5020 }
5021 ifmr->ifm_active |= IFM_1000_SX;
5022 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5023 ifmr->ifm_active |= IFM_HDX;
5024 else
5025 ifmr->ifm_active |= IFM_FDX;
5026 BGE_UNLOCK(sc);
5027 return;
5028 }
5029
5030 mii = device_get_softc(sc->bge_miibus);
5031 mii_pollstat(mii);
5032 ifmr->ifm_active = mii->mii_media_active;
5033 ifmr->ifm_status = mii->mii_media_status;
5034
5035 BGE_UNLOCK(sc);
5036}
5037
5038static int
5039bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5040{
5041 struct bge_softc *sc = ifp->if_softc;
5042 struct ifreq *ifr = (struct ifreq *) data;
5043 struct mii_data *mii;
5044 int flags, mask, error = 0;
5045
5046 switch (command) {
5047 case SIOCSIFMTU:
5048 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5049 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5050 if (ifr->ifr_mtu < ETHERMIN ||
5051 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5052 error = EINVAL;
5053 break;
5054 }
5055 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5056 error = EINVAL;
5057 break;
5058 }
5059 BGE_LOCK(sc);
5060 if (ifp->if_mtu != ifr->ifr_mtu) {
5061 ifp->if_mtu = ifr->ifr_mtu;
5062 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5063 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5064 bge_init_locked(sc);
5065 }
5066 }
5067 BGE_UNLOCK(sc);
5068 break;
5069 case SIOCSIFFLAGS:
5070 BGE_LOCK(sc);
5071 if (ifp->if_flags & IFF_UP) {
5072 /*
5073 * If only the state of the PROMISC flag changed,
5074 * then just use the 'set promisc mode' command
5075 * instead of reinitializing the entire NIC. Doing
5076 * a full re-init means reloading the firmware and
5077 * waiting for it to start up, which may take a
5078 * second or two. Similarly for ALLMULTI.
5079 */
5080 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5081 flags = ifp->if_flags ^ sc->bge_if_flags;
5082 if (flags & IFF_PROMISC)
5083 bge_setpromisc(sc);
5084 if (flags & IFF_ALLMULTI)
5085 bge_setmulti(sc);
5086 } else
5087 bge_init_locked(sc);
5088 } else {
5089 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5090 bge_stop(sc);
5091 }
5092 }
5093 sc->bge_if_flags = ifp->if_flags;
5094 BGE_UNLOCK(sc);
5095 error = 0;
5096 break;
5097 case SIOCADDMULTI:
5098 case SIOCDELMULTI:
5099 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5100 BGE_LOCK(sc);
5101 bge_setmulti(sc);
5102 BGE_UNLOCK(sc);
5103 error = 0;
5104 }
5105 break;
5106 case SIOCSIFMEDIA:
5107 case SIOCGIFMEDIA:
5108 if (sc->bge_flags & BGE_FLAG_TBI) {
5109 error = ifmedia_ioctl(ifp, ifr,
5110 &sc->bge_ifmedia, command);
5111 } else {
5112 mii = device_get_softc(sc->bge_miibus);
5113 error = ifmedia_ioctl(ifp, ifr,
5114 &mii->mii_media, command);
5115 }
5116 break;
5117 case SIOCSIFCAP:
5118 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5119#ifdef DEVICE_POLLING
5120 if (mask & IFCAP_POLLING) {
5121 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5122 error = ether_poll_register(bge_poll, ifp);
5123 if (error)
5124 return (error);
5125 BGE_LOCK(sc);
5126 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5127 BGE_PCIMISCCTL_MASK_PCI_INTR);
5128 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5129 ifp->if_capenable |= IFCAP_POLLING;
5130 BGE_UNLOCK(sc);
5131 } else {
5132 error = ether_poll_deregister(ifp);
5133 /* Enable interrupt even in error case */
5134 BGE_LOCK(sc);
5135 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5136 BGE_PCIMISCCTL_MASK_PCI_INTR);
5137 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5138 ifp->if_capenable &= ~IFCAP_POLLING;
5139 BGE_UNLOCK(sc);
5140 }
5141 }
5142#endif
5143 if ((mask & IFCAP_TXCSUM) != 0 &&
5144 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5145 ifp->if_capenable ^= IFCAP_TXCSUM;
5146 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5147 ifp->if_hwassist |= sc->bge_csum_features;
5148 else
5149 ifp->if_hwassist &= ~sc->bge_csum_features;
5150 }
5151
5152 if ((mask & IFCAP_RXCSUM) != 0 &&
5153 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5154 ifp->if_capenable ^= IFCAP_RXCSUM;
5155
5156 if ((mask & IFCAP_TSO4) != 0 &&
5157 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5158 ifp->if_capenable ^= IFCAP_TSO4;
5159 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5160 ifp->if_hwassist |= CSUM_TSO;
5161 else
5162 ifp->if_hwassist &= ~CSUM_TSO;
5163 }
5164
5165 if (mask & IFCAP_VLAN_MTU) {
5166 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5167 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5168 bge_init(sc);
5169 }
5170
5171 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5172 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5173 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5174 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5175 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5176 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5177 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5178 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5179 BGE_LOCK(sc);
5180 bge_setvlan(sc);
5181 BGE_UNLOCK(sc);
5182 }
5183#ifdef VLAN_CAPABILITIES
5184 VLAN_CAPABILITIES(ifp);
5185#endif
5186 break;
5187 default:
5188 error = ether_ioctl(ifp, command, data);
5189 break;
5190 }
5191
5192 return (error);
5193}
5194
5195static void
5196bge_watchdog(struct bge_softc *sc)
5197{
5198 struct ifnet *ifp;
5199
5200 BGE_LOCK_ASSERT(sc);
5201
5202 if (sc->bge_timer == 0 || --sc->bge_timer)
5203 return;
5204
5205 ifp = sc->bge_ifp;
5206
5207 if_printf(ifp, "watchdog timeout -- resetting\n");
5208
5209 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5210 bge_init_locked(sc);
5211
5212 ifp->if_oerrors++;
5213}
5214
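/*
 * Clear an enable bit in the given block's mode register and busy-wait
 * (up to BGE_TIMEOUT iterations of 100us) for the hardware to report
 * that the corresponding state machine has actually stopped.
 */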
5215static void
5216bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5217{
5218 int i;
5219
5220 BGE_CLRBIT(sc, reg, bit);
5221
5222 for (i = 0; i < BGE_TIMEOUT; i++) {
5223 if ((CSR_READ_4(sc, reg) & bit) == 0)
5224 return;
5225 DELAY(100);
5226 }
5227}
5228
5229/*
5230 * Stop the adapter and free any mbufs allocated to the
5231 * RX and TX lists.
5232 */
5233static void
5234bge_stop(struct bge_softc *sc)
5235{
5236 struct ifnet *ifp;
5237
5238 BGE_LOCK_ASSERT(sc);
5239
5240 ifp = sc->bge_ifp;
5241
5242 callout_stop(&sc->bge_stat_ch);
5243
5244 /* Disable host interrupts. */
5245 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5246 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5247
5248 /*
5249 * Tell firmware we're shutting down.
5250 */
5251 bge_stop_fw(sc);
5252 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5253
5254 /*
5255 * Disable all of the receiver blocks.
5256 */
5257 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5258 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5259 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5260 if (BGE_IS_5700_FAMILY(sc))
5261 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5262 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5263 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5264 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5265
5266 /*
5267 * Disable all of the transmit blocks.
5268 */
5269 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5270 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5271 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5272 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5273 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5274 if (BGE_IS_5700_FAMILY(sc))
5275 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5276 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5277
5278 /*
5279 * Shut down all of the memory managers and related
5280 * state machines.
5281 */
5282 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5283 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5284 if (BGE_IS_5700_FAMILY(sc))
5285 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5286
5287 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5288 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5289 if (!(BGE_IS_5705_PLUS(sc))) {
5290 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5291 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5292 }
5293 /* Update MAC statistics. */
5294 if (BGE_IS_5705_PLUS(sc))
5295 bge_stats_update_regs(sc);
5296
5297 bge_reset(sc);
5298 bge_sig_legacy(sc, BGE_RESET_STOP);
5299 bge_sig_post_reset(sc, BGE_RESET_STOP);
5300
5301 /*
5302 * Keep the ASF firmware running if up.
5303 */
5304 if (sc->bge_asf_mode & ASF_STACKUP)
5305 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5306 else
5307 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5308
5309 /* Free the RX lists. */
5310 bge_free_rx_ring_std(sc);
5311
5312 /* Free jumbo RX list. */
5313 if (BGE_IS_JUMBO_CAPABLE(sc))
5314 bge_free_rx_ring_jumbo(sc);
5315
5316 /* Free TX buffers. */
5317 bge_free_tx_ring(sc);
5318
5319 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5320
5321 /* Clear MAC's link state (PHY may still have link UP). */
5322 if (bootverbose && sc->bge_link)
5323 if_printf(sc->bge_ifp, "link DOWN\n");
5324 sc->bge_link = 0;
5325
5326 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5327}
5328
5329/*
5330 * Stop all chip I/O so that the kernel's probe routines don't
5331 * get confused by errant DMAs when rebooting.
5332 */
5333static int
5334bge_shutdown(device_t dev)
5335{
5336 struct bge_softc *sc;
5337
5338 sc = device_get_softc(dev);
5339 BGE_LOCK(sc);
5340 bge_stop(sc);
5341 bge_reset(sc);
5342 BGE_UNLOCK(sc);
5343
5344 return (0);
5345}
5346
5347static int
5348bge_suspend(device_t dev)
5349{
5350 struct bge_softc *sc;
5351
5352 sc = device_get_softc(dev);
5353 BGE_LOCK(sc);
5354 bge_stop(sc);
5355 BGE_UNLOCK(sc);
5356
5357 return (0);
5358}
5359
5360static int
5361bge_resume(device_t dev)
5362{
5363 struct bge_softc *sc;
5364 struct ifnet *ifp;
5365
5366 sc = device_get_softc(dev);
5367 BGE_LOCK(sc);
5368 ifp = sc->bge_ifp;
5369 if (ifp->if_flags & IFF_UP) {
5370 bge_init_locked(sc);
5371 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5372 bge_start_locked(ifp);
5373 }
5374 BGE_UNLOCK(sc);
5375
5376 return (0);
5377}
5378
5379static void
5380bge_link_upd(struct bge_softc *sc)
5381{
5382 struct mii_data *mii;
5383 uint32_t link, status;
5384
5385 BGE_LOCK_ASSERT(sc);
5386
5387 /* Clear 'pending link event' flag. */
5388 sc->bge_link_evt = 0;
5389
5390 /*
5391 * Process link state changes.
5392 * Grrr. The link status word in the status block does
5393 * not work correctly on the BCM5700 rev AX and BX chips,
5394 * according to all available information. Hence, we have
5395 * to enable MII interrupts in order to properly obtain
5396 * async link changes. Unfortunately, this also means that
5397 * we have to read the MAC status register to detect link
5398 * changes, thereby adding an additional register access to
5399 * the interrupt handler.
5400 *
 5401	 * XXX: perhaps the link state detection procedure used for
 5402	 * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700 revisions.
5403 */
5404
5405 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5406 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5407 status = CSR_READ_4(sc, BGE_MAC_STS);
5408 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5409 mii = device_get_softc(sc->bge_miibus);
5410 mii_pollstat(mii);
5411 if (!sc->bge_link &&
5412 mii->mii_media_status & IFM_ACTIVE &&
5413 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5414 sc->bge_link++;
5415 if (bootverbose)
5416 if_printf(sc->bge_ifp, "link UP\n");
5417 } else if (sc->bge_link &&
5418 (!(mii->mii_media_status & IFM_ACTIVE) ||
5419 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5420 sc->bge_link = 0;
5421 if (bootverbose)
5422 if_printf(sc->bge_ifp, "link DOWN\n");
5423 }
5424
5425 /* Clear the interrupt. */
5426 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5427 BGE_EVTENB_MI_INTERRUPT);
5428 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5429 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5430 BRGPHY_INTRS);
5431 }
5432 return;
5433 }
5434
5435 if (sc->bge_flags & BGE_FLAG_TBI) {
5436 status = CSR_READ_4(sc, BGE_MAC_STS);
5437 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5438 if (!sc->bge_link) {
5439 sc->bge_link++;
5440 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5441 BGE_CLRBIT(sc, BGE_MAC_MODE,
5442 BGE_MACMODE_TBI_SEND_CFGS);
5443 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5444 if (bootverbose)
5445 if_printf(sc->bge_ifp, "link UP\n");
5446 if_link_state_change(sc->bge_ifp,
5447 LINK_STATE_UP);
5448 }
5449 } else if (sc->bge_link) {
5450 sc->bge_link = 0;
5451 if (bootverbose)
5452 if_printf(sc->bge_ifp, "link DOWN\n");
5453 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5454 }
5455 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5456 /*
 5457		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
 5458		 * bit in the status word always set. Work around this bug by
 5459		 * reading the PHY link status directly.
5460 */
5461 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5462
5463 if (link != sc->bge_link ||
5464 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5465 mii = device_get_softc(sc->bge_miibus);
5466 mii_pollstat(mii);
5467 if (!sc->bge_link &&
5468 mii->mii_media_status & IFM_ACTIVE &&
5469 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5470 sc->bge_link++;
5471 if (bootverbose)
5472 if_printf(sc->bge_ifp, "link UP\n");
5473 } else if (sc->bge_link &&
5474 (!(mii->mii_media_status & IFM_ACTIVE) ||
5475 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5476 sc->bge_link = 0;
5477 if (bootverbose)
5478 if_printf(sc->bge_ifp, "link DOWN\n");
5479 }
5480 }
5481 } else {
5482 /*
5483 * For controllers that call mii_tick, we have to poll
5484 * link status.
5485 */
5486 mii = device_get_softc(sc->bge_miibus);
5487 mii_pollstat(mii);
5488 bge_miibus_statchg(sc->bge_dev);
5489 }
5490
5491 /* Clear the attention. */
5492 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5493 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5494 BGE_MACSTAT_LINK_CHANGED);
5495}
5496
5497static void
5498bge_add_sysctls(struct bge_softc *sc)
5499{
5500 struct sysctl_ctx_list *ctx;
5501 struct sysctl_oid_list *children;
5502 char tn[32];
5503 int unit;
5504
5505 ctx = device_get_sysctl_ctx(sc->bge_dev);
5506 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5507
5508#ifdef BGE_REGISTER_DEBUG
5509 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5510 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5511 "Debug Information");
5512
5513 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5514 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5515 "Register Read");
5516
5517 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5518 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5519 "Memory Read");
5520
5521#endif
5522
5523 unit = device_get_unit(sc->bge_dev);
5524 /*
5525 * A common design characteristic for many Broadcom client controllers
5526 * is that they only support a single outstanding DMA read operation
5527 * on the PCIe bus. This means that it will take twice as long to fetch
5528 * a TX frame that is split into header and payload buffers as it does
5529 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
 5530	 * these controllers, coalescing buffers to reduce the number of memory
 5531	 * reads is an effective way to get maximum performance (about 940Mbps).
 5532	 * Without collapsing TX buffers the maximum TCP bulk transfer
 5533	 * performance is about 850Mbps. However, forcibly coalescing mbufs
 5534	 * consumes a lot of CPU cycles, so leave it off by default.
5535 */
5536 sc->bge_forced_collapse = 0;
5537 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5538 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5539 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5540 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5541 "Number of fragmented TX buffers of a frame allowed before "
5542 "forced collapsing");
5543
5544 /*
5545 * It seems all Broadcom controllers have a bug that can generate UDP
5546 * datagrams with checksum value 0 when TX UDP checksum offloading is
 5547	 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
 5548	 * Even though the probability of generating such UDP datagrams is
 5549	 * low, I don't want to see FreeBSD boxes inject such datagrams into
 5550	 * the network, so disable UDP checksum offloading by default. Users
 5551	 * can still override this behavior by setting a sysctl variable,
5552 * dev.bge.0.forced_udpcsum.
5553 */
5554 sc->bge_forced_udpcsum = 0;
 5555	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5556 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5557 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5558 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5559 "Enable UDP checksum offloading even if controller can "
5560 "generate UDP checksum value 0");
5561
5562 if (BGE_IS_5705_PLUS(sc))
5563 bge_add_sysctl_stats_regs(sc, ctx, children);
5564 else
5565 bge_add_sysctl_stats(sc, ctx, children);
5566}
5567
5568#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5569 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5570 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5571 desc)
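/*
 * For example, BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children,
 * ifInErrors, "InputErrors") below registers a read-only "InputErrors"
 * sysctl whose handler, bge_sysctl_stats(), fetches the low 32 bits of
 * the ifInErrors counter from the NIC's statistics block.
 */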
5572
5573static void
5574bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5575 struct sysctl_oid_list *parent)
5576{
5577 struct sysctl_oid *tree;
5578 struct sysctl_oid_list *children, *schildren;
5579
5580 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5581 NULL, "BGE Statistics");
5582 schildren = children = SYSCTL_CHILDREN(tree);
5583 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5584 children, COSFramesDroppedDueToFilters,
5585 "FramesDroppedDueToFilters");
5586 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5587 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5588 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5589 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5590 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5591 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5592 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5593 children, ifInDiscards, "InputDiscards");
5594 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5595 children, ifInErrors, "InputErrors");
5596 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5597 children, nicRecvThresholdHit, "RecvThresholdHit");
5598 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5599 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5600 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5601 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5602 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5603 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5604 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5605 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5606 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5607 children, nicRingStatusUpdate, "RingStatusUpdate");
5608 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5609 children, nicInterrupts, "Interrupts");
5610 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5611 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5612 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5613 children, nicSendThresholdHit, "SendThresholdHit");
5614
5615 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5616 NULL, "BGE RX Statistics");
5617 children = SYSCTL_CHILDREN(tree);
5618 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5619 children, rxstats.ifHCInOctets, "ifHCInOctets");
5620 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5621 children, rxstats.etherStatsFragments, "Fragments");
5622 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5623 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5624 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5625 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5626 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5627 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5628 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5629 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5630 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5631 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5632 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5633 children, rxstats.xoffPauseFramesReceived,
5634 "xoffPauseFramesReceived");
5635 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5636 children, rxstats.macControlFramesReceived,
5637 "ControlFramesReceived");
5638 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5639 children, rxstats.xoffStateEntered, "xoffStateEntered");
5640 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5641 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5642 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5643 children, rxstats.etherStatsJabbers, "Jabbers");
5644 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5645 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5646 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5647 children, rxstats.inRangeLengthError, "inRangeLengthError");
5648 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5649 children, rxstats.outRangeLengthError, "outRangeLengthError");
5650
5651 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5652 NULL, "BGE TX Statistics");
5653 children = SYSCTL_CHILDREN(tree);
5654 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5655 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5656 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5657 children, txstats.etherStatsCollisions, "Collisions");
5658 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5659 children, txstats.outXonSent, "XonSent");
5660 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5661 children, txstats.outXoffSent, "XoffSent");
5662 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5663 children, txstats.flowControlDone, "flowControlDone");
5664 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5665 children, txstats.dot3StatsInternalMacTransmitErrors,
5666 "InternalMacTransmitErrors");
5667 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5668 children, txstats.dot3StatsSingleCollisionFrames,
5669 "SingleCollisionFrames");
5670 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5671 children, txstats.dot3StatsMultipleCollisionFrames,
5672 "MultipleCollisionFrames");
5673 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5674 children, txstats.dot3StatsDeferredTransmissions,
5675 "DeferredTransmissions");
5676 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5677 children, txstats.dot3StatsExcessiveCollisions,
5678 "ExcessiveCollisions");
5679 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5680 children, txstats.dot3StatsLateCollisions,
5681 "LateCollisions");
5682 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5683 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5684 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5685 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5686 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5687 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5688 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5689 children, txstats.dot3StatsCarrierSenseErrors,
5690 "CarrierSenseErrors");
5691 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5692 children, txstats.ifOutDiscards, "Discards");
5693 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5694 children, txstats.ifOutErrors, "Errors");
5695}
5696
5697#undef BGE_SYSCTL_STAT
5698
5699#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5700 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5701
5702static void
5703bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5704 struct sysctl_oid_list *parent)
5705{
5706 struct sysctl_oid *tree;
5707 struct sysctl_oid_list *child, *schild;
5708 struct bge_mac_stats *stats;
5709
5710 stats = &sc->bge_mac_stats;
5711 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5712 NULL, "BGE Statistics");
5713 schild = child = SYSCTL_CHILDREN(tree);
5714 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5715 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5716 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5717 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5718 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5719 &stats->DmaWriteHighPriQueueFull,
5720 "NIC DMA Write High Priority Queue Full");
5721 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5722 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5723 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5724 &stats->InputDiscards, "Discarded Input Frames");
5725 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5726 &stats->InputErrors, "Input Errors");
5727 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5728 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5729
5730 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5731 NULL, "BGE RX Statistics");
5732 child = SYSCTL_CHILDREN(tree);
5733 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5734 &stats->ifHCInOctets, "Inbound Octets");
5735 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5736 &stats->etherStatsFragments, "Fragments");
5737 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5738 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5739 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5740 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5741 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5742 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5743 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5744 &stats->dot3StatsFCSErrors, "FCS Errors");
5745 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5746 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5747 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5748 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5749 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5750 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5751 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5752 &stats->macControlFramesReceived, "MAC Control Frames Received");
5753 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5754 &stats->xoffStateEntered, "XOFF State Entered");
5755 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5756 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5757 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5758 &stats->etherStatsJabbers, "Jabbers");
5759 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5760 &stats->etherStatsUndersizePkts, "Undersized Packets");
5761
5762 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5763 NULL, "BGE TX Statistics");
5764 child = SYSCTL_CHILDREN(tree);
5765 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5766 &stats->ifHCOutOctets, "Outbound Octets");
5767 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5768 &stats->etherStatsCollisions, "TX Collisions");
5769 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5770 &stats->outXonSent, "XON Sent");
5771 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5772 &stats->outXoffSent, "XOFF Sent");
5773 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5774 &stats->dot3StatsInternalMacTransmitErrors,
5775 "Internal MAC TX Errors");
5776 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5777 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5778 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5779 &stats->dot3StatsMultipleCollisionFrames,
5780 "Multiple Collision Frames");
5781 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5782 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5783 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5784 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5785 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5786 &stats->dot3StatsLateCollisions, "Late Collisions");
5787 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5788 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5789 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5790 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5791 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5792 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5793}
5794
5795#undef BGE_SYSCTL_STAT_ADD64
5796
5797static int
5798bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5799{
5800 struct bge_softc *sc;
5801 uint32_t result;
5802 int offset;
5803
5804 sc = (struct bge_softc *)arg1;
5805 offset = arg2;
5806 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5807 offsetof(bge_hostaddr, bge_addr_lo));
5808 return (sysctl_handle_int(oidp, &result, 0, req));
5809}
5810
5811#ifdef BGE_REGISTER_DEBUG
5812static int
5813bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5814{
5815 struct bge_softc *sc;
5816 uint16_t *sbdata;
5817 int error, result, sbsz;
5818 int i, j;
5819
5820 result = -1;
5821 error = sysctl_handle_int(oidp, &result, 0, req);
5822 if (error || (req->newptr == NULL))
5823 return (error);
5824
5825 if (result == 1) {
5826 sc = (struct bge_softc *)arg1;
5827
5828 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5829 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5830 sbsz = BGE_STATUS_BLK_SZ;
5831 else
5832 sbsz = 32;
5833 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5834 printf("Status Block:\n");
5835 BGE_LOCK(sc);
5836 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5837 sc->bge_cdata.bge_status_map,
5838 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5839 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5840 printf("%06x:", i);
5841 for (j = 0; j < 8; j++)
5842 printf(" %04x", sbdata[i++]);
5843 printf("\n");
5844 }
5845
5846 printf("Registers:\n");
5847 for (i = 0x800; i < 0xA00; ) {
5848 printf("%06x:", i);
5849 for (j = 0; j < 8; j++) {
5850 printf(" %08x", CSR_READ_4(sc, i));
5851 i += 4;
5852 }
5853 printf("\n");
5854 }
5855 BGE_UNLOCK(sc);
5856
5857 printf("Hardware Flags:\n");
5858 if (BGE_IS_5717_PLUS(sc))
5859 printf(" - 5717 Plus\n");
5860 if (BGE_IS_5755_PLUS(sc))
5861 printf(" - 5755 Plus\n");
5862 if (BGE_IS_575X_PLUS(sc))
5863 printf(" - 575X Plus\n");
5864 if (BGE_IS_5705_PLUS(sc))
5865 printf(" - 5705 Plus\n");
5866 if (BGE_IS_5714_FAMILY(sc))
5867 printf(" - 5714 Family\n");
5868 if (BGE_IS_5700_FAMILY(sc))
5869 printf(" - 5700 Family\n");
5870 if (sc->bge_flags & BGE_FLAG_JUMBO)
5871 printf(" - Supports Jumbo Frames\n");
5872 if (sc->bge_flags & BGE_FLAG_PCIX)
5873 printf(" - PCI-X Bus\n");
5874 if (sc->bge_flags & BGE_FLAG_PCIE)
5875 printf(" - PCI Express Bus\n");
5876 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5877 printf(" - No 3 LEDs\n");
5878 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5879 printf(" - RX Alignment Bug\n");
5880 }
5881
5882 return (error);
5883}
5884
5885static int
5886bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5887{
5888 struct bge_softc *sc;
5889 int error;
5890 uint16_t result;
5891 uint32_t val;
5892
5893 result = -1;
5894 error = sysctl_handle_int(oidp, &result, 0, req);
5895 if (error || (req->newptr == NULL))
5896 return (error);
5897
5898 if (result < 0x8000) {
5899 sc = (struct bge_softc *)arg1;
5900 val = CSR_READ_4(sc, result);
5901 printf("reg 0x%06X = 0x%08X\n", result, val);
5902 }
5903
5904 return (error);
5905}
5906
5907static int
5908bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5909{
5910 struct bge_softc *sc;
5911 int error;
5912 uint16_t result;
5913 uint32_t val;
5914
5915 result = -1;
5916 error = sysctl_handle_int(oidp, &result, 0, req);
5917 if (error || (req->newptr == NULL))
5918 return (error);
5919
5920 if (result < 0x8000) {
5921 sc = (struct bge_softc *)arg1;
5922 val = bge_readmem_ind(sc, result);
5923 printf("mem 0x%06X = 0x%08X\n", result, val);
5924 }
5925
5926 return (error);
5927}
5928#endif
5929
5930static int
5931bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5932{
5933
5934 if (sc->bge_flags & BGE_FLAG_EADDR)
5935 return (1);
5936
5937#ifdef __sparc64__
5938 OF_getetheraddr(sc->bge_dev, ether_addr);
5939 return (0);
5940#endif
5941 return (1);
5942}
5943
5944static int
5945bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5946{
5947 uint32_t mac_addr;
5948
5949 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
5950 if ((mac_addr >> 16) == 0x484b) {
5951 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5952 ether_addr[1] = (uint8_t)mac_addr;
5953 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
5954 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5955 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5956 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5957 ether_addr[5] = (uint8_t)mac_addr;
5958 return (0);
5959 }
5960 return (1);
5961}
5962
5963static int
5964bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5965{
5966 int mac_offset = BGE_EE_MAC_OFFSET;
5967
5968 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5969 mac_offset = BGE_EE_MAC_OFFSET_5906;
5970
5971 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5972 ETHER_ADDR_LEN));
5973}
5974
5975static int
5976bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5977{
5978
5979 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5980 return (1);
5981
5982 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5983 ETHER_ADDR_LEN));
5984}
5985
5986static int
5987bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5988{
5989 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5990 /* NOTE: Order is critical */
5991 bge_get_eaddr_fw,
5992 bge_get_eaddr_mem,
5993 bge_get_eaddr_nvram,
5994 bge_get_eaddr_eeprom,
5995 NULL
5996 };
5997 const bge_eaddr_fcn_t *func;
5998
5999 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6000 if ((*func)(sc, eaddr) == 0)
6001 break;
6002 }
6003 return (*func == NULL ? ENXIO : 0);
6004}
4115 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4116 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4117 BGE_RX_CPU_DRV_EVENT);
4118 }
4119 }
4120}
4121
4122static void
4123bge_tick(void *xsc)
4124{
4125 struct bge_softc *sc = xsc;
4126 struct mii_data *mii = NULL;
4127
4128 BGE_LOCK_ASSERT(sc);
4129
4130 /* Synchronize with possible callout reset/stop. */
4131 if (callout_pending(&sc->bge_stat_ch) ||
4132 !callout_active(&sc->bge_stat_ch))
4133 return;
4134
4135 if (BGE_IS_5705_PLUS(sc))
4136 bge_stats_update_regs(sc);
4137 else
4138 bge_stats_update(sc);
4139
4140 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4141 mii = device_get_softc(sc->bge_miibus);
4142 /*
 4143		 * Do not touch the PHY if we have link up. This could break
 4144		 * IPMI/ASF mode or produce extra input errors
 4145		 * (extra errors were reported for bcm5701 & bcm5704).
4146 */
4147 if (!sc->bge_link)
4148 mii_tick(mii);
4149 } else {
4150 /*
 4151		 * Since auto-polling can't be used in TBI mode, we have to poll
 4152		 * the link status manually. Here we register a pending link event
 4153		 * and trigger an interrupt.
4154 */
4155#ifdef DEVICE_POLLING
4156 /* In polling mode we poll link state in bge_poll(). */
4157 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4158#endif
4159 {
4160 sc->bge_link_evt++;
4161 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4162 sc->bge_flags & BGE_FLAG_5788)
4163 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4164 else
4165 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4166 }
4167 }
4168
4169 bge_asf_driver_up(sc);
4170 bge_watchdog(sc);
4171
4172 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4173}
4174
4175static void
4176bge_stats_update_regs(struct bge_softc *sc)
4177{
4178 struct ifnet *ifp;
4179 struct bge_mac_stats *stats;
4180
4181 ifp = sc->bge_ifp;
4182 stats = &sc->bge_mac_stats;
4183
4184 stats->ifHCOutOctets +=
4185 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4186 stats->etherStatsCollisions +=
4187 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4188 stats->outXonSent +=
4189 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4190 stats->outXoffSent +=
4191 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4192 stats->dot3StatsInternalMacTransmitErrors +=
4193 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4194 stats->dot3StatsSingleCollisionFrames +=
4195 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4196 stats->dot3StatsMultipleCollisionFrames +=
4197 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4198 stats->dot3StatsDeferredTransmissions +=
4199 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4200 stats->dot3StatsExcessiveCollisions +=
4201 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4202 stats->dot3StatsLateCollisions +=
4203 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4204 stats->ifHCOutUcastPkts +=
4205 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4206 stats->ifHCOutMulticastPkts +=
4207 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4208 stats->ifHCOutBroadcastPkts +=
4209 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4210
4211 stats->ifHCInOctets +=
4212 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4213 stats->etherStatsFragments +=
4214 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4215 stats->ifHCInUcastPkts +=
4216 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4217 stats->ifHCInMulticastPkts +=
4218 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4219 stats->ifHCInBroadcastPkts +=
4220 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4221 stats->dot3StatsFCSErrors +=
4222 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4223 stats->dot3StatsAlignmentErrors +=
4224 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4225 stats->xonPauseFramesReceived +=
4226 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4227 stats->xoffPauseFramesReceived +=
4228 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4229 stats->macControlFramesReceived +=
4230 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4231 stats->xoffStateEntered +=
4232 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4233 stats->dot3StatsFramesTooLong +=
4234 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4235 stats->etherStatsJabbers +=
4236 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4237 stats->etherStatsUndersizePkts +=
4238 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4239
4240 stats->FramesDroppedDueToFilters +=
4241 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4242 stats->DmaWriteQueueFull +=
4243 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4244 stats->DmaWriteHighPriQueueFull +=
4245 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4246 stats->NoMoreRxBDs +=
4247 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4248 stats->InputDiscards +=
4249 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4250 stats->InputErrors +=
4251 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4252 stats->RecvThresholdHit +=
4253 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4254
4255 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4256 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4257 stats->InputErrors);
4258}
4259
4260static void
4261bge_stats_clear_regs(struct bge_softc *sc)
4262{
4263
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4265 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4267 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4268 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4269 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4270 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4271 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4272 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4273 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4274 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4275 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4276 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4277
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4283 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4284 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4285 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4286 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4287 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4288 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4289 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4290 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4291 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4292
4293 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4294 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4295 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4296 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4297 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4298 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4299 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4300}
4301
4302static void
4303bge_stats_update(struct bge_softc *sc)
4304{
4305 struct ifnet *ifp;
4306 bus_size_t stats;
4307 uint32_t cnt; /* current register value */
4308
4309 ifp = sc->bge_ifp;
4310
4311 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4312
4313#define READ_STAT(sc, stats, stat) \
4314 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4315
4316 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4317 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4318 sc->bge_tx_collisions = cnt;
4319
4320 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4321 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4322 sc->bge_rx_discards = cnt;
4323
4324 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4325 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4326 sc->bge_tx_discards = cnt;
4327
4328#undef READ_STAT
4329}
4330
4331/*
4332 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4333 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4334 * but when such padded frames employ the bge IP/TCP checksum offload,
4335 * the hardware checksum assist gives incorrect results (possibly
4336 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4337 * If we pad such runts with zeros, the onboard checksum comes out correct.
4338 */
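/*
 * A short worked example of the padding math below, assuming
 * ETHER_MIN_NOPAD is 60 (ETHER_MIN_LEN minus ETHER_CRC_LEN): a frame of
 * 14 (Ethernet) + 20 (IP) + 8 (UDP) + 10 (payload) = 52 bytes gives
 * padlen = 60 - 52 = 8, so bge_cksum_pad() appends 8 zero bytes before
 * the checksum offload sees the frame.
 */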
4339static __inline int
4340bge_cksum_pad(struct mbuf *m)
4341{
4342 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4343 struct mbuf *last;
4344
4345 /* If there's only the packet-header and we can pad there, use it. */
4346 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4347 M_TRAILINGSPACE(m) >= padlen) {
4348 last = m;
4349 } else {
4350 /*
4351 * Walk packet chain to find last mbuf. We will either
4352 * pad there, or append a new mbuf and pad it.
4353 */
4354 for (last = m; last->m_next != NULL; last = last->m_next);
4355 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4356 /* Allocate new empty mbuf, pad it. Compact later. */
4357 struct mbuf *n;
4358
4359 MGET(n, M_DONTWAIT, MT_DATA);
4360 if (n == NULL)
4361 return (ENOBUFS);
4362 n->m_len = 0;
4363 last->m_next = n;
4364 last = n;
4365 }
4366 }
4367
4368 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4369 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4370 last->m_len += padlen;
4371 m->m_pkthdr.len += padlen;
4372
4373 return (0);
4374}
4375
4376static struct mbuf *
4377bge_check_short_dma(struct mbuf *m)
4378{
4379 struct mbuf *n;
4380 int found;
4381
4382 /*
4383	 * If the device receives two back-to-back send BDs with less than
4384	 * or equal to 8 total bytes, then the device may hang. The two
4385	 * back-to-back send BDs must be in the same frame for this failure
4386	 * to occur. Scan the mbuf chain and see whether two back-to-back
4387	 * send BDs are there. If this is the case, allocate a new mbuf
4388	 * and copy the frame to work around the silicon bug.
4389 */
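	/*
	 * Illustrative case: a chain with m_len values 4, 6 and 1400
	 * contains two consecutive mbufs shorter than 8 bytes, so the
	 * scan below flags it and the frame is compacted with m_defrag().
	 */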
4390 for (n = m, found = 0; n != NULL; n = n->m_next) {
4391 if (n->m_len < 8) {
4392 found++;
4393 if (found > 1)
4394 break;
4395 continue;
4396 }
4397 found = 0;
4398 }
4399
4400 if (found > 1) {
4401 n = m_defrag(m, M_DONTWAIT);
4402 if (n == NULL)
4403 m_freem(m);
4404 } else
4405 n = m;
4406 return (n);
4407}
4408
4409static struct mbuf *
4410bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4411 uint16_t *flags)
4412{
4413 struct ip *ip;
4414 struct tcphdr *tcp;
4415 struct mbuf *n;
4416 uint16_t hlen;
4417 uint32_t poff;
4418
4419 if (M_WRITABLE(m) == 0) {
4420 /* Get a writable copy. */
4421 n = m_dup(m, M_DONTWAIT);
4422 m_freem(m);
4423 if (n == NULL)
4424 return (NULL);
4425 m = n;
4426 }
4427 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4428 if (m == NULL)
4429 return (NULL);
4430 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4431 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4432 m = m_pullup(m, poff + sizeof(struct tcphdr));
4433 if (m == NULL)
4434 return (NULL);
4435 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4436 m = m_pullup(m, poff + (tcp->th_off << 2));
4437 if (m == NULL)
4438 return (NULL);
4439 /*
4440	 * It seems the controller doesn't modify the IP length and TCP pseudo
4441	 * checksum. These checksums computed by the upper stack should be 0.
4442 */
4443 *mss = m->m_pkthdr.tso_segsz;
4444 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4445 ip->ip_sum = 0;
4446 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4447 /* Clear pseudo checksum computed by TCP stack. */
4448 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4449 tcp->th_sum = 0;
4450 /*
4451	 * Broadcom controllers use different descriptor formats for
4452	 * TSO depending on the ASIC revision. Due to the TSO-capable firmware
4453	 * licensing issue and the lower performance of firmware-based TSO,
4454	 * we only support hardware-based TSO.
4455 */
4456 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4457 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4458 if (sc->bge_flags & BGE_FLAG_TSO3) {
4459 /*
4460 * For BCM5717 and newer controllers, hardware based TSO
4461 * uses the 14 lower bits of the bge_mss field to store the
4462 * MSS and the upper 2 bits to store the lowest 2 bits of
4463 * the IP/TCP header length. The upper 6 bits of the header
4464 * length are stored in the bge_flags[14:10,4] field. Jumbo
4465 * frames are supported.
4466 */
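		/*
		 * Worked example for the packing below: 20-byte IP and
		 * 20-byte TCP headers give hlen = 40 >> 2 = 10 (0b01010),
		 * so (hlen & 0x3) = 2 lands in mss bits 15:14, while
		 * (hlen & 0xF8) = 8 is shifted into flags bit 10 and
		 * (hlen & 0x4) = 0 into flags bit 4.
		 */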
4467 *mss |= ((hlen & 0x3) << 14);
4468 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4469 } else {
4470 /*
4471 * For BCM5755 and newer controllers, hardware based TSO uses
4472 * the lower 11 bits to store the MSS and the upper 5 bits to
4473 * store the IP/TCP header length. Jumbo frames are not
4474 * supported.
4475 */
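		/*
		 * Continuing the example above, hlen = 10 gives
		 * *mss |= (10 << 11): the header length sits in mss bits
		 * 15:11 and the MSS is kept in bits 10:0.
		 */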
4476 *mss |= (hlen << 11);
4477 }
4478 return (m);
4479}
4480
4481/*
4482 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4483 * pointers to descriptors.
4484 */
4485static int
4486bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4487{
4488 bus_dma_segment_t segs[BGE_NSEG_NEW];
4489 bus_dmamap_t map;
4490 struct bge_tx_bd *d;
4491 struct mbuf *m = *m_head;
4492 uint32_t idx = *txidx;
4493 uint16_t csum_flags, mss, vlan_tag;
4494 int nsegs, i, error;
4495
4496 csum_flags = 0;
4497 mss = 0;
4498 vlan_tag = 0;
4499 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4500 m->m_next != NULL) {
4501 *m_head = bge_check_short_dma(m);
4502 if (*m_head == NULL)
4503 return (ENOBUFS);
4504 m = *m_head;
4505 }
4506 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4507 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4508 if (*m_head == NULL)
4509 return (ENOBUFS);
4510 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4511 BGE_TXBDFLAG_CPU_POST_DMA;
4512 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4513 if (m->m_pkthdr.csum_flags & CSUM_IP)
4514 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4515 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4516 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4517 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4518 (error = bge_cksum_pad(m)) != 0) {
4519 m_freem(m);
4520 *m_head = NULL;
4521 return (error);
4522 }
4523 }
4524 if (m->m_flags & M_LASTFRAG)
4525 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4526 else if (m->m_flags & M_FRAG)
4527 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4528 }
4529
4530 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4531 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4532 m->m_pkthdr.len > ETHER_MAX_LEN)
4533 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4534 if (sc->bge_forced_collapse > 0 &&
4535 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4536 /*
4537			 * Forcibly collapse mbuf chains to overcome a hardware
4538			 * limitation which only supports a single outstanding
4539			 * DMA read operation.
4540 */
4541 if (sc->bge_forced_collapse == 1)
4542 m = m_defrag(m, M_DONTWAIT);
4543 else
4544 m = m_collapse(m, M_DONTWAIT,
4545 sc->bge_forced_collapse);
4546 if (m == NULL)
4547 m = *m_head;
4548 *m_head = m;
4549 }
4550 }
4551
4552 map = sc->bge_cdata.bge_tx_dmamap[idx];
4553 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4554 &nsegs, BUS_DMA_NOWAIT);
4555 if (error == EFBIG) {
4556 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4557 if (m == NULL) {
4558 m_freem(*m_head);
4559 *m_head = NULL;
4560 return (ENOBUFS);
4561 }
4562 *m_head = m;
4563 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4564 m, segs, &nsegs, BUS_DMA_NOWAIT);
4565 if (error) {
4566 m_freem(m);
4567 *m_head = NULL;
4568 return (error);
4569 }
4570 } else if (error != 0)
4571 return (error);
4572
4573 /* Check if we have enough free send BDs. */
4574 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4575 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4576 return (ENOBUFS);
4577 }
4578
4579 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4580
4581 if (m->m_flags & M_VLANTAG) {
4582 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4583 vlan_tag = m->m_pkthdr.ether_vtag;
4584 }
4585 for (i = 0; ; i++) {
4586 d = &sc->bge_ldata.bge_tx_ring[idx];
4587 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4588 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4589 d->bge_len = segs[i].ds_len;
4590 d->bge_flags = csum_flags;
4591 d->bge_vlan_tag = vlan_tag;
4592 d->bge_mss = mss;
4593 if (i == nsegs - 1)
4594 break;
4595 BGE_INC(idx, BGE_TX_RING_CNT);
4596 }
4597
4598 /* Mark the last segment as end of packet... */
4599 d->bge_flags |= BGE_TXBDFLAG_END;
4600
4601 /*
4602	 * Ensure that the map for this transmission
4603	 * is placed at the array index of the last descriptor
4604	 * in this chain.
4605 */
4606 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4607 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4608 sc->bge_cdata.bge_tx_chain[idx] = m;
4609 sc->bge_txcnt += nsegs;
4610
4611 BGE_INC(idx, BGE_TX_RING_CNT);
4612 *txidx = idx;
4613
4614 return (0);
4615}
4616
4617/*
4618 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4619 * to the mbuf data regions directly in the transmit descriptors.
4620 */
4621static void
4622bge_start_locked(struct ifnet *ifp)
4623{
4624 struct bge_softc *sc;
4625 struct mbuf *m_head;
4626 uint32_t prodidx;
4627 int count;
4628
4629 sc = ifp->if_softc;
4630 BGE_LOCK_ASSERT(sc);
4631
4632 if (!sc->bge_link ||
4633 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4634 IFF_DRV_RUNNING)
4635 return;
4636
4637 prodidx = sc->bge_tx_prodidx;
4638
4639 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4640 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4641 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4642 break;
4643 }
4644 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4645 if (m_head == NULL)
4646 break;
4647
4648 /*
4649 * XXX
4650 * The code inside the if() block is never reached since we
4651 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4652 * requests to checksum TCP/UDP in a fragmented packet.
4653 *
4654 * XXX
4655 * safety overkill. If this is a fragmented packet chain
4656 * with delayed TCP/UDP checksums, then only encapsulate
4657 * it if we have enough descriptors to handle the entire
4658 * chain at once.
4659 * (paranoia -- may not actually be needed)
4660 */
4661 if (m_head->m_flags & M_FIRSTFRAG &&
4662 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4663 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4664 m_head->m_pkthdr.csum_data + 16) {
4665 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4666 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4667 break;
4668 }
4669 }
4670
4671 /*
4672 * Pack the data into the transmit ring. If we
4673 * don't have room, set the OACTIVE flag and wait
4674 * for the NIC to drain the ring.
4675 */
4676 if (bge_encap(sc, &m_head, &prodidx)) {
4677 if (m_head == NULL)
4678 break;
4679 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4680 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4681 break;
4682 }
4683 ++count;
4684
4685 /*
4686		 * If there's a BPF listener, bounce a copy of this frame
4687		 * to it.
4688 */
4689#ifdef ETHER_BPF_MTAP
4690 ETHER_BPF_MTAP(ifp, m_head);
4691#else
4692 BPF_MTAP(ifp, m_head);
4693#endif
4694 }
4695
4696 if (count > 0) {
4697 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4698 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4699 /* Transmit. */
4700 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4701 /* 5700 b2 errata */
4702 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4703 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4704
4705 sc->bge_tx_prodidx = prodidx;
4706
4707 /*
4708 * Set a timeout in case the chip goes out to lunch.
4709 */
4710 sc->bge_timer = 5;
4711 }
4712}
4713
4714/*
4715 * Main transmit entry point: acquire the driver lock and hand the
4716 * interface off to bge_start_locked().
4717 */
4718static void
4719bge_start(struct ifnet *ifp)
4720{
4721 struct bge_softc *sc;
4722
4723 sc = ifp->if_softc;
4724 BGE_LOCK(sc);
4725 bge_start_locked(ifp);
4726 BGE_UNLOCK(sc);
4727}
4728
4729static void
4730bge_init_locked(struct bge_softc *sc)
4731{
4732 struct ifnet *ifp;
4733 uint16_t *m;
4734 uint32_t mode;
4735
4736 BGE_LOCK_ASSERT(sc);
4737
4738 ifp = sc->bge_ifp;
4739
4740 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4741 return;
4742
4743 /* Cancel pending I/O and flush buffers. */
4744 bge_stop(sc);
4745
4746 bge_stop_fw(sc);
4747 bge_sig_pre_reset(sc, BGE_RESET_START);
4748 bge_reset(sc);
4749 bge_sig_legacy(sc, BGE_RESET_START);
4750 bge_sig_post_reset(sc, BGE_RESET_START);
4751
4752 bge_chipinit(sc);
4753
4754 /*
4755 * Init the various state machines, ring
4756 * control blocks and firmware.
4757 */
4758 if (bge_blockinit(sc)) {
4759 device_printf(sc->bge_dev, "initialization failure\n");
4760 return;
4761 }
4762
4763 ifp = sc->bge_ifp;
4764
4765 /* Specify MTU. */
4766 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4767 ETHER_HDR_LEN + ETHER_CRC_LEN +
4768 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4769
4770 /* Load our MAC address. */
4771 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4772 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4773 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4774
4775 /* Program promiscuous mode. */
4776 bge_setpromisc(sc);
4777
4778 /* Program multicast filter. */
4779 bge_setmulti(sc);
4780
4781 /* Program VLAN tag stripping. */
4782 bge_setvlan(sc);
4783
4784 /* Override UDP checksum offloading. */
4785 if (sc->bge_forced_udpcsum == 0)
4786 sc->bge_csum_features &= ~CSUM_UDP;
4787 else
4788 sc->bge_csum_features |= CSUM_UDP;
4789 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4790 ifp->if_capenable & IFCAP_TXCSUM) {
4791 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4792 ifp->if_hwassist |= sc->bge_csum_features;
4793 }
4794
4795 /* Init RX ring. */
4796 if (bge_init_rx_ring_std(sc) != 0) {
4797 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4798 bge_stop(sc);
4799 return;
4800 }
4801
4802 /*
4803 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4804	 * memory to ensure that the chip has in fact read the first
4805 * entry of the ring.
4806 */
4807 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4808 uint32_t v, i;
4809 for (i = 0; i < 10; i++) {
4810 DELAY(20);
4811 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4812 if (v == (MCLBYTES - ETHER_ALIGN))
4813 break;
4814 }
4815 if (i == 10)
4816			device_printf(sc->bge_dev,
4817 "5705 A0 chip failed to load RX ring\n");
4818 }
4819
4820 /* Init jumbo RX ring. */
4821 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4822 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4823 (MCLBYTES - ETHER_ALIGN)) {
4824 if (bge_init_rx_ring_jumbo(sc) != 0) {
4825 device_printf(sc->bge_dev,
4826 "no memory for jumbo Rx buffers.\n");
4827 bge_stop(sc);
4828 return;
4829 }
4830 }
4831
4832 /* Init our RX return ring index. */
4833 sc->bge_rx_saved_considx = 0;
4834
4835 /* Init our RX/TX stat counters. */
4836 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4837
4838 /* Init TX ring. */
4839 bge_init_tx_ring(sc);
4840
4841 /* Enable TX MAC state machine lockup fix. */
4842 mode = CSR_READ_4(sc, BGE_TX_MODE);
4843 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4844 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4845 /* Turn on transmitter. */
4846 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4847
4848 /* Turn on receiver. */
4849 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4850
4851 /*
4852	 * Set the number of good frames to receive after the RX MBUF
4853	 * Low Watermark has been reached. After the RX MAC receives
4854 * this number of frames, it will drop subsequent incoming
4855 * frames until the MBUF High Watermark is reached.
4856 */
4857 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4858 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4859 else
4860 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4861
4862 /* Clear MAC statistics. */
4863 if (BGE_IS_5705_PLUS(sc))
4864 bge_stats_clear_regs(sc);
4865
4866 /* Tell firmware we're alive. */
4867 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4868
4869#ifdef DEVICE_POLLING
4870 /* Disable interrupts if we are polling. */
4871 if (ifp->if_capenable & IFCAP_POLLING) {
4872 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4873 BGE_PCIMISCCTL_MASK_PCI_INTR);
4874 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4875 } else
4876#endif
4877
4878 /* Enable host interrupts. */
4879 {
4880 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4881 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4882 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4883 }
4884
4885 bge_ifmedia_upd_locked(ifp);
4886
4887 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4888 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4889
4890 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4891}
4892
4893static void
4894bge_init(void *xsc)
4895{
4896 struct bge_softc *sc = xsc;
4897
4898 BGE_LOCK(sc);
4899 bge_init_locked(sc);
4900 BGE_UNLOCK(sc);
4901}
4902
4903/*
4904 * Set media options.
4905 */
4906static int
4907bge_ifmedia_upd(struct ifnet *ifp)
4908{
4909 struct bge_softc *sc = ifp->if_softc;
4910 int res;
4911
4912 BGE_LOCK(sc);
4913 res = bge_ifmedia_upd_locked(ifp);
4914 BGE_UNLOCK(sc);
4915
4916 return (res);
4917}
4918
4919static int
4920bge_ifmedia_upd_locked(struct ifnet *ifp)
4921{
4922 struct bge_softc *sc = ifp->if_softc;
4923 struct mii_data *mii;
4924 struct mii_softc *miisc;
4925 struct ifmedia *ifm;
4926
4927 BGE_LOCK_ASSERT(sc);
4928
4929 ifm = &sc->bge_ifmedia;
4930
4931 /* If this is a 1000baseX NIC, enable the TBI port. */
4932 if (sc->bge_flags & BGE_FLAG_TBI) {
4933 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4934 return (EINVAL);
4935 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4936 case IFM_AUTO:
4937 /*
4938 * The BCM5704 ASIC appears to have a special
4939 * mechanism for programming the autoneg
4940 * advertisement registers in TBI mode.
4941 */
4942 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4943 uint32_t sgdig;
4944 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4945 if (sgdig & BGE_SGDIGSTS_DONE) {
4946 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4947 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4948 sgdig |= BGE_SGDIGCFG_AUTO |
4949 BGE_SGDIGCFG_PAUSE_CAP |
4950 BGE_SGDIGCFG_ASYM_PAUSE;
4951 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4952 sgdig | BGE_SGDIGCFG_SEND);
4953 DELAY(5);
4954 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4955 }
4956 }
4957 break;
4958 case IFM_1000_SX:
4959 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4960 BGE_CLRBIT(sc, BGE_MAC_MODE,
4961 BGE_MACMODE_HALF_DUPLEX);
4962 } else {
4963 BGE_SETBIT(sc, BGE_MAC_MODE,
4964 BGE_MACMODE_HALF_DUPLEX);
4965 }
4966 break;
4967 default:
4968 return (EINVAL);
4969 }
4970 return (0);
4971 }
4972
4973 sc->bge_link_evt++;
4974 mii = device_get_softc(sc->bge_miibus);
4975 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4976 PHY_RESET(miisc);
4977 mii_mediachg(mii);
4978
4979 /*
4980	 * Force an interrupt so that we will call bge_link_upd
4981	 * if needed and clear any pending link state attention.
4982	 * Without this we do not get any further interrupts
4983	 * for link state changes, thus will not bring the link UP and
4984	 * will not be able to send in bge_start_locked. The only
4985	 * way to get things working was to receive a packet and
4986	 * get an RX intr.
4987	 * bge_tick should help for fiber cards and we might not
4988	 * need to do this here if BGE_FLAG_TBI is set, but as
4989	 * we poll for fiber anyway it should not harm.
4990 */
4991 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4992 sc->bge_flags & BGE_FLAG_5788)
4993 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4994 else
4995 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4996
4997 return (0);
4998}
4999
5000/*
5001 * Report current media status.
5002 */
5003static void
5004bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5005{
5006 struct bge_softc *sc = ifp->if_softc;
5007 struct mii_data *mii;
5008
5009 BGE_LOCK(sc);
5010
5011 if (sc->bge_flags & BGE_FLAG_TBI) {
5012 ifmr->ifm_status = IFM_AVALID;
5013 ifmr->ifm_active = IFM_ETHER;
5014 if (CSR_READ_4(sc, BGE_MAC_STS) &
5015 BGE_MACSTAT_TBI_PCS_SYNCHED)
5016 ifmr->ifm_status |= IFM_ACTIVE;
5017 else {
5018 ifmr->ifm_active |= IFM_NONE;
5019 BGE_UNLOCK(sc);
5020 return;
5021 }
5022 ifmr->ifm_active |= IFM_1000_SX;
5023 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5024 ifmr->ifm_active |= IFM_HDX;
5025 else
5026 ifmr->ifm_active |= IFM_FDX;
5027 BGE_UNLOCK(sc);
5028 return;
5029 }
5030
5031 mii = device_get_softc(sc->bge_miibus);
5032 mii_pollstat(mii);
5033 ifmr->ifm_active = mii->mii_media_active;
5034 ifmr->ifm_status = mii->mii_media_status;
5035
5036 BGE_UNLOCK(sc);
5037}
5038
5039static int
5040bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5041{
5042 struct bge_softc *sc = ifp->if_softc;
5043 struct ifreq *ifr = (struct ifreq *) data;
5044 struct mii_data *mii;
5045 int flags, mask, error = 0;
5046
5047 switch (command) {
5048 case SIOCSIFMTU:
5049 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5050 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5051 if (ifr->ifr_mtu < ETHERMIN ||
5052 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5053 error = EINVAL;
5054 break;
5055 }
5056 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5057 error = EINVAL;
5058 break;
5059 }
5060 BGE_LOCK(sc);
5061 if (ifp->if_mtu != ifr->ifr_mtu) {
5062 ifp->if_mtu = ifr->ifr_mtu;
5063 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5064 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5065 bge_init_locked(sc);
5066 }
5067 }
5068 BGE_UNLOCK(sc);
5069 break;
5070 case SIOCSIFFLAGS:
5071 BGE_LOCK(sc);
5072 if (ifp->if_flags & IFF_UP) {
5073 /*
5074 * If only the state of the PROMISC flag changed,
5075 * then just use the 'set promisc mode' command
5076 * instead of reinitializing the entire NIC. Doing
5077 * a full re-init means reloading the firmware and
5078 * waiting for it to start up, which may take a
5079 * second or two. Similarly for ALLMULTI.
5080 */
5081 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5082 flags = ifp->if_flags ^ sc->bge_if_flags;
5083 if (flags & IFF_PROMISC)
5084 bge_setpromisc(sc);
5085 if (flags & IFF_ALLMULTI)
5086 bge_setmulti(sc);
5087 } else
5088 bge_init_locked(sc);
5089 } else {
5090 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5091 bge_stop(sc);
5092 }
5093 }
5094 sc->bge_if_flags = ifp->if_flags;
5095 BGE_UNLOCK(sc);
5096 error = 0;
5097 break;
5098 case SIOCADDMULTI:
5099 case SIOCDELMULTI:
5100 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5101 BGE_LOCK(sc);
5102 bge_setmulti(sc);
5103 BGE_UNLOCK(sc);
5104 error = 0;
5105 }
5106 break;
5107 case SIOCSIFMEDIA:
5108 case SIOCGIFMEDIA:
5109 if (sc->bge_flags & BGE_FLAG_TBI) {
5110 error = ifmedia_ioctl(ifp, ifr,
5111 &sc->bge_ifmedia, command);
5112 } else {
5113 mii = device_get_softc(sc->bge_miibus);
5114 error = ifmedia_ioctl(ifp, ifr,
5115 &mii->mii_media, command);
5116 }
5117 break;
5118 case SIOCSIFCAP:
5119 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5120#ifdef DEVICE_POLLING
5121 if (mask & IFCAP_POLLING) {
5122 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5123 error = ether_poll_register(bge_poll, ifp);
5124 if (error)
5125 return (error);
5126 BGE_LOCK(sc);
5127 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5128 BGE_PCIMISCCTL_MASK_PCI_INTR);
5129 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5130 ifp->if_capenable |= IFCAP_POLLING;
5131 BGE_UNLOCK(sc);
5132 } else {
5133 error = ether_poll_deregister(ifp);
5134 /* Enable interrupt even in error case */
5135 BGE_LOCK(sc);
5136 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5137 BGE_PCIMISCCTL_MASK_PCI_INTR);
5138 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5139 ifp->if_capenable &= ~IFCAP_POLLING;
5140 BGE_UNLOCK(sc);
5141 }
5142 }
5143#endif
5144 if ((mask & IFCAP_TXCSUM) != 0 &&
5145 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5146 ifp->if_capenable ^= IFCAP_TXCSUM;
5147 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5148 ifp->if_hwassist |= sc->bge_csum_features;
5149 else
5150 ifp->if_hwassist &= ~sc->bge_csum_features;
5151 }
5152
5153 if ((mask & IFCAP_RXCSUM) != 0 &&
5154 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5155 ifp->if_capenable ^= IFCAP_RXCSUM;
5156
5157 if ((mask & IFCAP_TSO4) != 0 &&
5158 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5159 ifp->if_capenable ^= IFCAP_TSO4;
5160 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5161 ifp->if_hwassist |= CSUM_TSO;
5162 else
5163 ifp->if_hwassist &= ~CSUM_TSO;
5164 }
5165
5166 if (mask & IFCAP_VLAN_MTU) {
5167 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5168 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5169 bge_init(sc);
5170 }
5171
5172 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5173 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5174 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5175 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5176 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5177 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5178 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5179 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5180 BGE_LOCK(sc);
5181 bge_setvlan(sc);
5182 BGE_UNLOCK(sc);
5183 }
5184#ifdef VLAN_CAPABILITIES
5185 VLAN_CAPABILITIES(ifp);
5186#endif
5187 break;
5188 default:
5189 error = ether_ioctl(ifp, command, data);
5190 break;
5191 }
5192
5193 return (error);
5194}
5195
5196static void
5197bge_watchdog(struct bge_softc *sc)
5198{
5199 struct ifnet *ifp;
5200
5201 BGE_LOCK_ASSERT(sc);
5202
5203 if (sc->bge_timer == 0 || --sc->bge_timer)
5204 return;
5205
5206 ifp = sc->bge_ifp;
5207
5208 if_printf(ifp, "watchdog timeout -- resetting\n");
5209
5210 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5211 bge_init_locked(sc);
5212
5213 ifp->if_oerrors++;
5214}
5215
5216static void
5217bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5218{
5219 int i;
5220
5221 BGE_CLRBIT(sc, reg, bit);
5222
5223 for (i = 0; i < BGE_TIMEOUT; i++) {
5224 if ((CSR_READ_4(sc, reg) & bit) == 0)
5225 return;
5226 DELAY(100);
5227 }
5228}
5229
5230/*
5231 * Stop the adapter and free any mbufs allocated to the
5232 * RX and TX lists.
5233 */
5234static void
5235bge_stop(struct bge_softc *sc)
5236{
5237 struct ifnet *ifp;
5238
5239 BGE_LOCK_ASSERT(sc);
5240
5241 ifp = sc->bge_ifp;
5242
5243 callout_stop(&sc->bge_stat_ch);
5244
5245 /* Disable host interrupts. */
5246 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5247 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5248
5249 /*
5250 * Tell firmware we're shutting down.
5251 */
5252 bge_stop_fw(sc);
5253 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5254
5255 /*
5256 * Disable all of the receiver blocks.
5257 */
5258 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5259 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5260 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5261 if (BGE_IS_5700_FAMILY(sc))
5262 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5263 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5264 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5265 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5266
5267 /*
5268 * Disable all of the transmit blocks.
5269 */
5270 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5271 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5272 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5273 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5274 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5275 if (BGE_IS_5700_FAMILY(sc))
5276 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5277 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5278
5279 /*
5280 * Shut down all of the memory managers and related
5281 * state machines.
5282 */
5283 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5284 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5285 if (BGE_IS_5700_FAMILY(sc))
5286 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5287
5288 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5289 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5290 if (!(BGE_IS_5705_PLUS(sc))) {
5291 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5292 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5293 }
5294 /* Update MAC statistics. */
5295 if (BGE_IS_5705_PLUS(sc))
5296 bge_stats_update_regs(sc);
5297
5298 bge_reset(sc);
5299 bge_sig_legacy(sc, BGE_RESET_STOP);
5300 bge_sig_post_reset(sc, BGE_RESET_STOP);
5301
5302 /*
5303 * Keep the ASF firmware running if up.
5304 */
5305 if (sc->bge_asf_mode & ASF_STACKUP)
5306 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5307 else
5308 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5309
5310 /* Free the RX lists. */
5311 bge_free_rx_ring_std(sc);
5312
5313 /* Free jumbo RX list. */
5314 if (BGE_IS_JUMBO_CAPABLE(sc))
5315 bge_free_rx_ring_jumbo(sc);
5316
5317 /* Free TX buffers. */
5318 bge_free_tx_ring(sc);
5319
5320 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5321
5322 /* Clear MAC's link state (PHY may still have link UP). */
5323 if (bootverbose && sc->bge_link)
5324 if_printf(sc->bge_ifp, "link DOWN\n");
5325 sc->bge_link = 0;
5326
5327 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5328}
5329
5330/*
5331 * Stop all chip I/O so that the kernel's probe routines don't
5332 * get confused by errant DMAs when rebooting.
5333 */
5334static int
5335bge_shutdown(device_t dev)
5336{
5337 struct bge_softc *sc;
5338
5339 sc = device_get_softc(dev);
5340 BGE_LOCK(sc);
5341 bge_stop(sc);
5342 bge_reset(sc);
5343 BGE_UNLOCK(sc);
5344
5345 return (0);
5346}
5347
5348static int
5349bge_suspend(device_t dev)
5350{
5351 struct bge_softc *sc;
5352
5353 sc = device_get_softc(dev);
5354 BGE_LOCK(sc);
5355 bge_stop(sc);
5356 BGE_UNLOCK(sc);
5357
5358 return (0);
5359}
5360
5361static int
5362bge_resume(device_t dev)
5363{
5364 struct bge_softc *sc;
5365 struct ifnet *ifp;
5366
5367 sc = device_get_softc(dev);
5368 BGE_LOCK(sc);
5369 ifp = sc->bge_ifp;
5370 if (ifp->if_flags & IFF_UP) {
5371 bge_init_locked(sc);
5372 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5373 bge_start_locked(ifp);
5374 }
5375 BGE_UNLOCK(sc);
5376
5377 return (0);
5378}
5379
5380static void
5381bge_link_upd(struct bge_softc *sc)
5382{
5383 struct mii_data *mii;
5384 uint32_t link, status;
5385
5386 BGE_LOCK_ASSERT(sc);
5387
5388 /* Clear 'pending link event' flag. */
5389 sc->bge_link_evt = 0;
5390
5391 /*
5392 * Process link state changes.
5393 * Grrr. The link status word in the status block does
5394 * not work correctly on the BCM5700 rev AX and BX chips,
5395 * according to all available information. Hence, we have
5396 * to enable MII interrupts in order to properly obtain
5397 * async link changes. Unfortunately, this also means that
5398 * we have to read the MAC status register to detect link
5399 * changes, thereby adding an additional register access to
5400 * the interrupt handler.
5401 *
5402	 * XXX: perhaps the link state detection procedure used for
5403	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5404 */
5405
5406 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5407 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5408 status = CSR_READ_4(sc, BGE_MAC_STS);
5409 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5410 mii = device_get_softc(sc->bge_miibus);
5411 mii_pollstat(mii);
5412 if (!sc->bge_link &&
5413 mii->mii_media_status & IFM_ACTIVE &&
5414 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5415 sc->bge_link++;
5416 if (bootverbose)
5417 if_printf(sc->bge_ifp, "link UP\n");
5418 } else if (sc->bge_link &&
5419 (!(mii->mii_media_status & IFM_ACTIVE) ||
5420 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5421 sc->bge_link = 0;
5422 if (bootverbose)
5423 if_printf(sc->bge_ifp, "link DOWN\n");
5424 }
5425
5426 /* Clear the interrupt. */
5427 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5428 BGE_EVTENB_MI_INTERRUPT);
5429 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5430 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5431 BRGPHY_INTRS);
5432 }
5433 return;
5434 }
5435
5436 if (sc->bge_flags & BGE_FLAG_TBI) {
5437 status = CSR_READ_4(sc, BGE_MAC_STS);
5438 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5439 if (!sc->bge_link) {
5440 sc->bge_link++;
5441 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5442 BGE_CLRBIT(sc, BGE_MAC_MODE,
5443 BGE_MACMODE_TBI_SEND_CFGS);
5444 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5445 if (bootverbose)
5446 if_printf(sc->bge_ifp, "link UP\n");
5447 if_link_state_change(sc->bge_ifp,
5448 LINK_STATE_UP);
5449 }
5450 } else if (sc->bge_link) {
5451 sc->bge_link = 0;
5452 if (bootverbose)
5453 if_printf(sc->bge_ifp, "link DOWN\n");
5454 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5455 }
5456 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5457 /*
5458		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
5459		 * in the status word always set. Work around this bug by reading
5460		 * the PHY link status directly.
5461 */
5462 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5463
5464 if (link != sc->bge_link ||
5465 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5466 mii = device_get_softc(sc->bge_miibus);
5467 mii_pollstat(mii);
5468 if (!sc->bge_link &&
5469 mii->mii_media_status & IFM_ACTIVE &&
5470 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5471 sc->bge_link++;
5472 if (bootverbose)
5473 if_printf(sc->bge_ifp, "link UP\n");
5474 } else if (sc->bge_link &&
5475 (!(mii->mii_media_status & IFM_ACTIVE) ||
5476 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5477 sc->bge_link = 0;
5478 if (bootverbose)
5479 if_printf(sc->bge_ifp, "link DOWN\n");
5480 }
5481 }
5482 } else {
5483 /*
5484 * For controllers that call mii_tick, we have to poll
5485 * link status.
5486 */
5487 mii = device_get_softc(sc->bge_miibus);
5488 mii_pollstat(mii);
5489 bge_miibus_statchg(sc->bge_dev);
5490 }
5491
5492 /* Clear the attention. */
5493 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5494 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5495 BGE_MACSTAT_LINK_CHANGED);
5496}
5497
5498static void
5499bge_add_sysctls(struct bge_softc *sc)
5500{
5501 struct sysctl_ctx_list *ctx;
5502 struct sysctl_oid_list *children;
5503 char tn[32];
5504 int unit;
5505
5506 ctx = device_get_sysctl_ctx(sc->bge_dev);
5507 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5508
5509#ifdef BGE_REGISTER_DEBUG
5510 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5511 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5512 "Debug Information");
5513
5514 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5515 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5516 "Register Read");
5517
5518 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5519 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5520 "Memory Read");
5521
5522#endif
5523
5524 unit = device_get_unit(sc->bge_dev);
5525 /*
5526 * A common design characteristic for many Broadcom client controllers
5527 * is that they only support a single outstanding DMA read operation
5528 * on the PCIe bus. This means that it will take twice as long to fetch
5529 * a TX frame that is split into header and payload buffers as it does
5530 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5531	 * these controllers, coalescing buffers to reduce the number of memory
5532	 * reads is an effective way to get maximum performance (about 940 Mbps).
5533	 * Without collapsing TX buffers the maximum TCP bulk transfer
5534	 * performance is about 850 Mbps. However, forcing mbuf coalescing
5535	 * consumes a lot of CPU cycles, so leave it off by default.
5536 */
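	/*
	 * Example usage (illustrative): "sysctl dev.bge.0.forced_collapse=2".
	 * A value of 1 defragments each chain with m_defrag(); values greater
	 * than 1 use m_collapse() to at most that many segments (see
	 * bge_encap()).
	 */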
5537 sc->bge_forced_collapse = 0;
5538 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5539 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5540 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5541 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5542 "Number of fragmented TX buffers of a frame allowed before "
5543 "forced collapsing");
5544
5545 /*
5546 * It seems all Broadcom controllers have a bug that can generate UDP
5547 * datagrams with checksum value 0 when TX UDP checksum offloading is
5548	 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
5549	 * Even though the probability of generating such UDP datagrams is
5550	 * low, I don't want to see FreeBSD boxes inject such datagrams
5551	 * into the network, so disable UDP checksum offloading by default.
5552	 * Users can still override this behavior by setting a sysctl variable,
5553	 * dev.bge.0.forced_udpcsum.
5554 */
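	/*
	 * Example (illustrative): "sysctl dev.bge.0.forced_udpcsum=1"
	 * re-enables TX UDP checksum offloading despite the checksum-0
	 * erratum above; the new value is applied the next time the
	 * interface is initialized in bge_init_locked().
	 */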
5555 sc->bge_forced_udpcsum = 0;
5556	snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5557 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5558 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5559 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5560 "Enable UDP checksum offloading even if controller can "
5561 "generate UDP checksum value 0");
5562
5563 if (BGE_IS_5705_PLUS(sc))
5564 bge_add_sysctl_stats_regs(sc, ctx, children);
5565 else
5566 bge_add_sysctl_stats(sc, ctx, children);
5567}
5568
5569#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5570 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5571 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5572 desc)
5573
5574static void
5575bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5576 struct sysctl_oid_list *parent)
5577{
5578 struct sysctl_oid *tree;
5579 struct sysctl_oid_list *children, *schildren;
5580
5581 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5582 NULL, "BGE Statistics");
5583 schildren = children = SYSCTL_CHILDREN(tree);
5584 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5585 children, COSFramesDroppedDueToFilters,
5586 "FramesDroppedDueToFilters");
5587 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5588 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5589 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5590 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5591 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5592 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5593 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5594 children, ifInDiscards, "InputDiscards");
5595 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5596 children, ifInErrors, "InputErrors");
5597 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5598 children, nicRecvThresholdHit, "RecvThresholdHit");
5599 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5600 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5601 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5602 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5603 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5604 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5605 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5606 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5607 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5608 children, nicRingStatusUpdate, "RingStatusUpdate");
5609 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5610 children, nicInterrupts, "Interrupts");
5611 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5612 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5613 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5614 children, nicSendThresholdHit, "SendThresholdHit");
5615
5616 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5617 NULL, "BGE RX Statistics");
5618 children = SYSCTL_CHILDREN(tree);
5619 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5620 children, rxstats.ifHCInOctets, "ifHCInOctets");
5621 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5622 children, rxstats.etherStatsFragments, "Fragments");
5623 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5624 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5625 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5626 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5627 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5628 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5629 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5630 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5631 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5632 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5633 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5634 children, rxstats.xoffPauseFramesReceived,
5635 "xoffPauseFramesReceived");
5636 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5637 children, rxstats.macControlFramesReceived,
5638 "ControlFramesReceived");
5639 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5640 children, rxstats.xoffStateEntered, "xoffStateEntered");
5641 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5642 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5643 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5644 children, rxstats.etherStatsJabbers, "Jabbers");
5645 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5646 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5647 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5648 children, rxstats.inRangeLengthError, "inRangeLengthError");
5649 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5650 children, rxstats.outRangeLengthError, "outRangeLengthError");
5651
5652 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5653 NULL, "BGE TX Statistics");
5654 children = SYSCTL_CHILDREN(tree);
5655 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5656 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5657 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5658 children, txstats.etherStatsCollisions, "Collisions");
5659 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5660 children, txstats.outXonSent, "XonSent");
5661 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5662 children, txstats.outXoffSent, "XoffSent");
5663 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5664 children, txstats.flowControlDone, "flowControlDone");
5665 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5666 children, txstats.dot3StatsInternalMacTransmitErrors,
5667 "InternalMacTransmitErrors");
5668 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5669 children, txstats.dot3StatsSingleCollisionFrames,
5670 "SingleCollisionFrames");
5671 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5672 children, txstats.dot3StatsMultipleCollisionFrames,
5673 "MultipleCollisionFrames");
5674 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5675 children, txstats.dot3StatsDeferredTransmissions,
5676 "DeferredTransmissions");
5677 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5678 children, txstats.dot3StatsExcessiveCollisions,
5679 "ExcessiveCollisions");
5680 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5681 children, txstats.dot3StatsLateCollisions,
5682 "LateCollisions");
5683 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5684 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5685 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5686 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5687 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5688 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5689 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5690 children, txstats.dot3StatsCarrierSenseErrors,
5691 "CarrierSenseErrors");
5692 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5693 children, txstats.ifOutDiscards, "Discards");
5694 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5695 children, txstats.ifOutErrors, "Errors");
5696}
5697
5698#undef BGE_SYSCTL_STAT
5699
5700#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5701 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5702
5703static void
5704bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5705 struct sysctl_oid_list *parent)
5706{
5707 struct sysctl_oid *tree;
5708 struct sysctl_oid_list *child, *schild;
5709 struct bge_mac_stats *stats;
5710
5711 stats = &sc->bge_mac_stats;
5712 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5713 NULL, "BGE Statistics");
5714 schild = child = SYSCTL_CHILDREN(tree);
5715 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5716 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5717 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5718 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5719 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5720 &stats->DmaWriteHighPriQueueFull,
5721 "NIC DMA Write High Priority Queue Full");
5722 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5723 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5724 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5725 &stats->InputDiscards, "Discarded Input Frames");
5726 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5727 &stats->InputErrors, "Input Errors");
5728 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5729 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5730
5731 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5732 NULL, "BGE RX Statistics");
5733 child = SYSCTL_CHILDREN(tree);
5734 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5735 &stats->ifHCInOctets, "Inbound Octets");
5736 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5737 &stats->etherStatsFragments, "Fragments");
5738 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5739 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5740 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5741 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5742 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5743 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5744 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5745 &stats->dot3StatsFCSErrors, "FCS Errors");
5746 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5747 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5748 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5749 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5750 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5751 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5752 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5753 &stats->macControlFramesReceived, "MAC Control Frames Received");
5754 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5755 &stats->xoffStateEntered, "XOFF State Entered");
5756 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5757 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5758 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5759 &stats->etherStatsJabbers, "Jabbers");
5760 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5761 &stats->etherStatsUndersizePkts, "Undersized Packets");
5762
5763 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5764 NULL, "BGE TX Statistics");
5765 child = SYSCTL_CHILDREN(tree);
5766 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5767 &stats->ifHCOutOctets, "Outbound Octets");
5768 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5769 &stats->etherStatsCollisions, "TX Collisions");
5770 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5771 &stats->outXonSent, "XON Sent");
5772 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5773 &stats->outXoffSent, "XOFF Sent");
5774 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5775 &stats->dot3StatsInternalMacTransmitErrors,
5776 "Internal MAC TX Errors");
5777 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5778 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5779 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5780 &stats->dot3StatsMultipleCollisionFrames,
5781 "Multiple Collision Frames");
5782 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5783 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5784 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5785 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5786 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5787 &stats->dot3StatsLateCollisions, "Late Collisions");
5788 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5789 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5790 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5791 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5792 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5793 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5794}
5795
5796#undef BGE_SYSCTL_STAT_ADD64
5797
5798static int
5799bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5800{
5801 struct bge_softc *sc;
5802 uint32_t result;
5803 int offset;
5804
5805 sc = (struct bge_softc *)arg1;
5806 offset = arg2;
5807 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5808 offsetof(bge_hostaddr, bge_addr_lo));
5809 return (sysctl_handle_int(oidp, &result, 0, req));
5810}
5811
5812#ifdef BGE_REGISTER_DEBUG
5813static int
5814bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5815{
5816 struct bge_softc *sc;
5817 uint16_t *sbdata;
5818 int error, result, sbsz;
5819 int i, j;
5820
5821 result = -1;
5822 error = sysctl_handle_int(oidp, &result, 0, req);
5823 if (error || (req->newptr == NULL))
5824 return (error);
5825
5826 if (result == 1) {
5827 sc = (struct bge_softc *)arg1;
5828
5829 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5830 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5831 sbsz = BGE_STATUS_BLK_SZ;
5832 else
5833 sbsz = 32;
5834 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5835 printf("Status Block:\n");
5836 BGE_LOCK(sc);
5837 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5838 sc->bge_cdata.bge_status_map,
5839 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5840 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5841 printf("%06x:", i);
5842 for (j = 0; j < 8; j++)
5843 printf(" %04x", sbdata[i++]);
5844 printf("\n");
5845 }
5846
5847 printf("Registers:\n");
5848 for (i = 0x800; i < 0xA00; ) {
5849 printf("%06x:", i);
5850 for (j = 0; j < 8; j++) {
5851 printf(" %08x", CSR_READ_4(sc, i));
5852 i += 4;
5853 }
5854 printf("\n");
5855 }
5856 BGE_UNLOCK(sc);
5857
5858 printf("Hardware Flags:\n");
5859 if (BGE_IS_5717_PLUS(sc))
5860 printf(" - 5717 Plus\n");
5861 if (BGE_IS_5755_PLUS(sc))
5862 printf(" - 5755 Plus\n");
5863 if (BGE_IS_575X_PLUS(sc))
5864 printf(" - 575X Plus\n");
5865 if (BGE_IS_5705_PLUS(sc))
5866 printf(" - 5705 Plus\n");
5867 if (BGE_IS_5714_FAMILY(sc))
5868 printf(" - 5714 Family\n");
5869 if (BGE_IS_5700_FAMILY(sc))
5870 printf(" - 5700 Family\n");
5871 if (sc->bge_flags & BGE_FLAG_JUMBO)
5872 printf(" - Supports Jumbo Frames\n");
5873 if (sc->bge_flags & BGE_FLAG_PCIX)
5874 printf(" - PCI-X Bus\n");
5875 if (sc->bge_flags & BGE_FLAG_PCIE)
5876 printf(" - PCI Express Bus\n");
5877 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5878 printf(" - No 3 LEDs\n");
5879 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5880 printf(" - RX Alignment Bug\n");
5881 }
5882
5883 return (error);
5884}
5885
5886static int
5887bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5888{
5889 struct bge_softc *sc;
5890 int error;
5891 uint16_t result;
5892 uint32_t val;
5893
5894 result = -1;
5895 error = sysctl_handle_int(oidp, &result, 0, req);
5896 if (error || (req->newptr == NULL))
5897 return (error);
5898
5899 if (result < 0x8000) {
5900 sc = (struct bge_softc *)arg1;
5901 val = CSR_READ_4(sc, result);
5902 printf("reg 0x%06X = 0x%08X\n", result, val);
5903 }
5904
5905 return (error);
5906}
5907
5908static int
5909bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5910{
5911 struct bge_softc *sc;
5912 int error;
5913 uint16_t result;
5914 uint32_t val;
5915
5916 result = -1;
5917 error = sysctl_handle_int(oidp, &result, 0, req);
5918 if (error || (req->newptr == NULL))
5919 return (error);
5920
5921 if (result < 0x8000) {
5922 sc = (struct bge_softc *)arg1;
5923 val = bge_readmem_ind(sc, result);
5924 printf("mem 0x%06X = 0x%08X\n", result, val);
5925 }
5926
5927 return (error);
5928}
5929#endif
5930
5931static int
5932bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5933{
5934
5935 if (sc->bge_flags & BGE_FLAG_EADDR)
5936 return (1);
5937
5938#ifdef __sparc64__
5939 OF_getetheraddr(sc->bge_dev, ether_addr);
5940 return (0);
5941#endif
5942 return (1);
5943}
5944
5945static int
5946bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5947{
5948 uint32_t mac_addr;
5949
5950 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
5951 if ((mac_addr >> 16) == 0x484b) {
5952 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5953 ether_addr[1] = (uint8_t)mac_addr;
5954 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
5955 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5956 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5957 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5958 ether_addr[5] = (uint8_t)mac_addr;
5959 return (0);
5960 }
5961 return (1);
5962}
5963
5964static int
5965bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5966{
5967 int mac_offset = BGE_EE_MAC_OFFSET;
5968
5969 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5970 mac_offset = BGE_EE_MAC_OFFSET_5906;
5971
5972 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5973 ETHER_ADDR_LEN));
5974}
5975
5976static int
5977bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5978{
5979
5980 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5981 return (1);
5982
5983 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5984 ETHER_ADDR_LEN));
5985}
5986
5987static int
5988bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5989{
5990 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5991 /* NOTE: Order is critical */
5992 bge_get_eaddr_fw,
5993 bge_get_eaddr_mem,
5994 bge_get_eaddr_nvram,
5995 bge_get_eaddr_eeprom,
5996 NULL
5997 };
5998 const bge_eaddr_fcn_t *func;
5999
6000 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6001 if ((*func)(sc, eaddr) == 0)
6002 break;
6003 }
6004 return (*func == NULL ? ENXIO : 0);
6005}