if_bge.c (226820) → if_bge.c (226821)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 226820 2011-10-26 23:22:32Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 226821 2011-10-26 23:52:02Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
305 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
306 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
307 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
308 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
309 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
310 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
311 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
312 /* 5754 and 5787 share the same ASIC ID */
313 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
314 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
315 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
316 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
317 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
318 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
319 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
320 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
321 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
322
323 { 0, NULL }
324};
325
326/*
327 * Some defaults for major revisions, so that newer steppings
328 * that we don't know about have a shot at working.
329 */
330static const struct bge_revision const bge_majorrevs[] = {
331 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
332 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
333 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
334 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
335 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
336 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
337 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
338 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
339 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
340 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
341 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
342 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
343 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
344 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
345 /* 5754 and 5787 share the same ASIC ID */
346 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
347 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
348 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
349 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
350 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
351 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
352
353 { 0, NULL }
354};
355
356#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
357#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
358#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
359#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
360#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
361#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
362#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
363
364const struct bge_revision * bge_lookup_rev(uint32_t);
365const struct bge_vendor * bge_lookup_vendor(uint16_t);
366
367typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
368
369static int bge_probe(device_t);
370static int bge_attach(device_t);
371static int bge_detach(device_t);
372static int bge_suspend(device_t);
373static int bge_resume(device_t);
374static void bge_release_resources(struct bge_softc *);
375static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
376static int bge_dma_alloc(struct bge_softc *);
377static void bge_dma_free(struct bge_softc *);
378static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
379 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
380
381static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
382static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
383static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
384static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
385static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
386
387static void bge_txeof(struct bge_softc *, uint16_t);
388static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
389static int bge_rxeof(struct bge_softc *, uint16_t, int);
390
391static void bge_asf_driver_up (struct bge_softc *);
392static void bge_tick(void *);
393static void bge_stats_clear_regs(struct bge_softc *);
394static void bge_stats_update(struct bge_softc *);
395static void bge_stats_update_regs(struct bge_softc *);
396static struct mbuf *bge_check_short_dma(struct mbuf *);
397static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
398 uint16_t *, uint16_t *);
399static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
400
401static void bge_intr(void *);
402static int bge_msi_intr(void *);
403static void bge_intr_task(void *, int);
404static void bge_start_locked(struct ifnet *);
405static void bge_start(struct ifnet *);
406static int bge_ioctl(struct ifnet *, u_long, caddr_t);
407static void bge_init_locked(struct bge_softc *);
408static void bge_init(void *);
409static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
410static void bge_stop(struct bge_softc *);
411static void bge_watchdog(struct bge_softc *);
412static int bge_shutdown(device_t);
413static int bge_ifmedia_upd_locked(struct ifnet *);
414static int bge_ifmedia_upd(struct ifnet *);
415static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
416
417static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
418static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
419
420static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
421static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
422
423static void bge_setpromisc(struct bge_softc *);
424static void bge_setmulti(struct bge_softc *);
425static void bge_setvlan(struct bge_softc *);
426
427static __inline void bge_rxreuse_std(struct bge_softc *, int);
428static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
429static int bge_newbuf_std(struct bge_softc *, int);
430static int bge_newbuf_jumbo(struct bge_softc *, int);
431static int bge_init_rx_ring_std(struct bge_softc *);
432static void bge_free_rx_ring_std(struct bge_softc *);
433static int bge_init_rx_ring_jumbo(struct bge_softc *);
434static void bge_free_rx_ring_jumbo(struct bge_softc *);
435static void bge_free_tx_ring(struct bge_softc *);
436static int bge_init_tx_ring(struct bge_softc *);
437
438static int bge_chipinit(struct bge_softc *);
439static int bge_blockinit(struct bge_softc *);
440
441static int bge_has_eaddr(struct bge_softc *);
442static uint32_t bge_readmem_ind(struct bge_softc *, int);
443static void bge_writemem_ind(struct bge_softc *, int, int);
444static void bge_writembx(struct bge_softc *, int, int);
445#ifdef notdef
446static uint32_t bge_readreg_ind(struct bge_softc *, int);
447#endif
448static void bge_writemem_direct(struct bge_softc *, int, int);
449static void bge_writereg_ind(struct bge_softc *, int, int);
450
451static int bge_miibus_readreg(device_t, int, int);
452static int bge_miibus_writereg(device_t, int, int, int);
453static void bge_miibus_statchg(device_t);
454#ifdef DEVICE_POLLING
455static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
456#endif
457
458#define BGE_RESET_START 1
459#define BGE_RESET_STOP 2
460static void bge_sig_post_reset(struct bge_softc *, int);
461static void bge_sig_legacy(struct bge_softc *, int);
462static void bge_sig_pre_reset(struct bge_softc *, int);
463static void bge_stop_fw(struct bge_softc *);
464static int bge_reset(struct bge_softc *);
465static void bge_link_upd(struct bge_softc *);
466
467/*
468 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
469 * leak information to untrusted users. It is also known to cause alignment
470 * traps on certain architectures.
471 */
472#ifdef BGE_REGISTER_DEBUG
473static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
474static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
475static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
476#endif
477static void bge_add_sysctls(struct bge_softc *);
478static void bge_add_sysctl_stats_regs(struct bge_softc *,
479 struct sysctl_ctx_list *, struct sysctl_oid_list *);
480static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
481 struct sysctl_oid_list *);
482static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
483
484static device_method_t bge_methods[] = {
485 /* Device interface */
486 DEVMETHOD(device_probe, bge_probe),
487 DEVMETHOD(device_attach, bge_attach),
488 DEVMETHOD(device_detach, bge_detach),
489 DEVMETHOD(device_shutdown, bge_shutdown),
490 DEVMETHOD(device_suspend, bge_suspend),
491 DEVMETHOD(device_resume, bge_resume),
492
493 /* bus interface */
494 DEVMETHOD(bus_print_child, bus_generic_print_child),
495 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
496
497 /* MII interface */
498 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
499 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
500 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
501
502 { 0, 0 }
503};
504
505static driver_t bge_driver = {
506 "bge",
507 bge_methods,
508 sizeof(struct bge_softc)
509};
510
511static devclass_t bge_devclass;
512
513DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
514DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
515
516static int bge_allow_asf = 1;
517
518TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
519
520SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
521SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
522 "Allow ASF mode if available");
523
524#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
525#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
526#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
527#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
528#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
529
530static int
531bge_has_eaddr(struct bge_softc *sc)
532{
533#ifdef __sparc64__
534 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
535 device_t dev;
536 uint32_t subvendor;
537
538 dev = sc->bge_dev;
539
540 /*
541 * The on-board BGEs found in sun4u machines aren't fitted with
542 * an EEPROM which means that we have to obtain the MAC address
543 * via OFW and that some tests will always fail. We distinguish
544 * such BGEs by the subvendor ID, which also has to be obtained
545 * from OFW instead of the PCI configuration space as the latter
546 * indicates Broadcom as the subvendor of the netboot interface.
547 * For early Blade 1500 and 2500 we even have to check the OFW
548 * device path as the subvendor ID always defaults to Broadcom
549 * there.
550 */
551 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
552 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
553 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
554 return (0);
555 memset(buf, 0, sizeof(buf));
556 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
557 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
558 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
559 return (0);
560 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
562 return (0);
563 }
564#endif
565 return (1);
566}
567
568static uint32_t
569bge_readmem_ind(struct bge_softc *sc, int off)
570{
571 device_t dev;
572 uint32_t val;
573
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
575 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
576 return (0);
577
578 dev = sc->bge_dev;
579
580 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
581 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
583 return (val);
584}
585
586static void
587bge_writemem_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
592 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
593 return;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
600}
601
602#ifdef notdef
603static uint32_t
604bge_readreg_ind(struct bge_softc *sc, int off)
605{
606 device_t dev;
607
608 dev = sc->bge_dev;
609
610 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
611 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
612}
613#endif
614
615static void
616bge_writereg_ind(struct bge_softc *sc, int off, int val)
617{
618 device_t dev;
619
620 dev = sc->bge_dev;
621
622 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
623 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
624}
625
626static void
627bge_writemem_direct(struct bge_softc *sc, int off, int val)
628{
629 CSR_WRITE_4(sc, off, val);
630}
631
632static void
633bge_writembx(struct bge_softc *sc, int off, int val)
634{
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
636 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
637
638 CSR_WRITE_4(sc, off, val);
639}
640
641/*
642 * Map a single buffer address.
643 */
644
645static void
646bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
647{
648 struct bge_dmamap_arg *ctx;
649
650 if (error)
651 return;
652
653 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
654
655 ctx = arg;
656 ctx->bge_busaddr = segs->ds_addr;
657}
658
659static uint8_t
660bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
661{
662 uint32_t access, byte = 0;
663 int i;
664
665 /* Lock. */
666 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
667 for (i = 0; i < 8000; i++) {
668 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
669 break;
670 DELAY(20);
671 }
672 if (i == 8000)
673 return (1);
674
675 /* Enable access. */
676 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
677 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
678
679 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
680 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
681 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
682 DELAY(10);
683 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
684 DELAY(10);
685 break;
686 }
687 }
688
689 if (i == BGE_TIMEOUT * 10) {
690 if_printf(sc->bge_ifp, "nvram read timed out\n");
691 return (1);
692 }
693
694 /* Get result. */
695 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
696
697 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
698
699 /* Disable access. */
700 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
701
702 /* Unlock. */
703 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
704 CSR_READ_4(sc, BGE_NVRAM_SWARB);
705
706 return (0);
707}
708
709/*
710 * Read a sequence of bytes from NVRAM.
711 */
712static int
713bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
714{
715 int err = 0, i;
716 uint8_t byte = 0;
717
718 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
719 return (1);
720
721 for (i = 0; i < cnt; i++) {
722 err = bge_nvram_getbyte(sc, off + i, &byte);
723 if (err)
724 break;
725 *(dest + i) = byte;
726 }
727
728 return (err ? 1 : 0);
729}
730
731/*
732 * Read a byte of data stored in the EEPROM at address 'addr.' The
733 * BCM570x supports both the traditional bitbang interface and an
734 * auto access interface for reading the EEPROM. We use the auto
735 * access method.
736 */
737static uint8_t
738bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
739{
740 int i;
741 uint32_t byte = 0;
742
743 /*
744 * Enable use of auto EEPROM access so we can avoid
745 * having to use the bitbang method.
746 */
747 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
748
749 /* Reset the EEPROM, load the clock period. */
750 CSR_WRITE_4(sc, BGE_EE_ADDR,
751 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
752 DELAY(20);
753
754 /* Issue the read EEPROM command. */
755 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
756
757 /* Wait for completion */
758 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
759 DELAY(10);
760 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
761 break;
762 }
763
764 if (i == BGE_TIMEOUT * 10) {
765 device_printf(sc->bge_dev, "EEPROM read timed out\n");
766 return (1);
767 }
768
769 /* Get result. */
770 byte = CSR_READ_4(sc, BGE_EE_DATA);
771
772 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
773
774 return (0);
775}
776
777/*
778 * Read a sequence of bytes from the EEPROM.
779 */
780static int
781bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
782{
783 int i, error = 0;
784 uint8_t byte = 0;
785
786 for (i = 0; i < cnt; i++) {
787 error = bge_eeprom_getbyte(sc, off + i, &byte);
788 if (error)
789 break;
790 *(dest + i) = byte;
791 }
792
793 return (error ? 1 : 0);
794}
795
796static int
797bge_miibus_readreg(device_t dev, int phy, int reg)
798{
799 struct bge_softc *sc;
800 uint32_t val;
801 int i;
802
803 sc = device_get_softc(dev);
804
805 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
806 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
807 CSR_WRITE_4(sc, BGE_MI_MODE,
808 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
809 DELAY(80);
810 }
811
812 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
813 BGE_MIPHY(phy) | BGE_MIREG(reg));
814
815 /* Poll for the PHY register access to complete. */
816 for (i = 0; i < BGE_TIMEOUT; i++) {
817 DELAY(10);
818 val = CSR_READ_4(sc, BGE_MI_COMM);
819 if ((val & BGE_MICOMM_BUSY) == 0) {
820 DELAY(5);
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 break;
823 }
824 }
825
826 if (i == BGE_TIMEOUT) {
827 device_printf(sc->bge_dev,
828 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
829 phy, reg, val);
830 val = 0;
831 }
832
833 /* Restore the autopoll bit if necessary. */
834 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
835 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
836 DELAY(80);
837 }
838
839 if (val & BGE_MICOMM_READFAIL)
840 return (0);
841
842 return (val & 0xFFFF);
843}
844
845static int
846bge_miibus_writereg(device_t dev, int phy, int reg, int val)
847{
848 struct bge_softc *sc;
849 int i;
850
851 sc = device_get_softc(dev);
852
853 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
854 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
855 return (0);
856
857 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
858 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
859 CSR_WRITE_4(sc, BGE_MI_MODE,
860 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
861 DELAY(80);
862 }
863
864 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
865 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
866
867 for (i = 0; i < BGE_TIMEOUT; i++) {
868 DELAY(10);
869 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
870 DELAY(5);
871 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
872 break;
873 }
874 }
875
876 /* Restore the autopoll bit if necessary. */
877 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
878 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
879 DELAY(80);
880 }
881
882 if (i == BGE_TIMEOUT)
883 device_printf(sc->bge_dev,
884 "PHY write timed out (phy %d, reg %d, val %d)\n",
885 phy, reg, val);
886
887 return (0);
888}
889
890static void
891bge_miibus_statchg(device_t dev)
892{
893 struct bge_softc *sc;
894 struct mii_data *mii;
895 sc = device_get_softc(dev);
896 mii = device_get_softc(sc->bge_miibus);
897
898 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
899 (IFM_ACTIVE | IFM_AVALID)) {
900 switch (IFM_SUBTYPE(mii->mii_media_active)) {
901 case IFM_10_T:
902 case IFM_100_TX:
903 sc->bge_link = 1;
904 break;
905 case IFM_1000_T:
906 case IFM_1000_SX:
907 case IFM_2500_SX:
908 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
909 sc->bge_link = 1;
910 else
911 sc->bge_link = 0;
912 break;
913 default:
914 sc->bge_link = 0;
915 break;
916 }
917 } else
918 sc->bge_link = 0;
919 if (sc->bge_link == 0)
920 return;
921 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
922 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
923 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
924 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
925 else
926 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
927
928 if (IFM_OPTIONS(mii->mii_media_active & IFM_FDX) != 0) {
929 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
930 if ((IFM_OPTIONS(mii->mii_media_active) &
931 IFM_ETH_TXPAUSE) != 0)
932 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
933 else
934 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
935 if ((IFM_OPTIONS(mii->mii_media_active) &
936 IFM_ETH_RXPAUSE) != 0)
937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
938 else
939 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
940 } else {
941 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
942 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
943 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
944 }
945}
946
947/*
 948 * Initialize a standard receive ring descriptor.
949 */
950static int
951bge_newbuf_std(struct bge_softc *sc, int i)
952{
953 struct mbuf *m;
954 struct bge_rx_bd *r;
955 bus_dma_segment_t segs[1];
956 bus_dmamap_t map;
957 int error, nsegs;
958
959 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
960 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
961 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
962 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
963 if (m == NULL)
964 return (ENOBUFS);
965 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
966 } else {
967 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
968 if (m == NULL)
969 return (ENOBUFS);
970 m->m_len = m->m_pkthdr.len = MCLBYTES;
971 }
972 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
973 m_adj(m, ETHER_ALIGN);
974
975 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
976 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
977 if (error != 0) {
978 m_freem(m);
979 return (error);
980 }
981 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
982 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
983 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
984 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
985 sc->bge_cdata.bge_rx_std_dmamap[i]);
986 }
987 map = sc->bge_cdata.bge_rx_std_dmamap[i];
988 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
989 sc->bge_cdata.bge_rx_std_sparemap = map;
990 sc->bge_cdata.bge_rx_std_chain[i] = m;
991 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
992 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
993 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
994 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
995 r->bge_flags = BGE_RXBDFLAG_END;
996 r->bge_len = segs[0].ds_len;
997 r->bge_idx = i;
998
999 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1000 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Initialize a jumbo receive ring descriptor. This allocates
1007 * a jumbo buffer from the pool managed internally by the driver.
1008 */
1009static int
1010bge_newbuf_jumbo(struct bge_softc *sc, int i)
1011{
1012 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1013 bus_dmamap_t map;
1014 struct bge_extrx_bd *r;
1015 struct mbuf *m;
1016 int error, nsegs;
1017
1018 MGETHDR(m, M_DONTWAIT, MT_DATA);
1019 if (m == NULL)
1020 return (ENOBUFS);
1021
1022 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1023 if (!(m->m_flags & M_EXT)) {
1024 m_freem(m);
1025 return (ENOBUFS);
1026 }
1027 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1028 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1029 m_adj(m, ETHER_ALIGN);
1030
1031 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1032 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1033 if (error != 0) {
1034 m_freem(m);
1035 return (error);
1036 }
1037
1038 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1039 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1040 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1041 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1042 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1043 }
1044 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1046 sc->bge_cdata.bge_rx_jumbo_sparemap;
1047 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1048 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1053
1054 /*
1055 * Fill in the extended RX buffer descriptor.
1056 */
1057 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1058 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1059 r->bge_idx = i;
1060 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1061 switch (nsegs) {
1062 case 4:
1063 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1064 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1065 r->bge_len3 = segs[3].ds_len;
1066 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1067 case 3:
1068 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1069 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1070 r->bge_len2 = segs[2].ds_len;
1071 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1072 case 2:
1073 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1074 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1075 r->bge_len1 = segs[1].ds_len;
1076 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1077 case 1:
1078 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1079 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1080 r->bge_len0 = segs[0].ds_len;
1081 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1082 break;
1083 default:
1084 panic("%s: %d segments\n", __func__, nsegs);
1085 }
1086
1087 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1088 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1089
1090 return (0);
1091}
1092
1093static int
1094bge_init_rx_ring_std(struct bge_softc *sc)
1095{
1096 int error, i;
1097
1098 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1099 sc->bge_std = 0;
1100 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1101 if ((error = bge_newbuf_std(sc, i)) != 0)
1102 return (error);
1103 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1104 }
1105
1106 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1107 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1108
1109 sc->bge_std = 0;
1110 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_std(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1123 sc->bge_cdata.bge_rx_std_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1128 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1131 sizeof(struct bge_rx_bd));
1132 }
1133}
1134
1135static int
1136bge_init_rx_ring_jumbo(struct bge_softc *sc)
1137{
1138 struct bge_rcb *rcb;
1139 int error, i;
1140
1141 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1142 sc->bge_jumbo = 0;
1143 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1144 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1145 return (error);
1146 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1147 }
1148
1149 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1150 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1151
1152 sc->bge_jumbo = 0;
1153
1154 /* Enable the jumbo receive producer ring. */
1155 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1156 rcb->bge_maxlen_flags =
1157 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1158 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1159
1160 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1161
1162 return (0);
1163}
1164
1165static void
1166bge_free_rx_ring_jumbo(struct bge_softc *sc)
1167{
1168 int i;
1169
1170 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1171 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1172 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1173 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1174 BUS_DMASYNC_POSTREAD);
1175 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1177 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1178 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1179 }
1180 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1181 sizeof(struct bge_extrx_bd));
1182 }
1183}
1184
1185static void
1186bge_free_tx_ring(struct bge_softc *sc)
1187{
1188 int i;
1189
1190 if (sc->bge_ldata.bge_tx_ring == NULL)
1191 return;
1192
1193 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1194 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1195 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1196 sc->bge_cdata.bge_tx_dmamap[i],
1197 BUS_DMASYNC_POSTWRITE);
1198 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i]);
1200 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1201 sc->bge_cdata.bge_tx_chain[i] = NULL;
1202 }
1203 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1204 sizeof(struct bge_tx_bd));
1205 }
1206}
1207
1208static int
1209bge_init_tx_ring(struct bge_softc *sc)
1210{
1211 sc->bge_txcnt = 0;
1212 sc->bge_tx_saved_considx = 0;
1213
1214 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1215 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1216 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1217
1218 /* Initialize transmit producer index for host-memory send ring. */
1219 sc->bge_tx_prodidx = 0;
1220 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1221
1222 /* 5700 b2 errata */
1223 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1224 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1225
1226 /* NIC-memory send ring not used; initialize to zero. */
1227 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1228 /* 5700 b2 errata */
1229 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231
1232 return (0);
1233}
1234
1235static void
1236bge_setpromisc(struct bge_softc *sc)
1237{
1238 struct ifnet *ifp;
1239
1240 BGE_LOCK_ASSERT(sc);
1241
1242 ifp = sc->bge_ifp;
1243
1244 /* Enable or disable promiscuous mode as needed. */
1245 if (ifp->if_flags & IFF_PROMISC)
1246 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1247 else
1248 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1249}
1250
1251static void
1252bge_setmulti(struct bge_softc *sc)
1253{
1254 struct ifnet *ifp;
1255 struct ifmultiaddr *ifma;
1256 uint32_t hashes[4] = { 0, 0, 0, 0 };
1257 int h, i;
1258
1259 BGE_LOCK_ASSERT(sc);
1260
1261 ifp = sc->bge_ifp;
1262
1263 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1264 for (i = 0; i < 4; i++)
1265 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1266 return;
1267 }
1268
1269 /* First, zot all the existing filters. */
1270 for (i = 0; i < 4; i++)
1271 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1272
1273 /* Now program new ones. */
1274 if_maddr_rlock(ifp);
1275 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1276 if (ifma->ifma_addr->sa_family != AF_LINK)
1277 continue;
1278 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1279 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1280 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1281 }
1282 if_maddr_runlock(ifp);
1283
1284 for (i = 0; i < 4; i++)
1285 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1286}
1287
1288static void
1289bge_setvlan(struct bge_softc *sc)
1290{
1291 struct ifnet *ifp;
1292
1293 BGE_LOCK_ASSERT(sc);
1294
1295 ifp = sc->bge_ifp;
1296
1297 /* Enable or disable VLAN tag stripping as needed. */
1298 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1299 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1300 else
1301 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1302}
1303
1304static void
1305bge_sig_pre_reset(struct bge_softc *sc, int type)
1306{
1307
1308 /*
1309 * Some chips don't like this so only do this if ASF is enabled
1310 */
1311 if (sc->bge_asf_mode)
1312 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1313
1314 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1315 switch (type) {
1316 case BGE_RESET_START:
392static void bge_tick(void *);
393static void bge_stats_clear_regs(struct bge_softc *);
394static void bge_stats_update(struct bge_softc *);
395static void bge_stats_update_regs(struct bge_softc *);
396static struct mbuf *bge_check_short_dma(struct mbuf *);
397static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
398 uint16_t *, uint16_t *);
399static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
400
401static void bge_intr(void *);
402static int bge_msi_intr(void *);
403static void bge_intr_task(void *, int);
404static void bge_start_locked(struct ifnet *);
405static void bge_start(struct ifnet *);
406static int bge_ioctl(struct ifnet *, u_long, caddr_t);
407static void bge_init_locked(struct bge_softc *);
408static void bge_init(void *);
409static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
410static void bge_stop(struct bge_softc *);
411static void bge_watchdog(struct bge_softc *);
412static int bge_shutdown(device_t);
413static int bge_ifmedia_upd_locked(struct ifnet *);
414static int bge_ifmedia_upd(struct ifnet *);
415static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
416
417static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
418static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
419
420static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
421static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
422
423static void bge_setpromisc(struct bge_softc *);
424static void bge_setmulti(struct bge_softc *);
425static void bge_setvlan(struct bge_softc *);
426
427static __inline void bge_rxreuse_std(struct bge_softc *, int);
428static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
429static int bge_newbuf_std(struct bge_softc *, int);
430static int bge_newbuf_jumbo(struct bge_softc *, int);
431static int bge_init_rx_ring_std(struct bge_softc *);
432static void bge_free_rx_ring_std(struct bge_softc *);
433static int bge_init_rx_ring_jumbo(struct bge_softc *);
434static void bge_free_rx_ring_jumbo(struct bge_softc *);
435static void bge_free_tx_ring(struct bge_softc *);
436static int bge_init_tx_ring(struct bge_softc *);
437
438static int bge_chipinit(struct bge_softc *);
439static int bge_blockinit(struct bge_softc *);
440
441static int bge_has_eaddr(struct bge_softc *);
442static uint32_t bge_readmem_ind(struct bge_softc *, int);
443static void bge_writemem_ind(struct bge_softc *, int, int);
444static void bge_writembx(struct bge_softc *, int, int);
445#ifdef notdef
446static uint32_t bge_readreg_ind(struct bge_softc *, int);
447#endif
448static void bge_writemem_direct(struct bge_softc *, int, int);
449static void bge_writereg_ind(struct bge_softc *, int, int);
450
451static int bge_miibus_readreg(device_t, int, int);
452static int bge_miibus_writereg(device_t, int, int, int);
453static void bge_miibus_statchg(device_t);
454#ifdef DEVICE_POLLING
455static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
456#endif
457
458#define BGE_RESET_START 1
459#define BGE_RESET_STOP 2
460static void bge_sig_post_reset(struct bge_softc *, int);
461static void bge_sig_legacy(struct bge_softc *, int);
462static void bge_sig_pre_reset(struct bge_softc *, int);
463static void bge_stop_fw(struct bge_softc *);
464static int bge_reset(struct bge_softc *);
465static void bge_link_upd(struct bge_softc *);
466
467/*
468 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
469 * leak information to untrusted users. It is also known to cause alignment
470 * traps on certain architectures.
471 */
472#ifdef BGE_REGISTER_DEBUG
473static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
474static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
475static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
476#endif
477static void bge_add_sysctls(struct bge_softc *);
478static void bge_add_sysctl_stats_regs(struct bge_softc *,
479 struct sysctl_ctx_list *, struct sysctl_oid_list *);
480static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
481 struct sysctl_oid_list *);
482static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
483
484static device_method_t bge_methods[] = {
485 /* Device interface */
486 DEVMETHOD(device_probe, bge_probe),
487 DEVMETHOD(device_attach, bge_attach),
488 DEVMETHOD(device_detach, bge_detach),
489 DEVMETHOD(device_shutdown, bge_shutdown),
490 DEVMETHOD(device_suspend, bge_suspend),
491 DEVMETHOD(device_resume, bge_resume),
492
493 /* bus interface */
494 DEVMETHOD(bus_print_child, bus_generic_print_child),
495 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
496
497 /* MII interface */
498 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
499 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
500 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
501
502 { 0, 0 }
503};
504
505static driver_t bge_driver = {
506 "bge",
507 bge_methods,
508 sizeof(struct bge_softc)
509};
510
511static devclass_t bge_devclass;
512
513DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
514DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
515
516static int bge_allow_asf = 1;
517
518TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
519
520SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
521SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
522 "Allow ASF mode if available");
523
524#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
525#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
526#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
527#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
528#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
529
530static int
531bge_has_eaddr(struct bge_softc *sc)
532{
533#ifdef __sparc64__
534 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
535 device_t dev;
536 uint32_t subvendor;
537
538 dev = sc->bge_dev;
539
540 /*
541 * The on-board BGEs found in sun4u machines aren't fitted with
542 * an EEPROM which means that we have to obtain the MAC address
543 * via OFW and that some tests will always fail. We distinguish
544 * such BGEs by the subvendor ID, which also has to be obtained
545 * from OFW instead of the PCI configuration space as the latter
546 * indicates Broadcom as the subvendor of the netboot interface.
547 * For early Blade 1500 and 2500 we even have to check the OFW
548 * device path as the subvendor ID always defaults to Broadcom
549 * there.
550 */
551 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
552 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
553 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
554 return (0);
555 memset(buf, 0, sizeof(buf));
556 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
557 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
558 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
559 return (0);
560 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
562 return (0);
563 }
564#endif
565 return (1);
566}
567
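/*
 * Read and write NIC internal memory indirectly through the PCI
 * memory window registers in configuration space.
 */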
568static uint32_t
569bge_readmem_ind(struct bge_softc *sc, int off)
570{
571 device_t dev;
572 uint32_t val;
573
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
575 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
576 return (0);
577
578 dev = sc->bge_dev;
579
580 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
581 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
583 return (val);
584}
585
586static void
587bge_writemem_ind(struct bge_softc *sc, int off, int val)
588{
589 device_t dev;
590
591 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
592 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
593 return;
594
595 dev = sc->bge_dev;
596
597 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
599 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
600}
601
602#ifdef notdef
603static uint32_t
604bge_readreg_ind(struct bge_softc *sc, int off)
605{
606 device_t dev;
607
608 dev = sc->bge_dev;
609
610 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
611 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
612}
613#endif
614
615static void
616bge_writereg_ind(struct bge_softc *sc, int off, int val)
617{
618 device_t dev;
619
620 dev = sc->bge_dev;
621
622 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
623 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
624}
625
626static void
627bge_writemem_direct(struct bge_softc *sc, int off, int val)
628{
629 CSR_WRITE_4(sc, off, val);
630}
631
632static void
633bge_writembx(struct bge_softc *sc, int off, int val)
634{
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
636 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
637
638 CSR_WRITE_4(sc, off, val);
639}
640
641/*
642 * Map a single buffer address.
643 */
644
645static void
646bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
647{
648 struct bge_dmamap_arg *ctx;
649
650 if (error)
651 return;
652
653 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
654
655 ctx = arg;
656 ctx->bge_busaddr = segs->ds_addr;
657}
658
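/*
 * Read a single byte from NVRAM, acquiring and releasing the
 * software arbitration lock around the access.
 */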
659static uint8_t
660bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
661{
662 uint32_t access, byte = 0;
663 int i;
664
665 /* Lock. */
666 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
667 for (i = 0; i < 8000; i++) {
668 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
669 break;
670 DELAY(20);
671 }
672 if (i == 8000)
673 return (1);
674
675 /* Enable access. */
676 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
677 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
678
679 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
680 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
681 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
682 DELAY(10);
683 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
684 DELAY(10);
685 break;
686 }
687 }
688
689 if (i == BGE_TIMEOUT * 10) {
690 if_printf(sc->bge_ifp, "nvram read timed out\n");
691 return (1);
692 }
693
694 /* Get result. */
695 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
696
697 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
698
699 /* Disable access. */
700 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
701
702 /* Unlock. */
703 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
704 CSR_READ_4(sc, BGE_NVRAM_SWARB);
705
706 return (0);
707}
708
709/*
710 * Read a sequence of bytes from NVRAM.
711 */
712static int
713bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
714{
715 int err = 0, i;
716 uint8_t byte = 0;
717
718 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
719 return (1);
720
721 for (i = 0; i < cnt; i++) {
722 err = bge_nvram_getbyte(sc, off + i, &byte);
723 if (err)
724 break;
725 *(dest + i) = byte;
726 }
727
728 return (err ? 1 : 0);
729}
730
731/*
732 * Read a byte of data stored in the EEPROM at address 'addr.' The
733 * BCM570x supports both the traditional bitbang interface and an
734 * auto access interface for reading the EEPROM. We use the auto
735 * access method.
736 */
737static uint8_t
738bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
739{
740 int i;
741 uint32_t byte = 0;
742
743 /*
744 * Enable use of auto EEPROM access so we can avoid
745 * having to use the bitbang method.
746 */
747 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
748
749 /* Reset the EEPROM, load the clock period. */
750 CSR_WRITE_4(sc, BGE_EE_ADDR,
751 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
752 DELAY(20);
753
754 /* Issue the read EEPROM command. */
755 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
756
757 /* Wait for completion */
758 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
759 DELAY(10);
760 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
761 break;
762 }
763
764 if (i == BGE_TIMEOUT * 10) {
765 device_printf(sc->bge_dev, "EEPROM read timed out\n");
766 return (1);
767 }
768
769 /* Get result. */
770 byte = CSR_READ_4(sc, BGE_EE_DATA);
771
772 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
773
774 return (0);
775}
776
777/*
778 * Read a sequence of bytes from the EEPROM.
779 */
780static int
781bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
782{
783 int i, error = 0;
784 uint8_t byte = 0;
785
786 for (i = 0; i < cnt; i++) {
787 error = bge_eeprom_getbyte(sc, off + i, &byte);
788 if (error)
789 break;
790 *(dest + i) = byte;
791 }
792
793 return (error ? 1 : 0);
794}
795
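/*
 * Read a PHY register through the MI communication port.
 */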
796static int
797bge_miibus_readreg(device_t dev, int phy, int reg)
798{
799 struct bge_softc *sc;
800 uint32_t val;
801 int i;
802
803 sc = device_get_softc(dev);
804
 805 /* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
806 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
807 CSR_WRITE_4(sc, BGE_MI_MODE,
808 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
809 DELAY(80);
810 }
811
812 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
813 BGE_MIPHY(phy) | BGE_MIREG(reg));
814
815 /* Poll for the PHY register access to complete. */
816 for (i = 0; i < BGE_TIMEOUT; i++) {
817 DELAY(10);
818 val = CSR_READ_4(sc, BGE_MI_COMM);
819 if ((val & BGE_MICOMM_BUSY) == 0) {
820 DELAY(5);
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 break;
823 }
824 }
825
826 if (i == BGE_TIMEOUT) {
827 device_printf(sc->bge_dev,
828 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
829 phy, reg, val);
830 val = 0;
831 }
832
833 /* Restore the autopoll bit if necessary. */
834 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
835 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
836 DELAY(80);
837 }
838
839 if (val & BGE_MICOMM_READFAIL)
840 return (0);
841
842 return (val & 0xFFFF);
843}
844
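/*
 * Write a PHY register through the MI communication port.
 */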
845static int
846bge_miibus_writereg(device_t dev, int phy, int reg, int val)
847{
848 struct bge_softc *sc;
849 int i;
850
851 sc = device_get_softc(dev);
852
853 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
854 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
855 return (0);
856
 857 /* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
858 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
859 CSR_WRITE_4(sc, BGE_MI_MODE,
860 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
861 DELAY(80);
862 }
863
864 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
865 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
866
867 for (i = 0; i < BGE_TIMEOUT; i++) {
868 DELAY(10);
869 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
870 DELAY(5);
871 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
872 break;
873 }
874 }
875
876 /* Restore the autopoll bit if necessary. */
877 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
878 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
879 DELAY(80);
880 }
881
882 if (i == BGE_TIMEOUT)
883 device_printf(sc->bge_dev,
884 "PHY write timed out (phy %d, reg %d, val %d)\n",
885 phy, reg, val);
886
887 return (0);
888}
889
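/*
 * Handle a link state change reported by the PHY: reprogram the MAC
 * port mode, duplex and flow control settings to match the
 * negotiated media.
 */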
890static void
891bge_miibus_statchg(device_t dev)
892{
893 struct bge_softc *sc;
894 struct mii_data *mii;
895 sc = device_get_softc(dev);
896 mii = device_get_softc(sc->bge_miibus);
897
898 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
899 (IFM_ACTIVE | IFM_AVALID)) {
900 switch (IFM_SUBTYPE(mii->mii_media_active)) {
901 case IFM_10_T:
902 case IFM_100_TX:
903 sc->bge_link = 1;
904 break;
905 case IFM_1000_T:
906 case IFM_1000_SX:
907 case IFM_2500_SX:
908 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
909 sc->bge_link = 1;
910 else
911 sc->bge_link = 0;
912 break;
913 default:
914 sc->bge_link = 0;
915 break;
916 }
917 } else
918 sc->bge_link = 0;
919 if (sc->bge_link == 0)
920 return;
921 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
922 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
923 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
924 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
925 else
926 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
927
 928 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
929 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
930 if ((IFM_OPTIONS(mii->mii_media_active) &
931 IFM_ETH_TXPAUSE) != 0)
932 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
933 else
934 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
935 if ((IFM_OPTIONS(mii->mii_media_active) &
936 IFM_ETH_RXPAUSE) != 0)
937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
938 else
939 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
940 } else {
941 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
942 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
943 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
944 }
945}
946
947/*
 948 * Initialize a standard receive ring descriptor.
949 */
950static int
951bge_newbuf_std(struct bge_softc *sc, int i)
952{
953 struct mbuf *m;
954 struct bge_rx_bd *r;
955 bus_dma_segment_t segs[1];
956 bus_dmamap_t map;
957 int error, nsegs;
958
959 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
960 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
961 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
962 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
963 if (m == NULL)
964 return (ENOBUFS);
965 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
966 } else {
967 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
968 if (m == NULL)
969 return (ENOBUFS);
970 m->m_len = m->m_pkthdr.len = MCLBYTES;
971 }
972 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
973 m_adj(m, ETHER_ALIGN);
974
975 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
976 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
977 if (error != 0) {
978 m_freem(m);
979 return (error);
980 }
981 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
982 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
983 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
984 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
985 sc->bge_cdata.bge_rx_std_dmamap[i]);
986 }
987 map = sc->bge_cdata.bge_rx_std_dmamap[i];
988 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
989 sc->bge_cdata.bge_rx_std_sparemap = map;
990 sc->bge_cdata.bge_rx_std_chain[i] = m;
991 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
992 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
993 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
994 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
995 r->bge_flags = BGE_RXBDFLAG_END;
996 r->bge_len = segs[0].ds_len;
997 r->bge_idx = i;
998
999 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1000 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Initialize a jumbo receive ring descriptor. This allocates
1007 * a jumbo buffer from the pool managed internally by the driver.
1008 */
1009static int
1010bge_newbuf_jumbo(struct bge_softc *sc, int i)
1011{
1012 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1013 bus_dmamap_t map;
1014 struct bge_extrx_bd *r;
1015 struct mbuf *m;
1016 int error, nsegs;
1017
1018 MGETHDR(m, M_DONTWAIT, MT_DATA);
1019 if (m == NULL)
1020 return (ENOBUFS);
1021
1022 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1023 if (!(m->m_flags & M_EXT)) {
1024 m_freem(m);
1025 return (ENOBUFS);
1026 }
1027 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1028 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1029 m_adj(m, ETHER_ALIGN);
1030
1031 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1032 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1033 if (error != 0) {
1034 m_freem(m);
1035 return (error);
1036 }
1037
1038 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1039 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1040 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1041 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1042 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1043 }
1044 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1046 sc->bge_cdata.bge_rx_jumbo_sparemap;
1047 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1048 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1053
1054 /*
1055 * Fill in the extended RX buffer descriptor.
1056 */
1057 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1058 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1059 r->bge_idx = i;
1060 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1061 switch (nsegs) {
1062 case 4:
1063 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1064 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1065 r->bge_len3 = segs[3].ds_len;
1066 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1067 case 3:
1068 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1069 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1070 r->bge_len2 = segs[2].ds_len;
1071 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1072 case 2:
1073 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1074 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1075 r->bge_len1 = segs[1].ds_len;
1076 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1077 case 1:
1078 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1079 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1080 r->bge_len0 = segs[0].ds_len;
1081 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1082 break;
1083 default:
1084 panic("%s: %d segments\n", __func__, nsegs);
1085 }
1086
1087 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1088 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1089
1090 return (0);
1091}
1092
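/*
 * Fill the standard receive producer ring with mbufs and hand the
 * initial producer index to the chip.
 */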
1093static int
1094bge_init_rx_ring_std(struct bge_softc *sc)
1095{
1096 int error, i;
1097
1098 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1099 sc->bge_std = 0;
1100 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1101 if ((error = bge_newbuf_std(sc, i)) != 0)
1102 return (error);
1103 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1104 }
1105
1106 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1107 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1108
1109 sc->bge_std = 0;
1110 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1111
1112 return (0);
1113}
1114
1115static void
1116bge_free_rx_ring_std(struct bge_softc *sc)
1117{
1118 int i;
1119
1120 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1121 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1122 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1123 sc->bge_cdata.bge_rx_std_dmamap[i],
1124 BUS_DMASYNC_POSTREAD);
1125 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i]);
1127 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1128 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1129 }
1130 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1131 sizeof(struct bge_rx_bd));
1132 }
1133}
1134
1135static int
1136bge_init_rx_ring_jumbo(struct bge_softc *sc)
1137{
1138 struct bge_rcb *rcb;
1139 int error, i;
1140
1141 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1142 sc->bge_jumbo = 0;
1143 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1144 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1145 return (error);
1146 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1147 }
1148
1149 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1150 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1151
1152 sc->bge_jumbo = 0;
1153
1154 /* Enable the jumbo receive producer ring. */
1155 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1156 rcb->bge_maxlen_flags =
1157 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1158 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1159
1160 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1161
1162 return (0);
1163}
1164
1165static void
1166bge_free_rx_ring_jumbo(struct bge_softc *sc)
1167{
1168 int i;
1169
1170 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1171 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1172 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1173 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1174 BUS_DMASYNC_POSTREAD);
1175 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1177 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1178 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1179 }
1180 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1181 sizeof(struct bge_extrx_bd));
1182 }
1183}
1184
1185static void
1186bge_free_tx_ring(struct bge_softc *sc)
1187{
1188 int i;
1189
1190 if (sc->bge_ldata.bge_tx_ring == NULL)
1191 return;
1192
1193 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1194 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1195 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1196 sc->bge_cdata.bge_tx_dmamap[i],
1197 BUS_DMASYNC_POSTWRITE);
1198 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i]);
1200 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1201 sc->bge_cdata.bge_tx_chain[i] = NULL;
1202 }
1203 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1204 sizeof(struct bge_tx_bd));
1205 }
1206}
1207
1208static int
1209bge_init_tx_ring(struct bge_softc *sc)
1210{
1211 sc->bge_txcnt = 0;
1212 sc->bge_tx_saved_considx = 0;
1213
1214 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1215 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1216 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1217
1218 /* Initialize transmit producer index for host-memory send ring. */
1219 sc->bge_tx_prodidx = 0;
1220 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1221
1222 /* 5700 b2 errata */
1223 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1224 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1225
1226 /* NIC-memory send ring not used; initialize to zero. */
1227 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1228 /* 5700 b2 errata */
1229 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231
1232 return (0);
1233}
1234
1235static void
1236bge_setpromisc(struct bge_softc *sc)
1237{
1238 struct ifnet *ifp;
1239
1240 BGE_LOCK_ASSERT(sc);
1241
1242 ifp = sc->bge_ifp;
1243
1244 /* Enable or disable promiscuous mode as needed. */
1245 if (ifp->if_flags & IFF_PROMISC)
1246 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1247 else
1248 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1249}
1250
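/*
 * Program the multicast hash filter from the interface's multicast
 * address list.
 */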
1251static void
1252bge_setmulti(struct bge_softc *sc)
1253{
1254 struct ifnet *ifp;
1255 struct ifmultiaddr *ifma;
1256 uint32_t hashes[4] = { 0, 0, 0, 0 };
1257 int h, i;
1258
1259 BGE_LOCK_ASSERT(sc);
1260
1261 ifp = sc->bge_ifp;
1262
1263 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1264 for (i = 0; i < 4; i++)
1265 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1266 return;
1267 }
1268
1269 /* First, zot all the existing filters. */
1270 for (i = 0; i < 4; i++)
1271 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1272
1273 /* Now program new ones. */
1274 if_maddr_rlock(ifp);
1275 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1276 if (ifma->ifma_addr->sa_family != AF_LINK)
1277 continue;
1278 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1279 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1280 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1281 }
1282 if_maddr_runlock(ifp);
1283
1284 for (i = 0; i < 4; i++)
1285 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1286}
1287
1288static void
1289bge_setvlan(struct bge_softc *sc)
1290{
1291 struct ifnet *ifp;
1292
1293 BGE_LOCK_ASSERT(sc);
1294
1295 ifp = sc->bge_ifp;
1296
1297 /* Enable or disable VLAN tag stripping as needed. */
1298 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1299 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1300 else
1301 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1302}
1303
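/*
 * Firmware handshake helpers: notify the ASF/IPMI firmware of driver
 * state changes before and after a chip reset.
 */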
1304static void
1305bge_sig_pre_reset(struct bge_softc *sc, int type)
1306{
1307
1308 /*
1309 * Some chips don't like this so only do this if ASF is enabled
1310 */
1311 if (sc->bge_asf_mode)
1312 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1313
1314 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1315 switch (type) {
1316 case BGE_RESET_START:
 1317 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
 1318 BGE_FW_DRV_STATE_START);
 1319 break;
 1320 case BGE_RESET_STOP:
 1321 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
 1322 BGE_FW_DRV_STATE_UNLOAD);
 1323 break;
 1324 }
 1325 }
 1326 }
 1327
 1328 static void
 1329 bge_sig_post_reset(struct bge_softc *sc, int type)
 1330 {
 1331
 1332 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
 1333 switch (type) {
 1334 case BGE_RESET_START:
 1335 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
 1336 BGE_FW_DRV_STATE_START_DONE);
 1337 /* START DONE */
 1338 break;
 1339 case BGE_RESET_STOP:
 1340 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
 1341 BGE_FW_DRV_STATE_UNLOAD_DONE);
 1342 break;
 1343 }
 1344 }
 1345 }
 1346
 1347 static void
 1348 bge_sig_legacy(struct bge_softc *sc, int type)
 1349 {
 1350
 1351 if (sc->bge_asf_mode) {
 1352 switch (type) {
 1353 case BGE_RESET_START:
 1354 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
 1355 BGE_FW_DRV_STATE_START);
 1356 break;
 1357 case BGE_RESET_STOP:
 1358 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
 1359 BGE_FW_DRV_STATE_UNLOAD);
 1360 break;
 1361 }
 1362 }
 1363 }
1358
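/*
 * Ask the running firmware to pause and wait for it to acknowledge
 * the request.
 */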
1359static void
1360bge_stop_fw(struct bge_softc *sc)
1361{
1362 int i;
1363
1364 if (sc->bge_asf_mode) {
1365 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_PAUSE);
1366 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1367 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | (1 << 14));
1368
1369 for (i = 0; i < 100; i++ ) {
1370 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & (1 << 14)))
1371 break;
1372 DELAY(10);
1373 }
1374 }
1375}
1376
1377/*
1378 * Do endian, PCI and DMA initialization.
1379 */
1380static int
1381bge_chipinit(struct bge_softc *sc)
1382{
1383 uint32_t dma_rw_ctl, misc_ctl;
1384 uint16_t val;
1385 int i;
1386
1387 /* Set endianness before we access any non-PCI registers. */
1388 misc_ctl = BGE_INIT;
1389 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1390 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1391 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1392
1393 /* Clear the MAC control register */
1394 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1395
1396 /*
1397 * Clear the MAC statistics block in the NIC's
1398 * internal memory.
1399 */
1400 for (i = BGE_STATS_BLOCK;
1401 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1402 BGE_MEMWIN_WRITE(sc, i, 0);
1403
1404 for (i = BGE_STATUS_BLOCK;
1405 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1406 BGE_MEMWIN_WRITE(sc, i, 0);
1407
1408 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1409 /*
1410 * Fix data corruption caused by non-qword write with WB.
1411 * Fix master abort in PCI mode.
1412 * Fix PCI latency timer.
1413 */
1414 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1415 val |= (1 << 10) | (1 << 12) | (1 << 13);
1416 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1417 }
1418
1419 /*
1420 * Set up the PCI DMA control register.
1421 */
1422 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1423 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1424 if (sc->bge_flags & BGE_FLAG_PCIE) {
1425 /* Read watermark not used, 128 bytes for write. */
1426 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1427 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1428 if (BGE_IS_5714_FAMILY(sc)) {
1429 /* 256 bytes for read and write. */
1430 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1431 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1432 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1433 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1434 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1435 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1436 /*
1437 * In the BCM5703, the DMA read watermark should
1438 * be set to less than or equal to the maximum
1439 * memory read byte count of the PCI-X command
1440 * register.
1441 */
1442 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1443 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1444 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1445 /* 1536 bytes for read, 384 bytes for write. */
1446 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1447 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1448 } else {
1449 /* 384 bytes for read and write. */
1450 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1451 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1452 0x0F;
1453 }
1454 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1455 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1456 uint32_t tmp;
1457
1458 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1459 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1460 if (tmp == 6 || tmp == 7)
1461 dma_rw_ctl |=
1462 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1463
1464 /* Set PCI-X DMA write workaround. */
1465 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1466 }
1467 } else {
1468 /* Conventional PCI bus: 256 bytes for read and write. */
1469 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1470 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1471
1472 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1473 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1474 dma_rw_ctl |= 0x0F;
1475 }
1476 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1477 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1478 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1479 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1480 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1481 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1482 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1483 if (BGE_IS_5717_PLUS(sc)) {
1484 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1485 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1486 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1487 /*
1488 * Enable HW workaround for controllers that misinterpret
1489 * a status tag update and leave interrupts permanently
1490 * disabled.
1491 */
1492 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1493 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1494 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1495 }
1496 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1497
1498 /*
1499 * Set up general mode register.
1500 */
1501 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1502 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1503 BGE_MODECTL_TX_NO_PHDR_CSUM);
1504
1505 /*
 1506 * BCM5701 B5 has a bug causing data corruption when using
1507 * 64-bit DMA reads, which can be terminated early and then
1508 * completed later as 32-bit accesses, in combination with
1509 * certain bridges.
1510 */
1511 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1512 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1513 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1514
1515 /*
1516 * Tell the firmware the driver is running
1517 */
1518 if (sc->bge_asf_mode & ASF_STACKUP)
1519 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1520
1521 /*
1522 * Disable memory write invalidate. Apparently it is not supported
1523 * properly by these devices. Also ensure that INTx isn't disabled,
1524 * as these chips need it even when using MSI.
1525 */
1526 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1527 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1528
 1529 /* Set the timer prescaler (always 66 MHz) */
1530 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1531
1532 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1533 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1534 DELAY(40); /* XXX */
1535
1536 /* Put PHY into ready state */
1537 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1538 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1539 DELAY(40);
1540 }
1541
1542 return (0);
1543}
1544
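/*
 * Initialize the controller blocks: buffer manager, producer and
 * return rings, send ring, host coalescing engine, MAC and the DMA
 * state machines.
 */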
1545static int
1546bge_blockinit(struct bge_softc *sc)
1547{
1548 struct bge_rcb *rcb;
1549 bus_size_t vrcb;
1550 bge_hostaddr taddr;
1551 uint32_t dmactl, val;
1552 int i, limit;
1553
1554 /*
1555 * Initialize the memory window pointer register so that
1556 * we can access the first 32K of internal NIC RAM. This will
1557 * allow us to set up the TX send ring RCBs and the RX return
1558 * ring RCBs, plus other things which live in NIC memory.
1559 */
1560 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1561
1562 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1563
1564 if (!(BGE_IS_5705_PLUS(sc))) {
1565 /* Configure mbuf memory pool */
1566 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1567 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1568 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1569 else
1570 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1571
1572 /* Configure DMA resource pool */
1573 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1574 BGE_DMA_DESCRIPTORS);
1575 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1576 }
1577
1578 /* Configure mbuf pool watermarks */
1579 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1580 sc->bge_asicrev == BGE_ASICREV_BCM57765) {
1581 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1582 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1583 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1584 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1585 } else {
1586 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1587 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1588 }
1589 } else if (!BGE_IS_5705_PLUS(sc)) {
1590 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1591 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1592 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1593 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1594 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1595 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1596 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1597 } else {
1598 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1599 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1600 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1601 }
1602
1603 /* Configure DMA resource watermarks */
1604 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1605 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1606
1607 /* Enable buffer manager */
1608 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1609 /*
 1610 * Change the arbitration algorithm of TXMBUF read requests to
 1611 * round-robin instead of priority-based for the BCM5719. When
1612 * TXFIFO is almost empty, RDMA will hold its request until
1613 * TXFIFO is not almost empty.
1614 */
1615 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1616 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1617 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1618
1619 /* Poll for buffer manager start indication */
1620 for (i = 0; i < BGE_TIMEOUT; i++) {
1621 DELAY(10);
1622 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1623 break;
1624 }
1625
1626 if (i == BGE_TIMEOUT) {
1627 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1628 return (ENXIO);
1629 }
1630
1631 /* Enable flow-through queues */
1632 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1633 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1634
1635 /* Wait until queue initialization is complete */
1636 for (i = 0; i < BGE_TIMEOUT; i++) {
1637 DELAY(10);
1638 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1639 break;
1640 }
1641
1642 if (i == BGE_TIMEOUT) {
1643 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1644 return (ENXIO);
1645 }
1646
1647 /*
1648 * Summary of rings supported by the controller:
1649 *
1650 * Standard Receive Producer Ring
1651 * - This ring is used to feed receive buffers for "standard"
1652 * sized frames (typically 1536 bytes) to the controller.
1653 *
1654 * Jumbo Receive Producer Ring
1655 * - This ring is used to feed receive buffers for jumbo sized
1656 * frames (i.e. anything bigger than the "standard" frames)
1657 * to the controller.
1658 *
1659 * Mini Receive Producer Ring
1660 * - This ring is used to feed receive buffers for "mini"
1661 * sized frames to the controller.
1662 * - This feature required external memory for the controller
1663 * but was never used in a production system. Should always
1664 * be disabled.
1665 *
1666 * Receive Return Ring
1667 * - After the controller has placed an incoming frame into a
1668 * receive buffer that buffer is moved into a receive return
 1669 * ring. The driver is then responsible for passing the
1670 * buffer up to the stack. Many versions of the controller
1671 * support multiple RR rings.
1672 *
1673 * Send Ring
1674 * - This ring is used for outgoing frames. Many versions of
1675 * the controller support multiple send rings.
1676 */
1677
1678 /* Initialize the standard receive producer ring control block. */
1679 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1680 rcb->bge_hostaddr.bge_addr_lo =
1681 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1682 rcb->bge_hostaddr.bge_addr_hi =
1683 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1684 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1685 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1686 if (BGE_IS_5717_PLUS(sc)) {
1687 /*
1688 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1689 * Bits 15-2 : Maximum RX frame size
 1690 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1691 * Bit 0 : Reserved
1692 */
1693 rcb->bge_maxlen_flags =
1694 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1695 } else if (BGE_IS_5705_PLUS(sc)) {
1696 /*
1697 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1698 * Bits 15-2 : Reserved (should be 0)
1699 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1700 * Bit 0 : Reserved
1701 */
1702 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1703 } else {
1704 /*
1705 * Ring size is always XXX entries
1706 * Bits 31-16: Maximum RX frame size
1707 * Bits 15-2 : Reserved (should be 0)
1708 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1709 * Bit 0 : Reserved
1710 */
1711 rcb->bge_maxlen_flags =
1712 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1713 }
1714 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1715 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1716 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1717 else
1718 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1719 /* Write the standard receive producer ring control block. */
1720 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1721 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1722 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1723 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1724
1725 /* Reset the standard receive producer ring producer index. */
1726 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1727
1728 /*
1729 * Initialize the jumbo RX producer ring control
1730 * block. We set the 'ring disabled' bit in the
1731 * flags field until we're actually ready to start
1732 * using this ring (i.e. once we set the MTU
1733 * high enough to require it).
1734 */
1735 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1736 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1737 /* Get the jumbo receive producer ring RCB parameters. */
1738 rcb->bge_hostaddr.bge_addr_lo =
1739 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1740 rcb->bge_hostaddr.bge_addr_hi =
1741 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1742 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1743 sc->bge_cdata.bge_rx_jumbo_ring_map,
1744 BUS_DMASYNC_PREREAD);
1745 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1746 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1747 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1748 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1749 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1750 else
1751 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1752 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1753 rcb->bge_hostaddr.bge_addr_hi);
1754 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1755 rcb->bge_hostaddr.bge_addr_lo);
1756 /* Program the jumbo receive producer ring RCB parameters. */
1757 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1758 rcb->bge_maxlen_flags);
1759 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1760 /* Reset the jumbo receive producer ring producer index. */
1761 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1762 }
1763
1764 /* Disable the mini receive producer ring RCB. */
1765 if (BGE_IS_5700_FAMILY(sc)) {
1766 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1767 rcb->bge_maxlen_flags =
1768 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1769 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1770 rcb->bge_maxlen_flags);
1771 /* Reset the mini receive producer ring producer index. */
1772 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1773 }
1774
1775 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1776 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1777 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1778 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1779 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1780 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1781 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1782 }
1783 /*
1784 * The BD ring replenish thresholds control how often the
1785 * hardware fetches new BD's from the producer rings in host
1786 * memory. Setting the value too low on a busy system can
 1787 * starve the hardware and reduce the throughput.
1788 *
 1789 * Set the BD ring replenish thresholds. The recommended
1790 * values are 1/8th the number of descriptors allocated to
1791 * each ring.
1792 * XXX The 5754 requires a lower threshold, so it might be a
1793 * requirement of all 575x family chips. The Linux driver sets
1794 * the lower threshold for all 5705 family chips as well, but there
1795 * are reports that it might not need to be so strict.
1796 *
1797 * XXX Linux does some extra fiddling here for the 5906 parts as
1798 * well.
1799 */
1800 if (BGE_IS_5705_PLUS(sc))
1801 val = 8;
1802 else
1803 val = BGE_STD_RX_RING_CNT / 8;
1804 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1805 if (BGE_IS_JUMBO_CAPABLE(sc))
1806 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1807 BGE_JUMBO_RX_RING_CNT/8);
1808 if (BGE_IS_5717_PLUS(sc)) {
1809 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1810 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1811 }
1812
1813 /*
1814 * Disable all send rings by setting the 'ring disabled' bit
1815 * in the flags field of all the TX send ring control blocks,
1816 * located in NIC memory.
1817 */
1818 if (!BGE_IS_5705_PLUS(sc))
1819 /* 5700 to 5704 had 16 send rings. */
1820 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1821 else
1822 limit = 1;
1823 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1824 for (i = 0; i < limit; i++) {
1825 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1826 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1827 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1828 vrcb += sizeof(struct bge_rcb);
1829 }
1830
1831 /* Configure send ring RCB 0 (we use only the first ring) */
1832 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1833 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1834 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1835 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1836 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1837 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1838 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1839 else
1840 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1841 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1842 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1843 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1844
1845 /*
1846 * Disable all receive return rings by setting the
 1847 * 'ring disabled' bit in the flags field of all the receive
1848 * return ring control blocks, located in NIC memory.
1849 */
1850 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1851 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
1852 /* Should be 17, use 16 until we get an SRAM map. */
1853 limit = 16;
1854 } else if (!BGE_IS_5705_PLUS(sc))
1855 limit = BGE_RX_RINGS_MAX;
1856 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1857 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1858 limit = 4;
1859 else
1860 limit = 1;
1861 /* Disable all receive return rings. */
1862 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1863 for (i = 0; i < limit; i++) {
1864 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1865 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1866 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1867 BGE_RCB_FLAG_RING_DISABLED);
1868 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1869 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1870 (i * (sizeof(uint64_t))), 0);
1871 vrcb += sizeof(struct bge_rcb);
1872 }
1873
1874 /*
1875 * Set up receive return ring 0. Note that the NIC address
1876 * for RX return rings is 0x0. The return rings live entirely
1877 * within the host, so the nicaddr field in the RCB isn't used.
1878 */
1879 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1880 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1881 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1882 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1883 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1884 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1885 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1886
1887 /* Set random backoff seed for TX */
1888 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1889 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1890 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1891 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1892 BGE_TX_BACKOFF_SEED_MASK);
1893
1894 /* Set inter-packet gap */
1895 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1896
1897 /*
1898 * Specify which ring to use for packets that don't match
1899 * any RX rules.
1900 */
1901 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1902
1903 /*
1904 * Configure number of RX lists. One interrupt distribution
1905 * list, sixteen active lists, one bad frames class.
1906 */
1907 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1908
 1909 /* Initialize RX list placement stats mask. */
1910 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1911 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1912
1913 /* Disable host coalescing until we get it set up */
1914 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1915
1916 /* Poll to make sure it's shut down. */
1917 for (i = 0; i < BGE_TIMEOUT; i++) {
1918 DELAY(10);
1919 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1920 break;
1921 }
1922
1923 if (i == BGE_TIMEOUT) {
1924 device_printf(sc->bge_dev,
1925 "host coalescing engine failed to idle\n");
1926 return (ENXIO);
1927 }
1928
1929 /* Set up host coalescing defaults */
1930 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1931 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1932 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1933 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1934 if (!(BGE_IS_5705_PLUS(sc))) {
1935 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1936 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1937 }
1938 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1939 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1940
1941 /* Set up address of statistics block */
1942 if (!(BGE_IS_5705_PLUS(sc))) {
1943 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1944 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1945 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1946 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1947 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1948 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1949 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1950 }
1951
1952 /* Set up address of status block */
1953 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1954 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1955 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1956 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1957
1958 /* Set up status block size. */
1959 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1960 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1961 val = BGE_STATBLKSZ_FULL;
1962 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1963 } else {
1964 val = BGE_STATBLKSZ_32BYTE;
1965 bzero(sc->bge_ldata.bge_status_block, 32);
1966 }
1967 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1968 sc->bge_cdata.bge_status_map,
1969 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1970
1971 /* Turn on host coalescing state machine */
1972 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1973
1974 /* Turn on RX BD completion state machine and enable attentions */
1975 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1976 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1977
1978 /* Turn on RX list placement state machine */
1979 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1980
1981 /* Turn on RX list selector state machine. */
1982 if (!(BGE_IS_5705_PLUS(sc)))
1983 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1984
1985 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1986 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1987 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1988 BGE_MACMODE_FRMHDR_DMA_ENB;
1989
1990 if (sc->bge_flags & BGE_FLAG_TBI)
1991 val |= BGE_PORTMODE_TBI;
1992 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1993 val |= BGE_PORTMODE_GMII;
1994 else
1995 val |= BGE_PORTMODE_MII;
1996
1997 /* Turn on DMA, clear stats */
1998 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1999
2000 /* Set misc. local control, enable interrupts on attentions */
2001 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2002
2003#ifdef notdef
2004 /* Assert GPIO pins for PHY reset */
2005 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2006 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2007 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2008 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2009#endif
2010
2011 /* Turn on DMA completion state machine */
2012 if (!(BGE_IS_5705_PLUS(sc)))
2013 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2014
2015 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2016
2017 /* Enable host coalescing bug fix. */
2018 if (BGE_IS_5755_PLUS(sc))
2019 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2020
2021 /* Request larger DMA burst size to get better performance. */
2022 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2023 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2024
2025 /* Turn on write DMA state machine */
2026 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2027 DELAY(40);
2028
2029 /* Turn on read DMA state machine */
2030 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2031
2032 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2033 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2034
2035 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2036 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2037 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2038 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2039 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2040 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2041 if (sc->bge_flags & BGE_FLAG_PCIE)
2042 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2043 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2044 val |= BGE_RDMAMODE_TSO4_ENABLE;
2045 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2046 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2047 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2048 val |= BGE_RDMAMODE_TSO6_ENABLE;
2049 }
2050 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2051 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2052 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2053 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2054 BGE_IS_5717_PLUS(sc)) {
2055 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2056 /*
2057 * Adjust tx margin to prevent TX data corruption and
2058 * fix internal FIFO overflow.
2059 */
2060 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2061 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2062 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2063 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2064 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2065 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2066 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2067 }
2068 /*
2069 * Enable fix for read DMA FIFO overruns.
2070 * The fix is to limit the number of RX BDs
2071	 * the hardware would fetch at a time.
2072 */
2073 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2074 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2075 }
2076
2077 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2078 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2079 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2080 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2081 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2082 }
2083
2084 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2085 DELAY(40);
2086
2087 /* Turn on RX data completion state machine */
2088 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2089
2090 /* Turn on RX BD initiator state machine */
2091 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2092
2093 /* Turn on RX data and RX BD initiator state machine */
2094 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2095
2096 /* Turn on Mbuf cluster free state machine */
2097 if (!(BGE_IS_5705_PLUS(sc)))
2098 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2099
2100 /* Turn on send BD completion state machine */
2101 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2102
2103 /* Turn on send data completion state machine */
2104 val = BGE_SDCMODE_ENABLE;
2105 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2106 val |= BGE_SDCMODE_CDELAY;
2107 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2108
2109 /* Turn on send data initiator state machine */
2110 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2111 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2112 BGE_SDIMODE_HW_LSO_PRE_DMA);
2113 else
2114 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2115
2116 /* Turn on send BD initiator state machine */
2117 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2118
2119 /* Turn on send BD selector state machine */
2120 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2121
2122 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2123 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2124 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2125
2126 /* ack/clear link change events */
2127 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2128 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2129 BGE_MACSTAT_LINK_CHANGED);
2130 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2131
2132 /*
2133 * Enable attention when the link has changed state for
2134 * devices that use auto polling.
2135 */
2136 if (sc->bge_flags & BGE_FLAG_TBI) {
2137 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2138 } else {
2139 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2140 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2141 DELAY(80);
2142 }
2143 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2144 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2145 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2146 BGE_EVTENB_MI_INTERRUPT);
2147 }
2148
2149 /*
2150 * Clear any pending link state attention.
2151 * Otherwise some link state change events may be lost until attention
2152	 * is cleared by the bge_intr() -> bge_link_upd() sequence.
2153	 * This is not necessary on newer BCM chips; perhaps enabling link
2154	 * state change attentions implies clearing the pending attention.
2155 */
2156 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2157 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2158 BGE_MACSTAT_LINK_CHANGED);
2159
2160 /* Enable link state change attentions. */
2161 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2162
2163 return (0);
2164}
2165
2166const struct bge_revision *
2167bge_lookup_rev(uint32_t chipid)
2168{
2169 const struct bge_revision *br;
2170
2171 for (br = bge_revisions; br->br_name != NULL; br++) {
2172 if (br->br_chipid == chipid)
2173 return (br);
2174 }
2175
2176 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2177 if (br->br_chipid == BGE_ASICREV(chipid))
2178 return (br);
2179 }
2180
2181 return (NULL);
2182}
2183
2184const struct bge_vendor *
2185bge_lookup_vendor(uint16_t vid)
2186{
2187 const struct bge_vendor *v;
2188
2189 for (v = bge_vendors; v->v_name != NULL; v++)
2190 if (v->v_id == vid)
2191 return (v);
2192
2193 panic("%s: unknown vendor %d", __func__, vid);
2194 return (NULL);
2195}
2196
2197/*
2198 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2199 * against our list and return its name if we find a match.
2200 *
2201 * Note that since the Broadcom controller contains VPD support, we
2202 * try to get the device name string from the controller itself instead
2203 * of the compiled-in string. This guarantees we'll always announce the
2204 * right product name. We fall back to the compiled-in string when
2205 * VPD is unavailable or corrupt.
2206 */
2207static int
2208bge_probe(device_t dev)
2209{
2210 char buf[96];
2211 char model[64];
2212 const struct bge_revision *br;
2213 const char *pname;
2214 struct bge_softc *sc = device_get_softc(dev);
2215 const struct bge_type *t = bge_devs;
2216 const struct bge_vendor *v;
2217 uint32_t id;
2218 uint16_t did, vid;
2219
2220 sc->bge_dev = dev;
2221 vid = pci_get_vendor(dev);
2222 did = pci_get_device(dev);
2223	while (t->bge_vid != 0) {
2224 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2225 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2226 BGE_PCIMISCCTL_ASICREV_SHIFT;
2227 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2228 /*
2229				 * Find the ASIC revision. Different chips
2230 * use different registers.
2231 */
2232 switch (pci_get_device(dev)) {
2233 case BCOM_DEVICEID_BCM5717:
2234 case BCOM_DEVICEID_BCM5718:
2235 case BCOM_DEVICEID_BCM5719:
2236 id = pci_read_config(dev,
2237 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2238 break;
2239 case BCOM_DEVICEID_BCM57761:
2240 case BCOM_DEVICEID_BCM57765:
2241 case BCOM_DEVICEID_BCM57781:
2242 case BCOM_DEVICEID_BCM57785:
2243 case BCOM_DEVICEID_BCM57791:
2244 case BCOM_DEVICEID_BCM57795:
2245 id = pci_read_config(dev,
2246 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2247 break;
2248 default:
2249 id = pci_read_config(dev,
2250 BGE_PCI_PRODID_ASICREV, 4);
2251 }
2252 }
2253 br = bge_lookup_rev(id);
2254 v = bge_lookup_vendor(vid);
2255 if (bge_has_eaddr(sc) &&
2256 pci_get_vpd_ident(dev, &pname) == 0)
2257 snprintf(model, 64, "%s", pname);
2258 else
2259 snprintf(model, 64, "%s %s", v->v_name,
2260 br != NULL ? br->br_name :
2261 "NetXtreme Ethernet Controller");
2262 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2263 br != NULL ? "" : "unknown ", id);
2264 device_set_desc_copy(dev, buf);
2265 return (0);
2266 }
2267 t++;
2268 }
2269
2270 return (ENXIO);
2271}
2272
2273static void
2274bge_dma_free(struct bge_softc *sc)
2275{
2276 int i;
2277
2278 /* Destroy DMA maps for RX buffers. */
2279 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2280 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2281 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2282 sc->bge_cdata.bge_rx_std_dmamap[i]);
2283 }
2284 if (sc->bge_cdata.bge_rx_std_sparemap)
2285 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2286 sc->bge_cdata.bge_rx_std_sparemap);
2287
2288 /* Destroy DMA maps for jumbo RX buffers. */
2289 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2290 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2291 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2292 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2293 }
2294 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2295 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2296 sc->bge_cdata.bge_rx_jumbo_sparemap);
2297
2298 /* Destroy DMA maps for TX buffers. */
2299 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2300 if (sc->bge_cdata.bge_tx_dmamap[i])
2301 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2302 sc->bge_cdata.bge_tx_dmamap[i]);
2303 }
2304
2305 if (sc->bge_cdata.bge_rx_mtag)
2306 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2307 if (sc->bge_cdata.bge_tx_mtag)
2308 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2309
2310
2311 /* Destroy standard RX ring. */
2312 if (sc->bge_cdata.bge_rx_std_ring_map)
2313 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2314 sc->bge_cdata.bge_rx_std_ring_map);
2315 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2316 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2317 sc->bge_ldata.bge_rx_std_ring,
2318 sc->bge_cdata.bge_rx_std_ring_map);
2319
2320 if (sc->bge_cdata.bge_rx_std_ring_tag)
2321 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2322
2323 /* Destroy jumbo RX ring. */
2324 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2325 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2326 sc->bge_cdata.bge_rx_jumbo_ring_map);
2327
2328 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2329 sc->bge_ldata.bge_rx_jumbo_ring)
2330 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2331 sc->bge_ldata.bge_rx_jumbo_ring,
2332 sc->bge_cdata.bge_rx_jumbo_ring_map);
2333
2334 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2335 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2336
2337 /* Destroy RX return ring. */
2338 if (sc->bge_cdata.bge_rx_return_ring_map)
2339 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2340 sc->bge_cdata.bge_rx_return_ring_map);
2341
2342 if (sc->bge_cdata.bge_rx_return_ring_map &&
2343 sc->bge_ldata.bge_rx_return_ring)
2344 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2345 sc->bge_ldata.bge_rx_return_ring,
2346 sc->bge_cdata.bge_rx_return_ring_map);
2347
2348 if (sc->bge_cdata.bge_rx_return_ring_tag)
2349 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2350
2351 /* Destroy TX ring. */
2352 if (sc->bge_cdata.bge_tx_ring_map)
2353 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2354 sc->bge_cdata.bge_tx_ring_map);
2355
2356 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2357 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2358 sc->bge_ldata.bge_tx_ring,
2359 sc->bge_cdata.bge_tx_ring_map);
2360
2361 if (sc->bge_cdata.bge_tx_ring_tag)
2362 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2363
2364 /* Destroy status block. */
2365 if (sc->bge_cdata.bge_status_map)
2366 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2367 sc->bge_cdata.bge_status_map);
2368
2369 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2370 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2371 sc->bge_ldata.bge_status_block,
2372 sc->bge_cdata.bge_status_map);
2373
2374 if (sc->bge_cdata.bge_status_tag)
2375 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2376
2377 /* Destroy statistics block. */
2378 if (sc->bge_cdata.bge_stats_map)
2379 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2380 sc->bge_cdata.bge_stats_map);
2381
2382 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2383 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2384 sc->bge_ldata.bge_stats,
2385 sc->bge_cdata.bge_stats_map);
2386
2387 if (sc->bge_cdata.bge_stats_tag)
2388 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2389
2390 if (sc->bge_cdata.bge_buffer_tag)
2391 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2392
2393 /* Destroy the parent tag. */
2394 if (sc->bge_cdata.bge_parent_tag)
2395 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2396}
2397
2398static int
2399bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2400 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2401 bus_addr_t *paddr, const char *msg)
2402{
2403 struct bge_dmamap_arg ctx;
2404 bus_addr_t lowaddr;
2405 bus_size_t ring_end;
2406 int error;
2407
2408 lowaddr = BUS_SPACE_MAXADDR;
2409again:
2410 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2411 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2412 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2413 if (error != 0) {
2414 device_printf(sc->bge_dev,
2415 "could not create %s dma tag\n", msg);
2416 return (ENOMEM);
2417 }
2418 /* Allocate DMA'able memory for ring. */
2419 error = bus_dmamem_alloc(*tag, (void **)ring,
2420 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2421 if (error != 0) {
2422 device_printf(sc->bge_dev,
2423 "could not allocate DMA'able memory for %s\n", msg);
2424 return (ENOMEM);
2425 }
2426 /* Load the address of the ring. */
2427 ctx.bge_busaddr = 0;
2428 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2429 &ctx, BUS_DMA_NOWAIT);
2430 if (error != 0) {
2431 device_printf(sc->bge_dev,
2432 "could not load DMA'able memory for %s\n", msg);
2433 return (ENOMEM);
2434 }
2435 *paddr = ctx.bge_busaddr;
2436 ring_end = *paddr + maxsize;
2437 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2438 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2439 /*
2440 * 4GB boundary crossed. Limit maximum allowable DMA
2441 * address space to 32bit and try again.
2442 */
2443 bus_dmamap_unload(*tag, *map);
2444 bus_dmamem_free(*tag, *ring, *map);
2445 bus_dma_tag_destroy(*tag);
2446 if (bootverbose)
2447 device_printf(sc->bge_dev, "4GB boundary crossed, "
2448 "limit DMA address space to 32bit for %s\n", msg);
2449 *ring = NULL;
2450 *tag = NULL;
2451 *map = NULL;
2452 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2453 goto again;
2454 }
2455 return (0);
2456}
2457
2458static int
2459bge_dma_alloc(struct bge_softc *sc)
2460{
2461 bus_addr_t lowaddr;
2462 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2463 int i, error;
2464
2465 lowaddr = BUS_SPACE_MAXADDR;
2466 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2467 lowaddr = BGE_DMA_MAXADDR;
2468 /*
2469 * Allocate the parent bus DMA tag appropriate for PCI.
2470 */
2471 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2472 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2473 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2474 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2475 if (error != 0) {
2476 device_printf(sc->bge_dev,
2477 "could not allocate parent dma tag\n");
2478 return (ENOMEM);
2479 }
2480
2481 /* Create tag for standard RX ring. */
2482 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2483 &sc->bge_cdata.bge_rx_std_ring_tag,
2484 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2485 &sc->bge_cdata.bge_rx_std_ring_map,
2486 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2487 if (error)
2488 return (error);
2489
2490 /* Create tag for RX return ring. */
2491 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2492 &sc->bge_cdata.bge_rx_return_ring_tag,
2493 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2494 &sc->bge_cdata.bge_rx_return_ring_map,
2495 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2496 if (error)
2497 return (error);
2498
2499 /* Create tag for TX ring. */
2500 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2501 &sc->bge_cdata.bge_tx_ring_tag,
2502 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2503 &sc->bge_cdata.bge_tx_ring_map,
2504 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2505 if (error)
2506 return (error);
2507
2508 /*
2509 * Create tag for status block.
2510	 * Because we use only a single TX/RX/RX return ring, use the
2511	 * minimum status block size, except on BCM5700 AX/BX which
2512	 * seems to want to see the full status block size regardless
2513	 * of the configured number of rings.
2514 */
2515 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2516 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2517 sbsz = BGE_STATUS_BLK_SZ;
2518 else
2519 sbsz = 32;
2520 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2521 &sc->bge_cdata.bge_status_tag,
2522 (uint8_t **)&sc->bge_ldata.bge_status_block,
2523 &sc->bge_cdata.bge_status_map,
2524 &sc->bge_ldata.bge_status_block_paddr, "status block");
2525 if (error)
2526 return (error);
2527
2528 /* Create tag for statistics block. */
2529 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2530 &sc->bge_cdata.bge_stats_tag,
2531 (uint8_t **)&sc->bge_ldata.bge_stats,
2532 &sc->bge_cdata.bge_stats_map,
2533 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2534 if (error)
2535 return (error);
2536
2537 /* Create tag for jumbo RX ring. */
2538 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2539 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2540 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2541 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2542 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2543 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2544 if (error)
2545 return (error);
2546 }
2547
2548 /* Create parent tag for buffers. */
2549 boundary = 0;
2550 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2551 boundary = BGE_DMA_BNDRY;
2552 /*
2553 * XXX
2554		 * A watchdog timeout issue was observed on BCM5704 devices that
2555		 * live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2556		 * Limiting the DMA address space to 32 bits seems to address
2557		 * it.
2558 */
2559 if (sc->bge_flags & BGE_FLAG_PCIX)
2560 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2561 }
2562 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2563 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2564 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2565 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2566 if (error != 0) {
2567 device_printf(sc->bge_dev,
2568 "could not allocate buffer dma tag\n");
2569 return (ENOMEM);
2570 }
2571 /* Create tag for Tx mbufs. */
2572 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2573 txsegsz = BGE_TSOSEG_SZ;
2574 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2575 } else {
2576 txsegsz = MCLBYTES;
2577 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2578 }
2579 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2580 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2581 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2582 &sc->bge_cdata.bge_tx_mtag);
2583
2584 if (error) {
2585 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2586 return (ENOMEM);
2587 }
2588
2589 /* Create tag for Rx mbufs. */
2590 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2591 rxmaxsegsz = MJUM9BYTES;
2592 else
2593 rxmaxsegsz = MCLBYTES;
2594 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2595 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2596 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2597
2598 if (error) {
2599 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2600 return (ENOMEM);
2601 }
2602
2603 /* Create DMA maps for RX buffers. */
2604 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2605 &sc->bge_cdata.bge_rx_std_sparemap);
2606 if (error) {
2607 device_printf(sc->bge_dev,
2608 "can't create spare DMA map for RX\n");
2609 return (ENOMEM);
2610 }
2611 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2612 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2613 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2614 if (error) {
2615 device_printf(sc->bge_dev,
2616 "can't create DMA map for RX\n");
2617 return (ENOMEM);
2618 }
2619 }
2620
2621 /* Create DMA maps for TX buffers. */
2622 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2623 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2624 &sc->bge_cdata.bge_tx_dmamap[i]);
2625 if (error) {
2626 device_printf(sc->bge_dev,
2627 "can't create DMA map for TX\n");
2628 return (ENOMEM);
2629 }
2630 }
2631
2632 /* Create tags for jumbo RX buffers. */
2633 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2634 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2635 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2636 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2637 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2638 if (error) {
2639 device_printf(sc->bge_dev,
2640 "could not allocate jumbo dma tag\n");
2641 return (ENOMEM);
2642 }
2643 /* Create DMA maps for jumbo RX buffers. */
2644 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2645 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2646 if (error) {
2647 device_printf(sc->bge_dev,
2648 "can't create spare DMA map for jumbo RX\n");
2649 return (ENOMEM);
2650 }
2651 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2652 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2653 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2654 if (error) {
2655 device_printf(sc->bge_dev,
2656 "can't create DMA map for jumbo RX\n");
2657 return (ENOMEM);
2658 }
2659 }
2660 }
2661
2662 return (0);
2663}
2664
2665/*
2666 * Return true if this device has more than one port.
2667 */
2668static int
2669bge_has_multiple_ports(struct bge_softc *sc)
2670{
2671 device_t dev = sc->bge_dev;
2672 u_int b, d, f, fscan, s;
2673
2674 d = pci_get_domain(dev);
2675 b = pci_get_bus(dev);
2676 s = pci_get_slot(dev);
2677 f = pci_get_function(dev);
2678 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2679 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2680 return (1);
2681 return (0);
2682}
2683
2684/*
2685 * Return true if MSI can be used with this device.
2686 */
2687static int
2688bge_can_use_msi(struct bge_softc *sc)
2689{
2690 int can_use_msi = 0;
2691
2692 /* Disable MSI for polling(4). */
2693#ifdef DEVICE_POLLING
2694 return (0);
2695#endif
2696 switch (sc->bge_asicrev) {
2697 case BGE_ASICREV_BCM5714_A0:
2698 case BGE_ASICREV_BCM5714:
2699 /*
2700 * Apparently, MSI doesn't work when these chips are
2701 * configured in single-port mode.
2702 */
2703 if (bge_has_multiple_ports(sc))
2704 can_use_msi = 1;
2705 break;
2706 case BGE_ASICREV_BCM5750:
2707 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2708 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2709 can_use_msi = 1;
2710 break;
2711 default:
2712 if (BGE_IS_575X_PLUS(sc))
2713 can_use_msi = 1;
2714 }
2715 return (can_use_msi);
2716}
2717
2718static int
2719bge_attach(device_t dev)
2720{
2721 struct ifnet *ifp;
2722 struct bge_softc *sc;
2723 uint32_t hwcfg = 0, misccfg;
2724 u_char eaddr[ETHER_ADDR_LEN];
2725 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2726
2727 sc = device_get_softc(dev);
2728 sc->bge_dev = dev;
2729
2730 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2731
2732 /*
2733 * Map control/status registers.
2734 */
2735 pci_enable_busmaster(dev);
2736
2737 rid = PCIR_BAR(0);
2738 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2739 RF_ACTIVE);
2740
2741 if (sc->bge_res == NULL) {
2742 device_printf (sc->bge_dev, "couldn't map memory\n");
2743 error = ENXIO;
2744 goto fail;
2745 }
2746
2747 /* Save various chip information. */
2748 sc->bge_chipid =
2749 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2750 BGE_PCIMISCCTL_ASICREV_SHIFT;
2751 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2752 /*
2753		 * Find the ASIC revision. Different chips use different
2754 * registers.
2755 */
2756 switch (pci_get_device(dev)) {
2757 case BCOM_DEVICEID_BCM5717:
2758 case BCOM_DEVICEID_BCM5718:
2759 case BCOM_DEVICEID_BCM5719:
2760 sc->bge_chipid = pci_read_config(dev,
2761 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2762 break;
2763 case BCOM_DEVICEID_BCM57761:
2764 case BCOM_DEVICEID_BCM57765:
2765 case BCOM_DEVICEID_BCM57781:
2766 case BCOM_DEVICEID_BCM57785:
2767 case BCOM_DEVICEID_BCM57791:
2768 case BCOM_DEVICEID_BCM57795:
2769 sc->bge_chipid = pci_read_config(dev,
2770 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2771 break;
2772 default:
2773 sc->bge_chipid = pci_read_config(dev,
2774 BGE_PCI_PRODID_ASICREV, 4);
2775 }
2776 }
2777 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2778 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2779
2780 /* Set default PHY address. */
2781 phy_addr = 1;
2782 /*
2783 * PHY address mapping for various devices.
2784 *
2785 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2786 * ---------+-------+-------+-------+-------+
2787 * BCM57XX | 1 | X | X | X |
2788 * BCM5704 | 1 | X | 1 | X |
2789 * BCM5717 | 1 | 8 | 2 | 9 |
2790 * BCM5719 | 1 | 8 | 2 | 9 |
2791 *
2792 * Other addresses may respond but they are not
2793 * IEEE compliant PHYs and should be ignored.
2794 */
2795 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2796 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2797 f = pci_get_function(dev);
2798 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2799 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2800 BGE_SGDIGSTS_IS_SERDES)
2801 phy_addr = f + 8;
2802 else
2803 phy_addr = f + 1;
2804 } else {
2805 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2806 BGE_CPMU_PHY_STRAP_IS_SERDES)
2807 phy_addr = f + 8;
2808 else
2809 phy_addr = f + 1;
2810 }
2811 }
2812
2813 /*
2814 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2815 * 5705 A0 and A1 chips.
2816 */
2817 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2818 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2819 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2820 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2821 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2822 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2823
2824 if (bge_has_eaddr(sc))
2825 sc->bge_flags |= BGE_FLAG_EADDR;
2826
2827 /* Save chipset family. */
2828 switch (sc->bge_asicrev) {
2829 case BGE_ASICREV_BCM5717:
2830 case BGE_ASICREV_BCM5719:
2831 case BGE_ASICREV_BCM57765:
2832 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2833 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2834 BGE_FLAG_JUMBO_FRAME;
2835 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2836 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2837 /* Jumbo frame on BCM5719 A0 does not work. */
2838 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2839 }
2840 break;
2841 case BGE_ASICREV_BCM5755:
2842 case BGE_ASICREV_BCM5761:
2843 case BGE_ASICREV_BCM5784:
2844 case BGE_ASICREV_BCM5785:
2845 case BGE_ASICREV_BCM5787:
2846 case BGE_ASICREV_BCM57780:
2847 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2848 BGE_FLAG_5705_PLUS;
2849 break;
2850 case BGE_ASICREV_BCM5700:
2851 case BGE_ASICREV_BCM5701:
2852 case BGE_ASICREV_BCM5703:
2853 case BGE_ASICREV_BCM5704:
2854 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2855 break;
2856 case BGE_ASICREV_BCM5714_A0:
2857 case BGE_ASICREV_BCM5780:
2858 case BGE_ASICREV_BCM5714:
2859 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2860 /* FALLTHROUGH */
2861 case BGE_ASICREV_BCM5750:
2862 case BGE_ASICREV_BCM5752:
2863 case BGE_ASICREV_BCM5906:
2864 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2865 /* FALLTHROUGH */
2866 case BGE_ASICREV_BCM5705:
2867 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2868 break;
2869 }
2870
2871 /* Set various PHY bug flags. */
2872 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2873 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2874 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2875 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2876 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2877 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2878 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2879 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2880 if (pci_get_subvendor(dev) == DELL_VENDORID)
2881 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2882 if ((BGE_IS_5705_PLUS(sc)) &&
2883 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2884 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2885 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2886 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2887 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2888 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2889 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2890 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2891 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2892 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2893 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2894 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2895 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2896 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2897 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2898 } else
2899 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2900 }
2901
2902	/* Identify the chips that use a CPMU. */
2903 if (BGE_IS_5717_PLUS(sc) ||
2904 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2905 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2906 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2907 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2908 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2909 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2910 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2911 else
2912 sc->bge_mi_mode = BGE_MIMODE_BASE;
2913 /* Enable auto polling for BCM570[0-5]. */
2914 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2915 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2916
2917 /*
2918	 * All Broadcom controllers have the 4GB boundary DMA bug.
2919	 * Whenever a DMA transfer crosses a multiple of the 4GB boundary
2920	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2921	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2922	 * state machine will lock up and cause the device to hang.
2923 */
2924 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2925
2926	/* BCM5755 or higher and BCM5906 have the short DMA bug. */
2927 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2928 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2929
2930 /*
2931	 * BCM5719 cannot handle DMA requests for DMA segments that
2932	 * are larger than 4KB in size. However, the maximum DMA
2933	 * segment size created in the DMA tag is 4KB for TSO, so we
2934	 * should not encounter the issue here.
2935 */
2936 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2937 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2938
2939 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2940 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2941 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2942 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2943 sc->bge_flags |= BGE_FLAG_5788;
2944 }
2945
2946 capmask = BMSR_DEFCAPMASK;
2947 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2948 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2949 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2950 pci_get_vendor(dev) == BCOM_VENDORID &&
2951 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2952 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2953 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2954 (pci_get_vendor(dev) == BCOM_VENDORID &&
2955 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2956 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2957 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2958 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2959 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2960 /* These chips are 10/100 only. */
2961 capmask &= ~BMSR_EXTSTAT;
2962 }
2963
2964 /*
2965	 * Some controllers seem to require special firmware to use
2966	 * TSO. But that firmware is not available to FreeBSD, and Linux
2967	 * claims that the TSO performed by the firmware is slower than
2968	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2969	 * known bug: it can't handle TSO when the Ethernet header plus
2970	 * IP/TCP header is greater than 80 bytes. A workaround for the
2971	 * TSO bug exists, but it seems more expensive than not using
2972	 * TSO at all. Some hardware also has the TSO bug, so limit
2973	 * TSO to the controllers that are not affected by TSO issues
2974	 * (e.g. 5755 or higher).
2975 */
2976 if (BGE_IS_5717_PLUS(sc)) {
2977 /* BCM5717 requires different TSO configuration. */
2978 sc->bge_flags |= BGE_FLAG_TSO3;
2979 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2980 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2981 /* TSO on BCM5719 A0 does not work. */
2982 sc->bge_flags &= ~BGE_FLAG_TSO3;
2983 }
2984 } else if (BGE_IS_5755_PLUS(sc)) {
2985 /*
2986		 * BCM5754 and BCM5787 share the same ASIC ID, so an
2987		 * explicit device ID check is required.
2988		 * For unknown reasons, TSO does not work on BCM5755M.
2989 */
2990 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2991 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2992 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2993 sc->bge_flags |= BGE_FLAG_TSO;
2994 }
2995
2996 /*
2997 * Check if this is a PCI-X or PCI Express device.
2998 */
2999 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3000 /*
3001 * Found a PCI Express capabilities register, this
3002 * must be a PCI Express device.
3003 */
3004 sc->bge_flags |= BGE_FLAG_PCIE;
3005 sc->bge_expcap = reg;
3006 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3007 pci_set_max_read_req(dev, 2048);
3008 else if (pci_get_max_read_req(dev) != 4096)
3009 pci_set_max_read_req(dev, 4096);
3010 } else {
3011 /*
3012 * Check if the device is in PCI-X Mode.
3013 * (This bit is not valid on PCI Express controllers.)
3014 */
3015 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3016 sc->bge_pcixcap = reg;
3017 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3018 BGE_PCISTATE_PCI_BUSMODE) == 0)
3019 sc->bge_flags |= BGE_FLAG_PCIX;
3020 }
3021
3022 /*
3023 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3024 * not actually a MAC controller bug but an issue with the embedded
3025 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3026 */
3027 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3028 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3029 /*
3030 * Allocate the interrupt, using MSI if possible. These devices
3031 * support 8 MSI messages, but only the first one is used in
3032 * normal operation.
3033 */
3034 rid = 0;
3035 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3036 sc->bge_msicap = reg;
3037 if (bge_can_use_msi(sc)) {
3038 msicount = pci_msi_count(dev);
3039 if (msicount > 1)
3040 msicount = 1;
3041 } else
3042 msicount = 0;
3043 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3044 rid = 1;
3045 sc->bge_flags |= BGE_FLAG_MSI;
3046 }
3047 }
3048
3049 /*
3050	 * All controllers except the BCM5700 support tagged status, but
3051	 * we use tagged status only for the MSI case on BCM5717. Otherwise
3052	 * MSI on BCM5717 does not work.
3053 */
3054#ifndef DEVICE_POLLING
3055 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3056 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3057#endif
3058
3059 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3060 RF_SHAREABLE | RF_ACTIVE);
3061
3062 if (sc->bge_irq == NULL) {
3063 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3064 error = ENXIO;
3065 goto fail;
3066 }
3067
3068 device_printf(dev,
3069 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3070 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3071 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3072 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3073
3074 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3075
3076 /* Try to reset the chip. */
3077 if (bge_reset(sc)) {
3078 device_printf(sc->bge_dev, "chip reset failed\n");
3079 error = ENXIO;
3080 goto fail;
3081 }
3082
3083 sc->bge_asf_mode = 0;
3084 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3085 BGE_SRAM_DATA_SIG_MAGIC)) {
3086 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3087 & BGE_HWCFG_ASF) {
3088 sc->bge_asf_mode |= ASF_ENABLE;
3089 sc->bge_asf_mode |= ASF_STACKUP;
3090 if (BGE_IS_575X_PLUS(sc))
3091 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3092 }
3093 }
3094
3095 /* Try to reset the chip again the nice way. */
3096 bge_stop_fw(sc);
3097 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3098 if (bge_reset(sc)) {
3099 device_printf(sc->bge_dev, "chip reset failed\n");
3100 error = ENXIO;
3101 goto fail;
3102 }
3103
3104 bge_sig_legacy(sc, BGE_RESET_STOP);
3105 bge_sig_post_reset(sc, BGE_RESET_STOP);
3106
3107 if (bge_chipinit(sc)) {
3108 device_printf(sc->bge_dev, "chip initialization failed\n");
3109 error = ENXIO;
3110 goto fail;
3111 }
3112
3113 error = bge_get_eaddr(sc, eaddr);
3114 if (error) {
3115 device_printf(sc->bge_dev,
3116 "failed to read station address\n");
3117 error = ENXIO;
3118 goto fail;
3119 }
3120
3121 /* 5705 limits RX return ring to 512 entries. */
3122 if (BGE_IS_5717_PLUS(sc))
3123 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3124 else if (BGE_IS_5705_PLUS(sc))
3125 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3126 else
3127 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3128
3129 if (bge_dma_alloc(sc)) {
3130 device_printf(sc->bge_dev,
3131 "failed to allocate DMA resources\n");
3132 error = ENXIO;
3133 goto fail;
3134 }
3135
3136 bge_add_sysctls(sc);
3137
3138 /* Set default tuneable values. */
3139 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3140 sc->bge_rx_coal_ticks = 150;
3141 sc->bge_tx_coal_ticks = 150;
3142 sc->bge_rx_max_coal_bds = 10;
3143 sc->bge_tx_max_coal_bds = 10;
3144
3145 /* Initialize checksum features to use. */
3146 sc->bge_csum_features = BGE_CSUM_FEATURES;
3147 if (sc->bge_forced_udpcsum != 0)
3148 sc->bge_csum_features |= CSUM_UDP;
3149
3150 /* Set up ifnet structure */
3151 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3152 if (ifp == NULL) {
3153 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3154 error = ENXIO;
3155 goto fail;
3156 }
3157 ifp->if_softc = sc;
3158 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3159 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3160 ifp->if_ioctl = bge_ioctl;
3161 ifp->if_start = bge_start;
3162 ifp->if_init = bge_init;
3163 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3164 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3165 IFQ_SET_READY(&ifp->if_snd);
3166 ifp->if_hwassist = sc->bge_csum_features;
3167 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3168 IFCAP_VLAN_MTU;
3169 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3170 ifp->if_hwassist |= CSUM_TSO;
3171 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3172 }
3173#ifdef IFCAP_VLAN_HWCSUM
3174 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3175#endif
3176 ifp->if_capenable = ifp->if_capabilities;
3177#ifdef DEVICE_POLLING
3178 ifp->if_capabilities |= IFCAP_POLLING;
3179#endif
3180
3181 /*
3182 * 5700 B0 chips do not support checksumming correctly due
3183 * to hardware bugs.
3184 */
3185 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3186 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3187 ifp->if_capenable &= ~IFCAP_HWCSUM;
3188 ifp->if_hwassist = 0;
3189 }
3190
3191 /*
3192 * Figure out what sort of media we have by checking the
3193 * hardware config word in the first 32k of NIC internal memory,
3194 * or fall back to examining the EEPROM if necessary.
3195 * Note: on some BCM5700 cards, this value appears to be unset.
3196 * If that's the case, we have to rely on identifying the NIC
3197 * by its PCI subsystem ID, as we do below for the SysKonnect
3198 * SK-9D41.
3199 */
3200 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3201 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3202 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3203 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3204 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3205 sizeof(hwcfg))) {
3206 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3207 error = ENXIO;
3208 goto fail;
3209 }
3210 hwcfg = ntohl(hwcfg);
3211 }
3212
3213 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3214 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3215 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3216 if (BGE_IS_5714_FAMILY(sc))
3217 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3218 else
3219 sc->bge_flags |= BGE_FLAG_TBI;
3220 }
3221
3222 if (sc->bge_flags & BGE_FLAG_TBI) {
3223 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3224 bge_ifmedia_sts);
3225 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3226 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3227 0, NULL);
3228 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3229 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3230 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3231 } else {
3232 /*
3233		 * Do transceiver setup and tell the firmware the
3234		 * driver is down so we can try to get access to
3235		 * probe the PHY if ASF is running. Retry a couple of times
3236 * if we get a conflict with the ASF firmware accessing
3237 * the PHY.
3238 */
3239 trys = 0;
3240 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3241again:
3242 bge_asf_driver_up(sc);
3243
3244 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3245 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3246 MIIF_DOPAUSE);
3247 if (error != 0) {
3248 if (trys++ < 4) {
3249 device_printf(sc->bge_dev, "Try again\n");
3250 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3251 BMCR_RESET);
3252 goto again;
3253 }
3254 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3255 goto fail;
3256 }
3257
3258 /*
3259 * Now tell the firmware we are going up after probing the PHY
3260 */
3261 if (sc->bge_asf_mode & ASF_STACKUP)
3262 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3263 }
3264
3265 /*
3266 * When using the BCM5701 in PCI-X mode, data corruption has
3267 * been observed in the first few bytes of some received packets.
3268 * Aligning the packet buffer in memory eliminates the corruption.
3269 * Unfortunately, this misaligns the packet payloads. On platforms
3270 * which do not support unaligned accesses, we will realign the
3271 * payloads by copying the received packets.
3272 */
3273 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3274 sc->bge_flags & BGE_FLAG_PCIX)
3275 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3276
3277 /*
3278 * Call MI attach routine.
3279 */
3280 ether_ifattach(ifp, eaddr);
3281 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3282
3283 /* Tell upper layer we support long frames. */
3284 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3285
3286 /*
3287 * Hookup IRQ last.
3288 */
3289 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3290 /* Take advantage of single-shot MSI. */
3291 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3292 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3293 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3294 taskqueue_thread_enqueue, &sc->bge_tq);
3295 if (sc->bge_tq == NULL) {
3296 device_printf(dev, "could not create taskqueue.\n");
3297 ether_ifdetach(ifp);
3298 error = ENXIO;
3299 goto fail;
3300 }
3301 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3302 device_get_nameunit(sc->bge_dev));
3303 error = bus_setup_intr(dev, sc->bge_irq,
3304 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3305 &sc->bge_intrhand);
3306 if (error)
3307 ether_ifdetach(ifp);
3308 } else
3309 error = bus_setup_intr(dev, sc->bge_irq,
3310 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3311 &sc->bge_intrhand);
3312
3313 if (error) {
3314 bge_detach(dev);
3315 device_printf(sc->bge_dev, "couldn't set up irq\n");
3316 }
3317
3318 return (0);
3319
3320fail:
3321 bge_release_resources(sc);
3322
3323 return (error);
3324}
3325
3326static int
3327bge_detach(device_t dev)
3328{
3329 struct bge_softc *sc;
3330 struct ifnet *ifp;
3331
3332 sc = device_get_softc(dev);
3333 ifp = sc->bge_ifp;
3334
3335#ifdef DEVICE_POLLING
3336 if (ifp->if_capenable & IFCAP_POLLING)
3337 ether_poll_deregister(ifp);
3338#endif
3339
3340 BGE_LOCK(sc);
3341 bge_stop(sc);
3342 bge_reset(sc);
3343 BGE_UNLOCK(sc);
3344
3345 callout_drain(&sc->bge_stat_ch);
3346
3347 if (sc->bge_tq)
3348 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3349 ether_ifdetach(ifp);
3350
3351 if (sc->bge_flags & BGE_FLAG_TBI) {
3352 ifmedia_removeall(&sc->bge_ifmedia);
3353 } else {
3354 bus_generic_detach(dev);
3355 device_delete_child(dev, sc->bge_miibus);
3356 }
3357
3358 bge_release_resources(sc);
3359
3360 return (0);
3361}
3362
3363static void
3364bge_release_resources(struct bge_softc *sc)
3365{
3366 device_t dev;
3367
3368 dev = sc->bge_dev;
3369
3370 if (sc->bge_tq != NULL)
3371 taskqueue_free(sc->bge_tq);
3372
3373 if (sc->bge_intrhand != NULL)
3374 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3375
3376 if (sc->bge_irq != NULL)
3377 bus_release_resource(dev, SYS_RES_IRQ,
3378 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3379
3380 if (sc->bge_flags & BGE_FLAG_MSI)
3381 pci_release_msi(dev);
3382
3383 if (sc->bge_res != NULL)
3384 bus_release_resource(dev, SYS_RES_MEMORY,
3385 PCIR_BAR(0), sc->bge_res);
3386
3387 if (sc->bge_ifp != NULL)
3388 if_free(sc->bge_ifp);
3389
3390 bge_dma_free(sc);
3391
3392 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3393 BGE_LOCK_DESTROY(sc);
3394}
3395
3396static int
3397bge_reset(struct bge_softc *sc)
3398{
3399 device_t dev;
3400 uint32_t cachesize, command, pcistate, reset, val;
3401 void (*write_op)(struct bge_softc *, int, int);
3402 uint16_t devctl;
3403 int i;
3404
3405 dev = sc->bge_dev;
3406
3407 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3408 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3409 if (sc->bge_flags & BGE_FLAG_PCIE)
3410 write_op = bge_writemem_direct;
3411 else
3412 write_op = bge_writemem_ind;
3413 } else
3414 write_op = bge_writereg_ind;
3415
3416 /* Save some important PCI state. */
3417 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3418 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3419 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3420
3421 pci_write_config(dev, BGE_PCI_MISC_CTL,
3422 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3423 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3424
3425 /* Disable fastboot on controllers that support it. */
3426 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3427 BGE_IS_5755_PLUS(sc)) {
3428 if (bootverbose)
3429 device_printf(dev, "Disabling fastboot\n");
3430 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3431 }
3432
3433 /*
3434 * Write the magic number to SRAM at offset 0xB50.
3435 * When firmware finishes its initialization it will
3436 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3437 */
3438 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3439
3440 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3441
3442 /* XXX: Broadcom Linux driver. */
3443 if (sc->bge_flags & BGE_FLAG_PCIE) {
3444 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3445 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3446 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3447 /* Prevent PCIE link training during global reset */
3448 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3449 reset |= 1 << 29;
3450 }
3451 }
3452
3453 /*
3454 * Set GPHY Power Down Override to leave GPHY
3455 * powered up in D0 uninitialized.
3456 */
3457 if (BGE_IS_5705_PLUS(sc) &&
3458 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3459 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3460
3461 /* Issue global reset */
3462 write_op(sc, BGE_MISC_CFG, reset);
3463
3464 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3465 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3466 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3467 val | BGE_VCPU_STATUS_DRV_RESET);
3468 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3469 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3470 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3471 }
3472
3473 DELAY(1000);
3474
3475 /* XXX: Broadcom Linux driver. */
3476 if (sc->bge_flags & BGE_FLAG_PCIE) {
3477 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3478 DELAY(500000); /* wait for link training to complete */
3479 val = pci_read_config(dev, 0xC4, 4);
3480 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3481 }
3482 devctl = pci_read_config(dev,
3483 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3484 /* Clear enable no snoop and disable relaxed ordering. */
3485 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3486 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3487 /* Set PCIE max payload size to 128. */
3488 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3489 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3490 devctl, 2);
3491 /* Clear error status. */
3492 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3493 PCIM_EXP_STA_CORRECTABLE_ERROR |
3494 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3495 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3496 }
3497
3498 /* Reset some of the PCI state that got zapped by reset. */
3499 pci_write_config(dev, BGE_PCI_MISC_CTL,
3500 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3501 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3502 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3503 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3504 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3505 /*
3506	 * Disable PCI-X relaxed ordering to ensure the status block
3507	 * update comes first, then the packet buffer DMA. Otherwise
3508	 * the driver may read a stale status block.
3509 */
3510 if (sc->bge_flags & BGE_FLAG_PCIX) {
3511 devctl = pci_read_config(dev,
3512 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3513 devctl &= ~PCIXM_COMMAND_ERO;
3514 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3515 devctl &= ~PCIXM_COMMAND_MAX_READ;
3516 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3517 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3518 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3519 PCIXM_COMMAND_MAX_READ);
3520 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3521 }
3522 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3523 devctl, 2);
3524 }
3525 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3526 if (BGE_IS_5714_FAMILY(sc)) {
3527 /* This chip disables MSI on reset. */
3528 if (sc->bge_flags & BGE_FLAG_MSI) {
3529 val = pci_read_config(dev,
3530 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3531 pci_write_config(dev,
3532 sc->bge_msicap + PCIR_MSI_CTRL,
3533 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3534 val = CSR_READ_4(sc, BGE_MSI_MODE);
3535 CSR_WRITE_4(sc, BGE_MSI_MODE,
3536 val | BGE_MSIMODE_ENABLE);
3537 }
3538 val = CSR_READ_4(sc, BGE_MARB_MODE);
3539 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3540 } else
3541 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3542
3543 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3544 for (i = 0; i < BGE_TIMEOUT; i++) {
3545 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3546 if (val & BGE_VCPU_STATUS_INIT_DONE)
3547 break;
3548 DELAY(100);
3549 }
3550 if (i == BGE_TIMEOUT) {
3551 device_printf(dev, "reset timed out\n");
3552 return (1);
3553 }
3554 } else {
3555 /*
3556 * Poll until we see the 1's complement of the magic number.
3557 * This indicates that the firmware initialization is complete.
3558 * We expect this to fail if no chip containing the Ethernet
3559		 * address is fitted, though.
3560 */
3561 for (i = 0; i < BGE_TIMEOUT; i++) {
3562 DELAY(10);
3563 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3564 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3565 break;
3566 }
3567
3568 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3569 device_printf(dev,
3570 "firmware handshake timed out, found 0x%08x\n",
3571 val);
3572 /* BCM57765 A0 needs additional time before accessing. */
3573 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3574 DELAY(10 * 1000); /* XXX */
3575 }
3576
3577 /*
3578 * XXX Wait for the value of the PCISTATE register to
3579 * return to its original pre-reset state. This is a
3580 * fairly good indicator of reset completion. If we don't
3581 * wait for the reset to fully complete, trying to read
3582 * from the device's non-PCI registers may yield garbage
3583 * results.
3584 */
3585 for (i = 0; i < BGE_TIMEOUT; i++) {
3586 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3587 break;
3588 DELAY(10);
3589 }
3590
3591 /* Fix up byte swapping. */
3592 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3593 BGE_MODECTL_BYTESWAP_DATA);
3594
3595 /* Tell the ASF firmware we are up */
3596 if (sc->bge_asf_mode & ASF_STACKUP)
3597 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3598
3599 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3600
3601 /*
3602 * The 5704 in TBI mode apparently needs some special
3603	 * adjustment to ensure the SERDES drive level is set
3604 * to 1.2V.
3605 */
3606 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3607 sc->bge_flags & BGE_FLAG_TBI) {
3608 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3609 val = (val & ~0xFFF) | 0x880;
3610 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3611 }
3612
3613 /* XXX: Broadcom Linux driver. */
3614 if (sc->bge_flags & BGE_FLAG_PCIE &&
3615 !BGE_IS_5717_PLUS(sc) &&
3616 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3617 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3618 /* Enable Data FIFO protection. */
3619 val = CSR_READ_4(sc, 0x7C00);
3620 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3621 }
3622 DELAY(10000);
3623
3624 return (0);
3625}
3626
3627static __inline void
3628bge_rxreuse_std(struct bge_softc *sc, int i)
3629{
3630 struct bge_rx_bd *r;
3631
3632 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3633 r->bge_flags = BGE_RXBDFLAG_END;
3634 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3635 r->bge_idx = i;
3636 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3637}
3638
3639static __inline void
3640bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3641{
3642 struct bge_extrx_bd *r;
3643
3644 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3645 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3646 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3647 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3648 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3649 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3650 r->bge_idx = i;
3651 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3652}
3653
3654/*
3655 * Frame reception handling. This is called if there's a frame
3656 * on the receive return list.
3657 *
3658 * Note: we have to be able to handle two possibilities here:
3659 * 1) the frame is from the jumbo receive ring
3660 * 2) the frame is from the standard receive ring
3661 */
3662
3663static int
3664bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3665{
3666 struct ifnet *ifp;
3667 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3668 uint16_t rx_cons;
3669
3670 rx_cons = sc->bge_rx_saved_considx;
3671
3672 /* Nothing to do. */
3673 if (rx_cons == rx_prod)
3674 return (rx_npkts);
3675
3676 ifp = sc->bge_ifp;
3677
3678 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3679 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3680 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3681 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3682 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3683 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3684 (MCLBYTES - ETHER_ALIGN))
3685 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3686 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3687
3688 while (rx_cons != rx_prod) {
3689 struct bge_rx_bd *cur_rx;
3690 uint32_t rxidx;
3691 struct mbuf *m = NULL;
3692 uint16_t vlan_tag = 0;
3693 int have_tag = 0;
3694
3695#ifdef DEVICE_POLLING
3696 if (ifp->if_capenable & IFCAP_POLLING) {
3697 if (sc->rxcycles <= 0)
3698 break;
3699 sc->rxcycles--;
3700 }
3701#endif
3702
3703 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3704
3705 rxidx = cur_rx->bge_idx;
3706 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3707
3708 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3709 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3710 have_tag = 1;
3711 vlan_tag = cur_rx->bge_vlan_tag;
3712 }
3713
3714 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3715 jumbocnt++;
3716 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3717 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3718 bge_rxreuse_jumbo(sc, rxidx);
3719 continue;
3720 }
3721 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3722 bge_rxreuse_jumbo(sc, rxidx);
3723 ifp->if_iqdrops++;
3724 continue;
3725 }
3726 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3727 } else {
3728 stdcnt++;
3729 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3730 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3731 bge_rxreuse_std(sc, rxidx);
3732 continue;
3733 }
3734 if (bge_newbuf_std(sc, rxidx) != 0) {
3735 bge_rxreuse_std(sc, rxidx);
3736 ifp->if_iqdrops++;
3737 continue;
3738 }
3739 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3740 }
3741
3742 ifp->if_ipackets++;
3743#ifndef __NO_STRICT_ALIGNMENT
3744 /*
3745 * For architectures with strict alignment we must make sure
3746 * the payload is aligned.
3747 */
3748 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3749 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3750 cur_rx->bge_len);
3751 m->m_data += ETHER_ALIGN;
3752 }
3753#endif
3754 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3755 m->m_pkthdr.rcvif = ifp;
3756
3757 if (ifp->if_capenable & IFCAP_RXCSUM)
3758 bge_rxcsum(sc, cur_rx, m);
3759
3760 /*
3761 * If we received a packet with a vlan tag,
3762 * attach that information to the packet.
3763 */
3764 if (have_tag) {
3765 m->m_pkthdr.ether_vtag = vlan_tag;
3766 m->m_flags |= M_VLANTAG;
3767 }
3768
3769 if (holdlck != 0) {
3770 BGE_UNLOCK(sc);
3771 (*ifp->if_input)(ifp, m);
3772 BGE_LOCK(sc);
3773 } else
3774 (*ifp->if_input)(ifp, m);
3775 rx_npkts++;
3776
3777 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3778 return (rx_npkts);
3779 }
3780
3781 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3782 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3783 if (stdcnt > 0)
3784 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3785 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3786
3787 if (jumbocnt > 0)
3788 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3789 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3790
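	/*
	 * Sync the new consumer index back to the chip and, if any buffers
	 * were refilled above, bump the standard/jumbo producer mailboxes.
	 * The mailbox wants the index of the last initialized descriptor,
	 * i.e. one slot behind the current fill index; e.g. with bge_std
	 * wrapped back to 0 we write BGE_STD_RX_RING_CNT - 1.
	 */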
3791 sc->bge_rx_saved_considx = rx_cons;
3792 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3793 if (stdcnt)
3794 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3795 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3796 if (jumbocnt)
3797 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3798 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3799#ifdef notyet
3800 /*
3801 * This register wraps very quickly under heavy packet drops.
3802 * If you need correct statistics, you can enable this check.
3803 */
3804 if (BGE_IS_5705_PLUS(sc))
3805 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3806#endif
3807 return (rx_npkts);
3808}
3809
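/*
 * Translate RX descriptor checksum information into mbuf csum_flags.
 * 5717 and newer parts report explicit per-protocol error bits, while
 * older parts return the computed checksums, so the IP header checksum
 * is taken as good when it reads back as 0xFFFF.
 */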
3810static void
3811bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3812{
3813
3814 if (BGE_IS_5717_PLUS(sc)) {
3815 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3816 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3817 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3818 if ((cur_rx->bge_error_flag &
3819 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3820 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3821 }
3822 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3823 m->m_pkthdr.csum_data =
3824 cur_rx->bge_tcp_udp_csum;
3825 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3826 CSUM_PSEUDO_HDR;
3827 }
3828 }
3829 } else {
3830 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3831 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3832 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3833 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3834 }
3835 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3836 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3837 m->m_pkthdr.csum_data =
3838 cur_rx->bge_tcp_udp_csum;
3839 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3840 CSUM_PSEUDO_HDR;
3841 }
3842 }
3843}
3844
3845static void
3846bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3847{
3848 struct bge_tx_bd *cur_tx;
3849 struct ifnet *ifp;
3850
3851 BGE_LOCK_ASSERT(sc);
3852
3853 /* Nothing to do. */
3854 if (sc->bge_tx_saved_considx == tx_cons)
3855 return;
3856
3857 ifp = sc->bge_ifp;
3858
3859 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3860 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3861 /*
3862 * Go through our tx ring and free mbufs for those
3863 * frames that have been sent.
3864 */
3865 while (sc->bge_tx_saved_considx != tx_cons) {
3866 uint32_t idx;
3867
3868 idx = sc->bge_tx_saved_considx;
3869 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3870 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3871 ifp->if_opackets++;
3872 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3873 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3874 sc->bge_cdata.bge_tx_dmamap[idx],
3875 BUS_DMASYNC_POSTWRITE);
3876 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3877 sc->bge_cdata.bge_tx_dmamap[idx]);
3878 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3879 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3880 }
3881 sc->bge_txcnt--;
3882 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3883 }
3884
3885 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3886 if (sc->bge_txcnt == 0)
3887 sc->bge_timer = 0;
3888}
3889
3890#ifdef DEVICE_POLLING
3891static int
3892bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3893{
3894 struct bge_softc *sc = ifp->if_softc;
3895 uint16_t rx_prod, tx_cons;
3896 uint32_t statusword;
3897 int rx_npkts = 0;
3898
3899 BGE_LOCK(sc);
3900 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3901 BGE_UNLOCK(sc);
3902 return (rx_npkts);
3903 }
3904
3905 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3906 sc->bge_cdata.bge_status_map,
3907 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3908 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3909 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3910
3911 statusword = sc->bge_ldata.bge_status_block->bge_status;
3912 sc->bge_ldata.bge_status_block->bge_status = 0;
3913
3914 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3915 sc->bge_cdata.bge_status_map,
3916 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3917
3918 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3919 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3920 sc->bge_link_evt++;
3921
3922 if (cmd == POLL_AND_CHECK_STATUS)
3923 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3924 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3925 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3926 bge_link_upd(sc);
3927
3928 sc->rxcycles = count;
3929 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3930 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3931 BGE_UNLOCK(sc);
3932 return (rx_npkts);
3933 }
3934 bge_txeof(sc, tx_cons);
3935 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3936 bge_start_locked(ifp);
3937
3938 BGE_UNLOCK(sc);
3939 return (rx_npkts);
3940}
3941#endif /* DEVICE_POLLING */
3942
3943static int
3944bge_msi_intr(void *arg)
3945{
3946 struct bge_softc *sc;
3947
3948 sc = (struct bge_softc *)arg;
3949	/*
3950	 * This interrupt is not shared and the controller has already
3951	 * disabled further interrupts.
3952	 */
3953 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3954 return (FILTER_HANDLED);
3955}
3956
3957static void
3958bge_intr_task(void *arg, int pending)
3959{
3960 struct bge_softc *sc;
3961 struct ifnet *ifp;
3962 uint32_t status, status_tag;
3963 uint16_t rx_prod, tx_cons;
3964
3965 sc = (struct bge_softc *)arg;
3966 ifp = sc->bge_ifp;
3967
3968 BGE_LOCK(sc);
3969 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3970 BGE_UNLOCK(sc);
3971 return;
3972 }
3973
3974 /* Get updated status block. */
3975 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3976 sc->bge_cdata.bge_status_map,
3977 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3978
3979	/* Save producer/consumer indexes. */
3980 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3981 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3982 status = sc->bge_ldata.bge_status_block->bge_status;
3983 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3984 sc->bge_ldata.bge_status_block->bge_status = 0;
3985 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3986 sc->bge_cdata.bge_status_map,
3987 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3988 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3989 status_tag = 0;
3990
3991 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3992 bge_link_upd(sc);
3993
3994 /* Let controller work. */
3995 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
3996
3997 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3998 sc->bge_rx_saved_considx != rx_prod) {
3999 /* Check RX return ring producer/consumer. */
4000 BGE_UNLOCK(sc);
4001 bge_rxeof(sc, rx_prod, 0);
4002 BGE_LOCK(sc);
4003 }
4004 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4005 /* Check TX ring producer/consumer. */
4006 bge_txeof(sc, tx_cons);
4007 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4008 bge_start_locked(ifp);
4009 }
4010 BGE_UNLOCK(sc);
4011}
4012
4013static void
4014bge_intr(void *xsc)
4015{
4016 struct bge_softc *sc;
4017 struct ifnet *ifp;
4018 uint32_t statusword;
4019 uint16_t rx_prod, tx_cons;
4020
4021 sc = xsc;
4022
4023 BGE_LOCK(sc);
4024
4025 ifp = sc->bge_ifp;
4026
4027#ifdef DEVICE_POLLING
4028 if (ifp->if_capenable & IFCAP_POLLING) {
4029 BGE_UNLOCK(sc);
4030 return;
4031 }
4032#endif
4033
4034 /*
4035 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4036 * disable interrupts by writing nonzero like we used to, since with
4037 * our current organization this just gives complications and
4038 * pessimizations for re-enabling interrupts. We used to have races
4039 * instead of the necessary complications. Disabling interrupts
4040 * would just reduce the chance of a status update while we are
4041 * running (by switching to the interrupt-mode coalescence
4042 * parameters), but this chance is already very low so it is more
4043 * efficient to get another interrupt than prevent it.
4044 *
4045 * We do the ack first to ensure another interrupt if there is a
4046 * status update after the ack. We don't check for the status
4047 * changing later because it is more efficient to get another
4048 * interrupt than prevent it, not quite as above (not checking is
4049 * a smaller optimization than not toggling the interrupt enable,
4050	 * since checking doesn't involve PCI accesses and toggling requires
4051 * the status check). So toggling would probably be a pessimization
4052 * even with MSI. It would only be needed for using a task queue.
4053 */
4054 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4055
4056 /*
4057 * Do the mandatory PCI flush as well as get the link status.
4058 */
4059 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4060
4061 /* Make sure the descriptor ring indexes are coherent. */
4062 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4063 sc->bge_cdata.bge_status_map,
4064 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4065 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4066 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4067 sc->bge_ldata.bge_status_block->bge_status = 0;
4068 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4069 sc->bge_cdata.bge_status_map,
4070 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4071
4072 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4073 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4074 statusword || sc->bge_link_evt)
4075 bge_link_upd(sc);
4076
4077 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4078 /* Check RX return ring producer/consumer. */
4079 bge_rxeof(sc, rx_prod, 1);
4080 }
4081
4082 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4083 /* Check TX ring producer/consumer. */
4084 bge_txeof(sc, tx_cons);
4085 }
4086
4087 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4088 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4089 bge_start_locked(ifp);
4090
4091 BGE_UNLOCK(sc);
4092}
4093
4094static void
4095bge_asf_driver_up(struct bge_softc *sc)
4096{
4097 if (sc->bge_asf_mode & ASF_STACKUP) {
4098		/* Send ASF heartbeat approx. every 2s */
4099 if (sc->bge_asf_count)
4100 sc->bge_asf_count --;
4101 else {
4102 sc->bge_asf_count = 2;
4103 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4104 BGE_FW_DRV_ALIVE);
4105 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4106 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 3);
4107 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4108 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | (1 << 14));
4109 }
4110 }
4111}
4112
4113static void
4114bge_tick(void *xsc)
4115{
4116 struct bge_softc *sc = xsc;
4117 struct mii_data *mii = NULL;
4118
4119 BGE_LOCK_ASSERT(sc);
4120
4121 /* Synchronize with possible callout reset/stop. */
4122 if (callout_pending(&sc->bge_stat_ch) ||
4123 !callout_active(&sc->bge_stat_ch))
4124 return;
4125
4126 if (BGE_IS_5705_PLUS(sc))
4127 bge_stats_update_regs(sc);
4128 else
4129 bge_stats_update(sc);
4130
4131 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4132 mii = device_get_softc(sc->bge_miibus);
4133 /*
4134		 * Do not touch the PHY if we have link up. This could break
4135		 * IPMI/ASF mode or produce extra input errors
4136		 * (extra errors were reported for bcm5701 & bcm5704).
4137 */
4138 if (!sc->bge_link)
4139 mii_tick(mii);
4140 } else {
4141 /*
4142		 * Since auto-polling can't be used in TBI mode, we have to
4143		 * poll the link status manually. Here we register a pending
4144		 * link event and trigger an interrupt.
4145 */
4146#ifdef DEVICE_POLLING
4147 /* In polling mode we poll link state in bge_poll(). */
4148 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4149#endif
4150 {
4151 sc->bge_link_evt++;
4152 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4153 sc->bge_flags & BGE_FLAG_5788)
4154 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4155 else
4156 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4157 }
4158 }
4159
4160 bge_asf_driver_up(sc);
4161 bge_watchdog(sc);
4162
4163 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4164}
4165
4166static void
4167bge_stats_update_regs(struct bge_softc *sc)
4168{
4169 struct ifnet *ifp;
4170 struct bge_mac_stats *stats;
4171
4172 ifp = sc->bge_ifp;
4173 stats = &sc->bge_mac_stats;
4174
4175 stats->ifHCOutOctets +=
4176 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4177 stats->etherStatsCollisions +=
4178 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4179 stats->outXonSent +=
4180 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4181 stats->outXoffSent +=
4182 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4183 stats->dot3StatsInternalMacTransmitErrors +=
4184 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4185 stats->dot3StatsSingleCollisionFrames +=
4186 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4187 stats->dot3StatsMultipleCollisionFrames +=
4188 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4189 stats->dot3StatsDeferredTransmissions +=
4190 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4191 stats->dot3StatsExcessiveCollisions +=
4192 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4193 stats->dot3StatsLateCollisions +=
4194 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4195 stats->ifHCOutUcastPkts +=
4196 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4197 stats->ifHCOutMulticastPkts +=
4198 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4199 stats->ifHCOutBroadcastPkts +=
4200 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4201
4202 stats->ifHCInOctets +=
4203 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4204 stats->etherStatsFragments +=
4205 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4206 stats->ifHCInUcastPkts +=
4207 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4208 stats->ifHCInMulticastPkts +=
4209 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4210 stats->ifHCInBroadcastPkts +=
4211 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4212 stats->dot3StatsFCSErrors +=
4213 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4214 stats->dot3StatsAlignmentErrors +=
4215 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4216 stats->xonPauseFramesReceived +=
4217 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4218 stats->xoffPauseFramesReceived +=
4219 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4220 stats->macControlFramesReceived +=
4221 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4222 stats->xoffStateEntered +=
4223 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4224 stats->dot3StatsFramesTooLong +=
4225 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4226 stats->etherStatsJabbers +=
4227 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4228 stats->etherStatsUndersizePkts +=
4229 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4230
4231 stats->FramesDroppedDueToFilters +=
4232 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4233 stats->DmaWriteQueueFull +=
4234 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4235 stats->DmaWriteHighPriQueueFull +=
4236 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4237 stats->NoMoreRxBDs +=
4238 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4239 stats->InputDiscards +=
4240 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4241 stats->InputErrors +=
4242 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4243 stats->RecvThresholdHit +=
4244 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4245
4246 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4247 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4248 stats->InputErrors);
4249}
4250
4251static void
4252bge_stats_clear_regs(struct bge_softc *sc)
4253{
4254
4255 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4256 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4257 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4258 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4259 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4260 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4261 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4262 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4263 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4265 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4267 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4268
4269 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4270 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4271 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4272 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4273 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4274 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4275 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4276 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4277 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4283
4284 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4285 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4286 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4287 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4288 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4289 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4290 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4291}
4292
4293static void
4294bge_stats_update(struct bge_softc *sc)
4295{
4296 struct ifnet *ifp;
4297 bus_size_t stats;
4298 uint32_t cnt; /* current register value */
4299
4300 ifp = sc->bge_ifp;
4301
4302 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4303
4304#define READ_STAT(sc, stats, stat) \
4305 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4306
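	/*
	 * The legacy statistics block lives in NIC memory and is read
	 * through the memory window at BGE_MEMWIN_START + BGE_STATS_BLOCK.
	 * Each counter in struct bge_stats is a 64-bit hi/lo pair; only the
	 * low word is read here and accumulated as a 32-bit delta against
	 * the value saved on the previous update.
	 */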
4307 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4308 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4309 sc->bge_tx_collisions = cnt;
4310
4311 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4312 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4313 sc->bge_rx_discards = cnt;
4314
4315 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4316 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4317 sc->bge_tx_discards = cnt;
4318
4319#undef READ_STAT
4320}
4321
4322/*
4323 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4324 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4325 * but when such padded frames employ the bge IP/TCP checksum offload,
4326 * the hardware checksum assist gives incorrect results (possibly
4327 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4328 * If we pad such runts with zeros, the onboard checksum comes out correct.
4329 */
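/*
 * ETHER_MIN_NOPAD is the minimum frame length before the CRC is appended
 * (ETHER_MIN_LEN - ETHER_CRC_LEN, i.e. 60 octets), so frames shorter than
 * that are zero-padded here before being handed to the offload engine.
 */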
4330static __inline int
4331bge_cksum_pad(struct mbuf *m)
4332{
4333 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4334 struct mbuf *last;
4335
4336 /* If there's only the packet-header and we can pad there, use it. */
4337 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4338 M_TRAILINGSPACE(m) >= padlen) {
4339 last = m;
4340 } else {
4341 /*
4342 * Walk packet chain to find last mbuf. We will either
4343 * pad there, or append a new mbuf and pad it.
4344 */
4345 for (last = m; last->m_next != NULL; last = last->m_next);
4346 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4347 /* Allocate new empty mbuf, pad it. Compact later. */
4348 struct mbuf *n;
4349
4350 MGET(n, M_DONTWAIT, MT_DATA);
4351 if (n == NULL)
4352 return (ENOBUFS);
4353 n->m_len = 0;
4354 last->m_next = n;
4355 last = n;
4356 }
4357 }
4358
4359 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4360 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4361 last->m_len += padlen;
4362 m->m_pkthdr.len += padlen;
4363
4364 return (0);
4365}
4366
4367static struct mbuf *
4368bge_check_short_dma(struct mbuf *m)
4369{
4370 struct mbuf *n;
4371 int found;
4372
4373 /*
4374	 * If the device receives two back-to-back send BDs with less than
4375	 * or equal to 8 total bytes then it may hang. The two
4376	 * back-to-back send BDs must be in the same frame for this failure
4377	 * to occur. Scan the mbuf chain and see whether two such back-to-back
4378	 * send BDs are there. If this is the case, allocate a new mbuf
4379	 * and copy the frame to work around the silicon bug.
4380 */
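	/*
	 * For example, a chain of a 4-byte mbuf followed by another 4-byte
	 * mbuf would map to two consecutive sub-8-byte send BDs and trip
	 * the check below; m_defrag() then linearizes the whole frame into
	 * a single buffer.
	 */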
4381 for (n = m, found = 0; n != NULL; n = n->m_next) {
4382 if (n->m_len < 8) {
4383 found++;
4384 if (found > 1)
4385 break;
4386 continue;
4387 }
4388 found = 0;
4389 }
4390
4391 if (found > 1) {
4392 n = m_defrag(m, M_DONTWAIT);
4393 if (n == NULL)
4394 m_freem(m);
4395 } else
4396 n = m;
4397 return (n);
4398}
4399
4400static struct mbuf *
4401bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4402 uint16_t *flags)
4403{
4404 struct ip *ip;
4405 struct tcphdr *tcp;
4406 struct mbuf *n;
4407 uint16_t hlen;
4408 uint32_t poff;
4409
4410 if (M_WRITABLE(m) == 0) {
4411 /* Get a writable copy. */
4412 n = m_dup(m, M_DONTWAIT);
4413 m_freem(m);
4414 if (n == NULL)
4415 return (NULL);
4416 m = n;
4417 }
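	/*
	 * Pull the Ethernet, IP and TCP headers (including any options)
	 * into the first mbuf so the length and checksum fixups below can
	 * operate on contiguous memory.
	 */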
4418 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4419 if (m == NULL)
4420 return (NULL);
4421 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4422 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4423 m = m_pullup(m, poff + sizeof(struct tcphdr));
4424 if (m == NULL)
4425 return (NULL);
4426 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4427 m = m_pullup(m, poff + (tcp->th_off << 2));
4428 if (m == NULL)
4429 return (NULL);
4430 /*
4431	 * It seems the controller doesn't modify the IP length and TCP pseudo
4432	 * checksum. These checksums, computed by the upper stack, should be 0.
4433 */
4434 *mss = m->m_pkthdr.tso_segsz;
4435 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4436 ip->ip_sum = 0;
4437 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4438 /* Clear pseudo checksum computed by TCP stack. */
4439 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4440 tcp->th_sum = 0;
4441 /*
4442	 * Broadcom controllers use a different descriptor format for
4443	 * TSO depending on ASIC revision. Due to TSO-capable firmware
4444	 * license issues and the lower performance of firmware-based TSO,
4445	 * we only support hardware-based TSO.
4446 */
4447 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4448 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4449 if (sc->bge_flags & BGE_FLAG_TSO3) {
4450 /*
4451 * For BCM5717 and newer controllers, hardware based TSO
4452 * uses the 14 lower bits of the bge_mss field to store the
4453 * MSS and the upper 2 bits to store the lowest 2 bits of
4454 * the IP/TCP header length. The upper 6 bits of the header
4455 * length are stored in the bge_flags[14:10,4] field. Jumbo
4456 * frames are supported.
4457 */
4458 *mss |= ((hlen & 0x3) << 14);
4459 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
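		/*
		 * Example: a plain 20-byte IP header plus a 20-byte TCP
		 * header gives hlen = 10 words, so mss[15:14] = 10b
		 * (hlen & 0x3) and only bit 10 of bge_flags is set
		 * ((hlen & 0xF8) << 7), from which the hardware rebuilds
		 * the 10-word header length.
		 */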
4460 } else {
4461 /*
4462 * For BCM5755 and newer controllers, hardware based TSO uses
4463 * the lower 11 bits to store the MSS and the upper 5 bits to
4464 * store the IP/TCP header length. Jumbo frames are not
4465 * supported.
4466 */
4467 *mss |= (hlen << 11);
4468 }
4469 return (m);
4470}
4471
4472/*
4473 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4474 * pointers to descriptors.
4475 */
4476static int
4477bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4478{
4479 bus_dma_segment_t segs[BGE_NSEG_NEW];
4480 bus_dmamap_t map;
4481 struct bge_tx_bd *d;
4482 struct mbuf *m = *m_head;
4483 uint32_t idx = *txidx;
4484 uint16_t csum_flags, mss, vlan_tag;
4485 int nsegs, i, error;
4486
4487 csum_flags = 0;
4488 mss = 0;
4489 vlan_tag = 0;
4490 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4491 m->m_next != NULL) {
4492 *m_head = bge_check_short_dma(m);
4493 if (*m_head == NULL)
4494 return (ENOBUFS);
4495 m = *m_head;
4496 }
4497 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4498 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4499 if (*m_head == NULL)
4500 return (ENOBUFS);
4501 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4502 BGE_TXBDFLAG_CPU_POST_DMA;
4503 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4504 if (m->m_pkthdr.csum_flags & CSUM_IP)
4505 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4506 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4507 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4508 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4509 (error = bge_cksum_pad(m)) != 0) {
4510 m_freem(m);
4511 *m_head = NULL;
4512 return (error);
4513 }
4514 }
4515 if (m->m_flags & M_LASTFRAG)
4516 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4517 else if (m->m_flags & M_FRAG)
4518 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4519 }
4520
4521 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4522 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4523 m->m_pkthdr.len > ETHER_MAX_LEN)
4524 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4525 if (sc->bge_forced_collapse > 0 &&
4526 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4527 /*
4528			 * Forcibly collapse mbuf chains to overcome a hardware
4529			 * limitation which only supports a single outstanding
4530			 * DMA read operation.
4531 */
4532 if (sc->bge_forced_collapse == 1)
4533 m = m_defrag(m, M_DONTWAIT);
4534 else
4535 m = m_collapse(m, M_DONTWAIT,
4536 sc->bge_forced_collapse);
4537 if (m == NULL)
4538 m = *m_head;
4539 *m_head = m;
4540 }
4541 }
4542
4543 map = sc->bge_cdata.bge_tx_dmamap[idx];
4544 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4545 &nsegs, BUS_DMA_NOWAIT);
4546 if (error == EFBIG) {
4547 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4548 if (m == NULL) {
4549 m_freem(*m_head);
4550 *m_head = NULL;
4551 return (ENOBUFS);
4552 }
4553 *m_head = m;
4554 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4555 m, segs, &nsegs, BUS_DMA_NOWAIT);
4556 if (error) {
4557 m_freem(m);
4558 *m_head = NULL;
4559 return (error);
4560 }
4561 } else if (error != 0)
4562 return (error);
4563
4564 /* Check if we have enough free send BDs. */
4565 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4566 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4567 return (ENOBUFS);
4568 }
4569
4570 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4571
4572 if (m->m_flags & M_VLANTAG) {
4573 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4574 vlan_tag = m->m_pkthdr.ether_vtag;
4575 }
4576 for (i = 0; ; i++) {
4577 d = &sc->bge_ldata.bge_tx_ring[idx];
4578 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4579 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4580 d->bge_len = segs[i].ds_len;
4581 d->bge_flags = csum_flags;
4582 d->bge_vlan_tag = vlan_tag;
4583 d->bge_mss = mss;
4584 if (i == nsegs - 1)
4585 break;
4586 BGE_INC(idx, BGE_TX_RING_CNT);
4587 }
4588
4589 /* Mark the last segment as end of packet... */
4590 d->bge_flags |= BGE_TXBDFLAG_END;
4591
4592 /*
4593	 * Ensure that the map for this transmission
4594	 * is placed at the array index of the last descriptor
4595	 * in this chain, which is where bge_txeof() will look for it.
4596 */
4597 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4598 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4599 sc->bge_cdata.bge_tx_chain[idx] = m;
4600 sc->bge_txcnt += nsegs;
4601
4602 BGE_INC(idx, BGE_TX_RING_CNT);
4603 *txidx = idx;
4604
4605 return (0);
4606}
4607
4608/*
4609 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4610 * to the mbuf data regions directly in the transmit descriptors.
4611 */
4612static void
4613bge_start_locked(struct ifnet *ifp)
4614{
4615 struct bge_softc *sc;
4616 struct mbuf *m_head;
4617 uint32_t prodidx;
4618 int count;
4619
4620 sc = ifp->if_softc;
4621 BGE_LOCK_ASSERT(sc);
4622
4623 if (!sc->bge_link ||
4624 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4625 IFF_DRV_RUNNING)
4626 return;
4627
4628 prodidx = sc->bge_tx_prodidx;
4629
4630 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4631 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4632 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4633 break;
4634 }
4635 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4636 if (m_head == NULL)
4637 break;
4638
4639 /*
4640 * XXX
4641 * The code inside the if() block is never reached since we
4642 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4643 * requests to checksum TCP/UDP in a fragmented packet.
4644 *
4645 * XXX
4646 * safety overkill. If this is a fragmented packet chain
4647 * with delayed TCP/UDP checksums, then only encapsulate
4648 * it if we have enough descriptors to handle the entire
4649 * chain at once.
4650 * (paranoia -- may not actually be needed)
4651 */
4652 if (m_head->m_flags & M_FIRSTFRAG &&
4653 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4654 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4655 m_head->m_pkthdr.csum_data + 16) {
4656 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4657 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4658 break;
4659 }
4660 }
4661
4662 /*
4663 * Pack the data into the transmit ring. If we
4664 * don't have room, set the OACTIVE flag and wait
4665 * for the NIC to drain the ring.
4666 */
4667 if (bge_encap(sc, &m_head, &prodidx)) {
4668 if (m_head == NULL)
4669 break;
4670 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4671 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4672 break;
4673 }
4674 ++count;
4675
4676 /*
4677 * If there's a BPF listener, bounce a copy of this frame
4678 * to him.
4679 */
4680#ifdef ETHER_BPF_MTAP
4681 ETHER_BPF_MTAP(ifp, m_head);
4682#else
4683 BPF_MTAP(ifp, m_head);
4684#endif
4685 }
4686
4687 if (count > 0) {
4688 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4689 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4690 /* Transmit. */
4691 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4692 /* 5700 b2 errata */
4693 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4694 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4695
4696 sc->bge_tx_prodidx = prodidx;
4697
4698 /*
4699 * Set a timeout in case the chip goes out to lunch.
4700 */
4701 sc->bge_timer = 5;
4702 }
4703}
4704
4705/*
4706 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4707 * to the mbuf data regions directly in the transmit descriptors.
4708 */
4709static void
4710bge_start(struct ifnet *ifp)
4711{
4712 struct bge_softc *sc;
4713
4714 sc = ifp->if_softc;
4715 BGE_LOCK(sc);
4716 bge_start_locked(ifp);
4717 BGE_UNLOCK(sc);
4718}
4719
4720static void
4721bge_init_locked(struct bge_softc *sc)
4722{
4723 struct ifnet *ifp;
4724 uint16_t *m;
4725 uint32_t mode;
4726
4727 BGE_LOCK_ASSERT(sc);
4728
4729 ifp = sc->bge_ifp;
4730
4731 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4732 return;
4733
4734 /* Cancel pending I/O and flush buffers. */
4735 bge_stop(sc);
4736
4737 bge_stop_fw(sc);
4738 bge_sig_pre_reset(sc, BGE_RESET_START);
4739 bge_reset(sc);
4740 bge_sig_legacy(sc, BGE_RESET_START);
4741 bge_sig_post_reset(sc, BGE_RESET_START);
4742
4743 bge_chipinit(sc);
4744
4745 /*
4746 * Init the various state machines, ring
4747 * control blocks and firmware.
4748 */
4749 if (bge_blockinit(sc)) {
4750 device_printf(sc->bge_dev, "initialization failure\n");
4751 return;
4752 }
4753
4754 ifp = sc->bge_ifp;
4755
4756 /* Specify MTU. */
4757 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4758 ETHER_HDR_LEN + ETHER_CRC_LEN +
4759 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4760
4761 /* Load our MAC address. */
4762 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4763 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4764 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4765
4766 /* Program promiscuous mode. */
4767 bge_setpromisc(sc);
4768
4769 /* Program multicast filter. */
4770 bge_setmulti(sc);
4771
4772 /* Program VLAN tag stripping. */
4773 bge_setvlan(sc);
4774
4775 /* Override UDP checksum offloading. */
4776 if (sc->bge_forced_udpcsum == 0)
4777 sc->bge_csum_features &= ~CSUM_UDP;
4778 else
4779 sc->bge_csum_features |= CSUM_UDP;
4780 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4781 ifp->if_capenable & IFCAP_TXCSUM) {
4782 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4783 ifp->if_hwassist |= sc->bge_csum_features;
4784 }
4785
4786 /* Init RX ring. */
4787 if (bge_init_rx_ring_std(sc) != 0) {
4788 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4789 bge_stop(sc);
4790 return;
4791 }
4792
4793 /*
4794 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4795	 * memory to ensure that the chip has in fact read the first
4796 * entry of the ring.
4797 */
4798 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4799 uint32_t v, i;
4800 for (i = 0; i < 10; i++) {
4801 DELAY(20);
4802 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4803 if (v == (MCLBYTES - ETHER_ALIGN))
4804 break;
4805 }
4806 if (i == 10)
4807 device_printf (sc->bge_dev,
4808 "5705 A0 chip failed to load RX ring\n");
4809 }
4810
4811 /* Init jumbo RX ring. */
4812 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4813 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4814 (MCLBYTES - ETHER_ALIGN)) {
4815 if (bge_init_rx_ring_jumbo(sc) != 0) {
4816 device_printf(sc->bge_dev,
4817 "no memory for jumbo Rx buffers.\n");
4818 bge_stop(sc);
4819 return;
4820 }
4821 }
4822
4823 /* Init our RX return ring index. */
4824 sc->bge_rx_saved_considx = 0;
4825
4826 /* Init our RX/TX stat counters. */
4827 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4828
4829 /* Init TX ring. */
4830 bge_init_tx_ring(sc);
4831
4832 /* Enable TX MAC state machine lockup fix. */
4833 mode = CSR_READ_4(sc, BGE_TX_MODE);
4834 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4835 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4836 /* Turn on transmitter. */
4837 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4838
4839 /* Turn on receiver. */
4840 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4841
4842 /*
4843 * Set the number of good frames to receive after RX MBUF
4844 * Low Watermark has been reached. After the RX MAC receives
4845 * this number of frames, it will drop subsequent incoming
4846 * frames until the MBUF High Watermark is reached.
4847 */
4848 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4849 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4850 else
4851 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4852
4853 /* Clear MAC statistics. */
4854 if (BGE_IS_5705_PLUS(sc))
4855 bge_stats_clear_regs(sc);
4856
4857 /* Tell firmware we're alive. */
4858 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4859
4860#ifdef DEVICE_POLLING
4861 /* Disable interrupts if we are polling. */
4862 if (ifp->if_capenable & IFCAP_POLLING) {
4863 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4864 BGE_PCIMISCCTL_MASK_PCI_INTR);
4865 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4866 } else
4867#endif
4868
4869 /* Enable host interrupts. */
4870 {
4871 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4872 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4873 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4874 }
4875
4876 bge_ifmedia_upd_locked(ifp);
4877
4878 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4879 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4880
4881 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4882}
4883
4884static void
4885bge_init(void *xsc)
4886{
4887 struct bge_softc *sc = xsc;
4888
4889 BGE_LOCK(sc);
4890 bge_init_locked(sc);
4891 BGE_UNLOCK(sc);
4892}
4893
4894/*
4895 * Set media options.
4896 */
4897static int
4898bge_ifmedia_upd(struct ifnet *ifp)
4899{
4900 struct bge_softc *sc = ifp->if_softc;
4901 int res;
4902
4903 BGE_LOCK(sc);
4904 res = bge_ifmedia_upd_locked(ifp);
4905 BGE_UNLOCK(sc);
4906
4907 return (res);
4908}
4909
4910static int
4911bge_ifmedia_upd_locked(struct ifnet *ifp)
4912{
4913 struct bge_softc *sc = ifp->if_softc;
4914 struct mii_data *mii;
4915 struct mii_softc *miisc;
4916 struct ifmedia *ifm;
4917
4918 BGE_LOCK_ASSERT(sc);
4919
4920 ifm = &sc->bge_ifmedia;
4921
4922 /* If this is a 1000baseX NIC, enable the TBI port. */
4923 if (sc->bge_flags & BGE_FLAG_TBI) {
4924 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4925 return (EINVAL);
4926 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4927 case IFM_AUTO:
4928 /*
4929 * The BCM5704 ASIC appears to have a special
4930 * mechanism for programming the autoneg
4931 * advertisement registers in TBI mode.
4932 */
4933 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4934 uint32_t sgdig;
4935 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4936 if (sgdig & BGE_SGDIGSTS_DONE) {
4937 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4938 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4939 sgdig |= BGE_SGDIGCFG_AUTO |
4940 BGE_SGDIGCFG_PAUSE_CAP |
4941 BGE_SGDIGCFG_ASYM_PAUSE;
4942 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4943 sgdig | BGE_SGDIGCFG_SEND);
4944 DELAY(5);
4945 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4946 }
4947 }
4948 break;
4949 case IFM_1000_SX:
4950 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4951 BGE_CLRBIT(sc, BGE_MAC_MODE,
4952 BGE_MACMODE_HALF_DUPLEX);
4953 } else {
4954 BGE_SETBIT(sc, BGE_MAC_MODE,
4955 BGE_MACMODE_HALF_DUPLEX);
4956 }
4957 break;
4958 default:
4959 return (EINVAL);
4960 }
4961 return (0);
4962 }
4963
4964 sc->bge_link_evt++;
4965 mii = device_get_softc(sc->bge_miibus);
4966 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4967 PHY_RESET(miisc);
4968 mii_mediachg(mii);
4969
4970 /*
4971 * Force an interrupt so that we will call bge_link_upd
4972 * if needed and clear any pending link state attention.
4973	 * Without this we would not get any further interrupts
4974	 * for link state changes and thus would not bring the link UP or
4975	 * be able to send in bge_start_locked. The only other
4976	 * way to get things working was to receive a packet and
4977	 * get an RX intr.
4978	 * bge_tick should help for fiber cards and we might not
4979	 * need to do this here if BGE_FLAG_TBI is set, but as
4980	 * we poll for fiber anyway it should not hurt.
4981 */
4982 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4983 sc->bge_flags & BGE_FLAG_5788)
4984 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4985 else
4986 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4987
4988 return (0);
4989}
4990
4991/*
4992 * Report current media status.
4993 */
4994static void
4995bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4996{
4997 struct bge_softc *sc = ifp->if_softc;
4998 struct mii_data *mii;
4999
5000 BGE_LOCK(sc);
5001
5002 if (sc->bge_flags & BGE_FLAG_TBI) {
5003 ifmr->ifm_status = IFM_AVALID;
5004 ifmr->ifm_active = IFM_ETHER;
5005 if (CSR_READ_4(sc, BGE_MAC_STS) &
5006 BGE_MACSTAT_TBI_PCS_SYNCHED)
5007 ifmr->ifm_status |= IFM_ACTIVE;
5008 else {
5009 ifmr->ifm_active |= IFM_NONE;
5010 BGE_UNLOCK(sc);
5011 return;
5012 }
5013 ifmr->ifm_active |= IFM_1000_SX;
5014 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5015 ifmr->ifm_active |= IFM_HDX;
5016 else
5017 ifmr->ifm_active |= IFM_FDX;
5018 BGE_UNLOCK(sc);
5019 return;
5020 }
5021
5022 mii = device_get_softc(sc->bge_miibus);
5023 mii_pollstat(mii);
5024 ifmr->ifm_active = mii->mii_media_active;
5025 ifmr->ifm_status = mii->mii_media_status;
5026
5027 BGE_UNLOCK(sc);
5028}
5029
5030static int
5031bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5032{
5033 struct bge_softc *sc = ifp->if_softc;
5034 struct ifreq *ifr = (struct ifreq *) data;
5035 struct mii_data *mii;
5036 int flags, mask, error = 0;
5037
5038 switch (command) {
5039 case SIOCSIFMTU:
5040 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5041 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5042 if (ifr->ifr_mtu < ETHERMIN ||
5043 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5044 error = EINVAL;
5045 break;
5046 }
5047 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5048 error = EINVAL;
5049 break;
5050 }
5051 BGE_LOCK(sc);
5052 if (ifp->if_mtu != ifr->ifr_mtu) {
5053 ifp->if_mtu = ifr->ifr_mtu;
5054 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5055 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5056 bge_init_locked(sc);
5057 }
5058 }
5059 BGE_UNLOCK(sc);
5060 break;
5061 case SIOCSIFFLAGS:
5062 BGE_LOCK(sc);
5063 if (ifp->if_flags & IFF_UP) {
5064 /*
5065 * If only the state of the PROMISC flag changed,
5066 * then just use the 'set promisc mode' command
5067 * instead of reinitializing the entire NIC. Doing
5068 * a full re-init means reloading the firmware and
5069 * waiting for it to start up, which may take a
5070 * second or two. Similarly for ALLMULTI.
5071 */
5072 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5073 flags = ifp->if_flags ^ sc->bge_if_flags;
5074 if (flags & IFF_PROMISC)
5075 bge_setpromisc(sc);
5076 if (flags & IFF_ALLMULTI)
5077 bge_setmulti(sc);
5078 } else
5079 bge_init_locked(sc);
5080 } else {
5081 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5082 bge_stop(sc);
5083 }
5084 }
5085 sc->bge_if_flags = ifp->if_flags;
5086 BGE_UNLOCK(sc);
5087 error = 0;
5088 break;
5089 case SIOCADDMULTI:
5090 case SIOCDELMULTI:
5091 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5092 BGE_LOCK(sc);
5093 bge_setmulti(sc);
5094 BGE_UNLOCK(sc);
5095 error = 0;
5096 }
5097 break;
5098 case SIOCSIFMEDIA:
5099 case SIOCGIFMEDIA:
5100 if (sc->bge_flags & BGE_FLAG_TBI) {
5101 error = ifmedia_ioctl(ifp, ifr,
5102 &sc->bge_ifmedia, command);
5103 } else {
5104 mii = device_get_softc(sc->bge_miibus);
5105 error = ifmedia_ioctl(ifp, ifr,
5106 &mii->mii_media, command);
5107 }
5108 break;
5109 case SIOCSIFCAP:
5110 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5111#ifdef DEVICE_POLLING
5112 if (mask & IFCAP_POLLING) {
5113 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5114 error = ether_poll_register(bge_poll, ifp);
5115 if (error)
5116 return (error);
5117 BGE_LOCK(sc);
5118 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5119 BGE_PCIMISCCTL_MASK_PCI_INTR);
5120 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5121 ifp->if_capenable |= IFCAP_POLLING;
5122 BGE_UNLOCK(sc);
5123 } else {
5124 error = ether_poll_deregister(ifp);
5125 /* Enable interrupt even in error case */
5126 BGE_LOCK(sc);
5127 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5128 BGE_PCIMISCCTL_MASK_PCI_INTR);
5129 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5130 ifp->if_capenable &= ~IFCAP_POLLING;
5131 BGE_UNLOCK(sc);
5132 }
5133 }
5134#endif
5135 if ((mask & IFCAP_TXCSUM) != 0 &&
5136 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5137 ifp->if_capenable ^= IFCAP_TXCSUM;
5138 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5139 ifp->if_hwassist |= sc->bge_csum_features;
5140 else
5141 ifp->if_hwassist &= ~sc->bge_csum_features;
5142 }
5143
5144 if ((mask & IFCAP_RXCSUM) != 0 &&
5145 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5146 ifp->if_capenable ^= IFCAP_RXCSUM;
5147
5148 if ((mask & IFCAP_TSO4) != 0 &&
5149 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5150 ifp->if_capenable ^= IFCAP_TSO4;
5151 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5152 ifp->if_hwassist |= CSUM_TSO;
5153 else
5154 ifp->if_hwassist &= ~CSUM_TSO;
5155 }
5156
5157 if (mask & IFCAP_VLAN_MTU) {
5158 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5159 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5160 bge_init(sc);
5161 }
5162
5163 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5164 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5165 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5166 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5167 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5168 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5169 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5170 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5171 BGE_LOCK(sc);
5172 bge_setvlan(sc);
5173 BGE_UNLOCK(sc);
5174 }
5175#ifdef VLAN_CAPABILITIES
5176 VLAN_CAPABILITIES(ifp);
5177#endif
5178 break;
5179 default:
5180 error = ether_ioctl(ifp, command, data);
5181 break;
5182 }
5183
5184 return (error);
5185}
5186
5187static void
5188bge_watchdog(struct bge_softc *sc)
5189{
5190 struct ifnet *ifp;
5191
5192 BGE_LOCK_ASSERT(sc);
5193
5194 if (sc->bge_timer == 0 || --sc->bge_timer)
5195 return;
5196
5197 ifp = sc->bge_ifp;
5198
5199 if_printf(ifp, "watchdog timeout -- resetting\n");
5200
5201 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5202 bge_init_locked(sc);
5203
5204 ifp->if_oerrors++;
5205}
5206
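/*
 * Clear the enable bit in the given block's mode register and busy-wait
 * (up to BGE_TIMEOUT iterations of 100us) for the bit to read back clear,
 * indicating the block has stopped.  Used when quiescing the chip below.
 */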
5207static void
5208bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5209{
5210 int i;
5211
5212 BGE_CLRBIT(sc, reg, bit);
5213
5214 for (i = 0; i < BGE_TIMEOUT; i++) {
5215 if ((CSR_READ_4(sc, reg) & bit) == 0)
5216 return;
5217 DELAY(100);
5218 }
5219}
5220
5221/*
5222 * Stop the adapter and free any mbufs allocated to the
5223 * RX and TX lists.
5224 */
5225static void
5226bge_stop(struct bge_softc *sc)
5227{
5228 struct ifnet *ifp;
5229
5230 BGE_LOCK_ASSERT(sc);
5231
5232 ifp = sc->bge_ifp;
5233
5234 callout_stop(&sc->bge_stat_ch);
5235
5236 /* Disable host interrupts. */
5237 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5238 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5239
5240 /*
5241 * Tell firmware we're shutting down.
5242 */
5243 bge_stop_fw(sc);
5244 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5245
5246 /*
5247 * Disable all of the receiver blocks.
5248 */
5249 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5250 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5251 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5252 if (BGE_IS_5700_FAMILY(sc))
5253 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5254 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5255 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5256 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5257
5258 /*
5259 * Disable all of the transmit blocks.
5260 */
5261 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5262 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5263 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5264 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5265 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5266 if (BGE_IS_5700_FAMILY(sc))
5267 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5268 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5269
5270 /*
5271 * Shut down all of the memory managers and related
5272 * state machines.
5273 */
5274 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5275 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5276 if (BGE_IS_5700_FAMILY(sc))
5277 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5278
5279 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5280 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5281 if (!(BGE_IS_5705_PLUS(sc))) {
5282 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5283 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5284 }
5285 /* Update MAC statistics. */
5286 if (BGE_IS_5705_PLUS(sc))
5287 bge_stats_update_regs(sc);
5288
5289 bge_reset(sc);
5290 bge_sig_legacy(sc, BGE_RESET_STOP);
5291 bge_sig_post_reset(sc, BGE_RESET_STOP);
5292
5293 /*
5294 * Keep the ASF firmware running if up.
5295 */
5296 if (sc->bge_asf_mode & ASF_STACKUP)
5297 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5298 else
5299 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5300
5301 /* Free the RX lists. */
5302 bge_free_rx_ring_std(sc);
5303
5304 /* Free jumbo RX list. */
5305 if (BGE_IS_JUMBO_CAPABLE(sc))
5306 bge_free_rx_ring_jumbo(sc);
5307
5308 /* Free TX buffers. */
5309 bge_free_tx_ring(sc);
5310
5311 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5312
5313 /* Clear MAC's link state (PHY may still have link UP). */
5314 if (bootverbose && sc->bge_link)
5315 if_printf(sc->bge_ifp, "link DOWN\n");
5316 sc->bge_link = 0;
5317
5318 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5319}
5320
5321/*
5322 * Stop all chip I/O so that the kernel's probe routines don't
5323 * get confused by errant DMAs when rebooting.
5324 */
5325static int
5326bge_shutdown(device_t dev)
5327{
5328 struct bge_softc *sc;
5329
5330 sc = device_get_softc(dev);
5331 BGE_LOCK(sc);
5332 bge_stop(sc);
5333 bge_reset(sc);
5334 BGE_UNLOCK(sc);
5335
5336 return (0);
5337}
5338
5339static int
5340bge_suspend(device_t dev)
5341{
5342 struct bge_softc *sc;
5343
5344 sc = device_get_softc(dev);
5345 BGE_LOCK(sc);
5346 bge_stop(sc);
5347 BGE_UNLOCK(sc);
5348
5349 return (0);
5350}
5351
5352static int
5353bge_resume(device_t dev)
5354{
5355 struct bge_softc *sc;
5356 struct ifnet *ifp;
5357
5358 sc = device_get_softc(dev);
5359 BGE_LOCK(sc);
5360 ifp = sc->bge_ifp;
5361 if (ifp->if_flags & IFF_UP) {
5362 bge_init_locked(sc);
5363 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5364 bge_start_locked(ifp);
5365 }
5366 BGE_UNLOCK(sc);
5367
5368 return (0);
5369}
5370
5371static void
5372bge_link_upd(struct bge_softc *sc)
5373{
5374 struct mii_data *mii;
5375 uint32_t link, status;
5376
5377 BGE_LOCK_ASSERT(sc);
5378
5379 /* Clear 'pending link event' flag. */
5380 sc->bge_link_evt = 0;
5381
5382 /*
5383 * Process link state changes.
5384 * Grrr. The link status word in the status block does
5385 * not work correctly on the BCM5700 rev AX and BX chips,
5386 * according to all available information. Hence, we have
5387 * to enable MII interrupts in order to properly obtain
5388 * async link changes. Unfortunately, this also means that
5389 * we have to read the MAC status register to detect link
5390 * changes, thereby adding an additional register access to
5391 * the interrupt handler.
5392 *
5393	 * XXX: perhaps the link state detection procedure used for
5394	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5395 */
5396
5397 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5398 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5399 status = CSR_READ_4(sc, BGE_MAC_STS);
5400 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5401 mii = device_get_softc(sc->bge_miibus);
5402 mii_pollstat(mii);
5403 if (!sc->bge_link &&
5404 mii->mii_media_status & IFM_ACTIVE &&
5405 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5406 sc->bge_link++;
5407 if (bootverbose)
5408 if_printf(sc->bge_ifp, "link UP\n");
5409 } else if (sc->bge_link &&
5410 (!(mii->mii_media_status & IFM_ACTIVE) ||
5411 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5412 sc->bge_link = 0;
5413 if (bootverbose)
5414 if_printf(sc->bge_ifp, "link DOWN\n");
5415 }
5416
5417 /* Clear the interrupt. */
5418 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5419 BGE_EVTENB_MI_INTERRUPT);
5420 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5421 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5422 BRGPHY_INTRS);
5423 }
5424 return;
5425 }
5426
5427 if (sc->bge_flags & BGE_FLAG_TBI) {
5428 status = CSR_READ_4(sc, BGE_MAC_STS);
5429 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5430 if (!sc->bge_link) {
5431 sc->bge_link++;
5432 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5433 BGE_CLRBIT(sc, BGE_MAC_MODE,
5434 BGE_MACMODE_TBI_SEND_CFGS);
5435 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5436 if (bootverbose)
5437 if_printf(sc->bge_ifp, "link UP\n");
5438 if_link_state_change(sc->bge_ifp,
5439 LINK_STATE_UP);
5440 }
5441 } else if (sc->bge_link) {
5442 sc->bge_link = 0;
5443 if (bootverbose)
5444 if_printf(sc->bge_ifp, "link DOWN\n");
5445 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5446 }
5447 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5448 /*
5449		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5450		 * bit in the status word always set. Work around this bug by
5451		 * reading the PHY link status directly.
5452 */
5453 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5454
5455 if (link != sc->bge_link ||
5456 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5457 mii = device_get_softc(sc->bge_miibus);
5458 mii_pollstat(mii);
5459 if (!sc->bge_link &&
5460 mii->mii_media_status & IFM_ACTIVE &&
5461 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5462 sc->bge_link++;
5463 if (bootverbose)
5464 if_printf(sc->bge_ifp, "link UP\n");
5465 } else if (sc->bge_link &&
5466 (!(mii->mii_media_status & IFM_ACTIVE) ||
5467 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5468 sc->bge_link = 0;
5469 if (bootverbose)
5470 if_printf(sc->bge_ifp, "link DOWN\n");
5471 }
5472 }
5473 } else {
5474 /*
5475 * For controllers that call mii_tick, we have to poll
5476 * link status.
5477 */
5478 mii = device_get_softc(sc->bge_miibus);
5479 mii_pollstat(mii);
5480 bge_miibus_statchg(sc->bge_dev);
5481 }
5482
5483 /* Clear the attention. */
5484 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5485 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5486 BGE_MACSTAT_LINK_CHANGED);
5487}
5488
5489static void
5490bge_add_sysctls(struct bge_softc *sc)
5491{
5492 struct sysctl_ctx_list *ctx;
5493 struct sysctl_oid_list *children;
5494 char tn[32];
5495 int unit;
5496
5497 ctx = device_get_sysctl_ctx(sc->bge_dev);
5498 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5499
5500#ifdef BGE_REGISTER_DEBUG
5501 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5502 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5503 "Debug Information");
5504
5505 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5506 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5507 "Register Read");
5508
5509 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5510 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5511 "Memory Read");
5512
5513#endif
5514
5515 unit = device_get_unit(sc->bge_dev);
5516 /*
5517 * A common design characteristic for many Broadcom client controllers
5518 * is that they only support a single outstanding DMA read operation
5519 * on the PCIe bus. This means that it will take twice as long to fetch
5520 * a TX frame that is split into header and payload buffers as it does
5521 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5522	 * these controllers, coalescing buffers to reduce the number of memory
5523	 * reads is an effective way to get maximum performance (about 940 Mbps).
5524	 * Without collapsing TX buffers the maximum TCP bulk transfer
5525	 * performance is about 850 Mbps. However, forcibly coalescing mbufs
5526	 * consumes a lot of CPU cycles, so leave it off by default.
5527 */
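	/*
	 * Exposed both as a loader tunable and a read/write sysctl; e.g.
	 * dev.bge.0.forced_collapse=1 makes bge_encap() m_defrag() every
	 * multi-mbuf frame, while larger values use m_collapse() with that
	 * segment count as the target.
	 */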
5528 sc->bge_forced_collapse = 0;
5529 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5530 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5531 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5532 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5533 "Number of fragmented TX buffers of a frame allowed before "
5534 "forced collapsing");
5535
5536 /*
5537 * It seems all Broadcom controllers have a bug that can generate UDP
5538 * datagrams with checksum value 0 when TX UDP checksum offloading is
5539	 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
5540	 * Even though the probability of generating such UDP datagrams is
5541	 * low, I don't want to see FreeBSD boxes inject such datagrams
5542	 * into the network, so disable UDP checksum offloading by default.
5543	 * Users can still override this behavior by setting a sysctl variable,
5544 * dev.bge.0.forced_udpcsum.
5545 */
5546 sc->bge_forced_udpcsum = 0;
5547 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5548 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5549 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5550 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5551 "Enable UDP checksum offloading even if controller can "
5552 "generate UDP checksum value 0");
5553
5554 if (BGE_IS_5705_PLUS(sc))
5555 bge_add_sysctl_stats_regs(sc, ctx, children);
5556 else
5557 bge_add_sysctl_stats(sc, ctx, children);
5558}
5559
5560#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5561 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5562 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5563 desc)
5564
5565static void
5566bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5567 struct sysctl_oid_list *parent)
5568{
5569 struct sysctl_oid *tree;
5570 struct sysctl_oid_list *children, *schildren;
5571
5572 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5573 NULL, "BGE Statistics");
5574 schildren = children = SYSCTL_CHILDREN(tree);
5575 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5576 children, COSFramesDroppedDueToFilters,
5577 "FramesDroppedDueToFilters");
5578 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5579 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5580 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5581 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5582 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5583 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5584 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5585 children, ifInDiscards, "InputDiscards");
5586 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5587 children, ifInErrors, "InputErrors");
5588 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5589 children, nicRecvThresholdHit, "RecvThresholdHit");
5590 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5591 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5592 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5593 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5594 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5595 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5596 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5597 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5598 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5599 children, nicRingStatusUpdate, "RingStatusUpdate");
5600 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5601 children, nicInterrupts, "Interrupts");
5602 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5603 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5604 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5605 children, nicSendThresholdHit, "SendThresholdHit");
5606
5607 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5608 NULL, "BGE RX Statistics");
5609 children = SYSCTL_CHILDREN(tree);
5610 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5611 children, rxstats.ifHCInOctets, "ifHCInOctets");
5612 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5613 children, rxstats.etherStatsFragments, "Fragments");
5614 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5615 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5616 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5617 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5618 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5619 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5620 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5621 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5622 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5623 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5624 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5625 children, rxstats.xoffPauseFramesReceived,
5626 "xoffPauseFramesReceived");
5627 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5628 children, rxstats.macControlFramesReceived,
5629 "ControlFramesReceived");
5630 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5631 children, rxstats.xoffStateEntered, "xoffStateEntered");
5632 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5633 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5634 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5635 children, rxstats.etherStatsJabbers, "Jabbers");
5636 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5637 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5638 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5639 children, rxstats.inRangeLengthError, "inRangeLengthError");
5640 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5641 children, rxstats.outRangeLengthError, "outRangeLengthError");
5642
5643 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5644 NULL, "BGE TX Statistics");
5645 children = SYSCTL_CHILDREN(tree);
5646 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5647 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5648 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5649 children, txstats.etherStatsCollisions, "Collisions");
5650 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5651 children, txstats.outXonSent, "XonSent");
5652 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5653 children, txstats.outXoffSent, "XoffSent");
5654 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5655 children, txstats.flowControlDone, "flowControlDone");
5656 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5657 children, txstats.dot3StatsInternalMacTransmitErrors,
5658 "InternalMacTransmitErrors");
5659 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5660 children, txstats.dot3StatsSingleCollisionFrames,
5661 "SingleCollisionFrames");
5662 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5663 children, txstats.dot3StatsMultipleCollisionFrames,
5664 "MultipleCollisionFrames");
5665 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5666 children, txstats.dot3StatsDeferredTransmissions,
5667 "DeferredTransmissions");
5668 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5669 children, txstats.dot3StatsExcessiveCollisions,
5670 "ExcessiveCollisions");
5671 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5672 children, txstats.dot3StatsLateCollisions,
5673 "LateCollisions");
5674 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5675 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5676 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5677 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5678 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5679 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5680 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5681 children, txstats.dot3StatsCarrierSenseErrors,
5682 "CarrierSenseErrors");
5683 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5684 children, txstats.ifOutDiscards, "Discards");
5685 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5686 children, txstats.ifOutErrors, "Errors");
5687}
5688
5689#undef BGE_SYSCTL_STAT
5690
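/*
 * BGE_SYSCTL_STAT_ADD64 exports a 64-bit counter kept in the softc
 * (bge_mac_stats).  On 5705 and newer parts the driver is expected to
 * maintain these counters itself from the MAC statistics registers
 * elsewhere, rather than using the DMA'ed statistics block.
 */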
5691#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5692 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5693
5694static void
5695bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5696 struct sysctl_oid_list *parent)
5697{
5698 struct sysctl_oid *tree;
5699 struct sysctl_oid_list *child, *schild;
5700 struct bge_mac_stats *stats;
5701
5702 stats = &sc->bge_mac_stats;
5703 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5704 NULL, "BGE Statistics");
5705 schild = child = SYSCTL_CHILDREN(tree);
5706 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5707 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5708 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5709 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5710 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5711 &stats->DmaWriteHighPriQueueFull,
5712 "NIC DMA Write High Priority Queue Full");
5713 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5714 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5715 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5716 &stats->InputDiscards, "Discarded Input Frames");
5717 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5718 &stats->InputErrors, "Input Errors");
5719 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5720 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5721
5722 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5723 NULL, "BGE RX Statistics");
5724 child = SYSCTL_CHILDREN(tree);
5725 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5726 &stats->ifHCInOctets, "Inbound Octets");
5727 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5728 &stats->etherStatsFragments, "Fragments");
5729 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5730 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5731 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5732 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5733 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5734 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5735 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5736 &stats->dot3StatsFCSErrors, "FCS Errors");
5737 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5738 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5739 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5740 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5741 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5742 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5743 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5744 &stats->macControlFramesReceived, "MAC Control Frames Received");
5745 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5746 &stats->xoffStateEntered, "XOFF State Entered");
5747 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5748 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5749 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5750 &stats->etherStatsJabbers, "Jabbers");
5751 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5752 &stats->etherStatsUndersizePkts, "Undersized Packets");
5753
5754 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5755 NULL, "BGE TX Statistics");
5756 child = SYSCTL_CHILDREN(tree);
5757 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5758 &stats->ifHCOutOctets, "Outbound Octets");
5759 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5760 &stats->etherStatsCollisions, "TX Collisions");
5761 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5762 &stats->outXonSent, "XON Sent");
5763 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5764 &stats->outXoffSent, "XOFF Sent");
5765 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5766 &stats->dot3StatsInternalMacTransmitErrors,
5767 "Internal MAC TX Errors");
5768 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5769 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5770 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5771 &stats->dot3StatsMultipleCollisionFrames,
5772 "Multiple Collision Frames");
5773 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5774 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5775 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5776 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5777 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5778 &stats->dot3StatsLateCollisions, "Late Collisions");
5779 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5780 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5781 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5782 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5783 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5784 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5785}
5786
5787#undef BGE_SYSCTL_STAT_ADD64
5788
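/*
 * Read a single statistics counter from the NIC-resident statistics block
 * through the PCI memory window.  Only the low 32 bits of the (presumably
 * 64-bit) counter are returned, which is why the offset of bge_addr_lo is
 * added to the requested offset.
 */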
5789static int
5790bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5791{
5792 struct bge_softc *sc;
5793 uint32_t result;
5794 int offset;
5795
5796 sc = (struct bge_softc *)arg1;
5797 offset = arg2;
5798 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5799 offsetof(bge_hostaddr, bge_addr_lo));
5800 return (sysctl_handle_int(oidp, &result, 0, req));
5801}
5802
5803#ifdef BGE_REGISTER_DEBUG
5804static int
5805bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5806{
5807 struct bge_softc *sc;
5808 uint16_t *sbdata;
5809 int error, result, sbsz;
5810 int i, j;
5811
5812 result = -1;
5813 error = sysctl_handle_int(oidp, &result, 0, req);
5814 if (error || (req->newptr == NULL))
5815 return (error);
5816
5817 if (result == 1) {
5818 sc = (struct bge_softc *)arg1;
5819
5820 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5821 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5822 sbsz = BGE_STATUS_BLK_SZ;
5823 else
5824 sbsz = 32;
5825 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5826 printf("Status Block:\n");
5827 BGE_LOCK(sc);
5828 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5829 sc->bge_cdata.bge_status_map,
5830 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5831 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5832 printf("%06x:", i);
5833 for (j = 0; j < 8; j++)
5834 printf(" %04x", sbdata[i++]);
5835 printf("\n");
5836 }
5837
5838 printf("Registers:\n");
5839 for (i = 0x800; i < 0xA00; ) {
5840 printf("%06x:", i);
5841 for (j = 0; j < 8; j++) {
5842 printf(" %08x", CSR_READ_4(sc, i));
5843 i += 4;
5844 }
5845 printf("\n");
5846 }
5847 BGE_UNLOCK(sc);
5848
5849 printf("Hardware Flags:\n");
5850 if (BGE_IS_5717_PLUS(sc))
5851 printf(" - 5717 Plus\n");
5852 if (BGE_IS_5755_PLUS(sc))
5853 printf(" - 5755 Plus\n");
5854 if (BGE_IS_575X_PLUS(sc))
5855 printf(" - 575X Plus\n");
5856 if (BGE_IS_5705_PLUS(sc))
5857 printf(" - 5705 Plus\n");
5858 if (BGE_IS_5714_FAMILY(sc))
5859 printf(" - 5714 Family\n");
5860 if (BGE_IS_5700_FAMILY(sc))
5861 printf(" - 5700 Family\n");
5862 if (sc->bge_flags & BGE_FLAG_JUMBO)
5863 printf(" - Supports Jumbo Frames\n");
5864 if (sc->bge_flags & BGE_FLAG_PCIX)
5865 printf(" - PCI-X Bus\n");
5866 if (sc->bge_flags & BGE_FLAG_PCIE)
5867 printf(" - PCI Express Bus\n");
5868 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5869 printf(" - No 3 LEDs\n");
5870 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5871 printf(" - RX Alignment Bug\n");
5872 }
5873
5874 return (error);
5875}
5876
5877static int
5878bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5879{
5880 struct bge_softc *sc;
5881 int error;
5882 uint16_t result;
5883 uint32_t val;
5884
5885 result = -1;
5886 error = sysctl_handle_int(oidp, &result, 0, req);
5887 if (error || (req->newptr == NULL))
5888 return (error);
5889
5890 if (result < 0x8000) {
5891 sc = (struct bge_softc *)arg1;
5892 val = CSR_READ_4(sc, result);
5893 printf("reg 0x%06X = 0x%08X\n", result, val);
5894 }
5895
5896 return (error);
5897}
5898
5899static int
5900bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5901{
5902 struct bge_softc *sc;
5903 int error;
5904 uint16_t result;
5905 uint32_t val;
5906
5907 result = -1;
5908 error = sysctl_handle_int(oidp, &result, 0, req);
5909 if (error || (req->newptr == NULL))
5910 return (error);
5911
5912 if (result < 0x8000) {
5913 sc = (struct bge_softc *)arg1;
5914 val = bge_readmem_ind(sc, result);
5915 printf("mem 0x%06X = 0x%08X\n", result, val);
5916 }
5917
5918 return (error);
5919}
5920#endif
5921
5922static int
5923bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5924{
5925
5926 if (sc->bge_flags & BGE_FLAG_EADDR)
5927 return (1);
5928
5929#ifdef __sparc64__
5930 OF_getetheraddr(sc->bge_dev, ether_addr);
5931 return (0);
5932#endif
5933 return (1);
5934}
5935
5936static int
5937bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5938{
5939 uint32_t mac_addr;
5940
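	/*
	 * The bootcode apparently tags a valid station address in NIC SRAM
	 * with an ASCII "HK" (0x484b) signature in the upper half of the
	 * high mailbox word; only trust the address if that signature is
	 * present.
	 */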
5941 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
5942 if ((mac_addr >> 16) == 0x484b) {
5943 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5944 ether_addr[1] = (uint8_t)mac_addr;
5945 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
5946 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5947 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5948 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5949 ether_addr[5] = (uint8_t)mac_addr;
5950 return (0);
5951 }
5952 return (1);
5953}
5954
5955static int
5956bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5957{
5958 int mac_offset = BGE_EE_MAC_OFFSET;
5959
5960 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5961 mac_offset = BGE_EE_MAC_OFFSET_5906;
5962
5963 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5964 ETHER_ADDR_LEN));
5965}
5966
5967static int
5968bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5969{
5970
5971 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5972 return (1);
5973
5974 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5975 ETHER_ADDR_LEN));
5976}
5977
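/*
 * Try each known source of the station address in turn: firmware/Open
 * Firmware, the SRAM mailbox left by the bootcode, NVRAM and finally the
 * EEPROM.  The first method that returns 0 wins.
 */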
5978static int
5979bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5980{
5981 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5982 /* NOTE: Order is critical */
5983 bge_get_eaddr_fw,
5984 bge_get_eaddr_mem,
5985 bge_get_eaddr_nvram,
5986 bge_get_eaddr_eeprom,
5987 NULL
5988 };
5989 const bge_eaddr_fcn_t *func;
5990
5991 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5992 if ((*func)(sc, eaddr) == 0)
5993 break;
5994 }
5995 return (*func == NULL ? ENXIO : 0);
5996}
1360 break;
1361 }
1362 }
1363}
1364
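/*
 * Ask the ASF firmware to pause: post BGE_FW_PAUSE in the firmware command
 * mailbox, raise what appears to be the driver-to-firmware event bit
 * (bit 14 of the RX CPU event register, mirroring the Linux tg3 usage),
 * and give the firmware up to ~1ms to acknowledge by clearing that bit.
 */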
1365static void
1366bge_stop_fw(struct bge_softc *sc)
1367{
1368 int i;
1369
1370 if (sc->bge_asf_mode) {
1371 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_PAUSE);
1372 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1373 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | (1 << 14));
1374
1375 for (i = 0; i < 100; i++ ) {
1376 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & (1 << 14)))
1377 break;
1378 DELAY(10);
1379 }
1380 }
1381}
1382
1383/*
1384 * Do endian, PCI and DMA initialization.
1385 */
1386static int
1387bge_chipinit(struct bge_softc *sc)
1388{
1389 uint32_t dma_rw_ctl, misc_ctl;
1390 uint16_t val;
1391 int i;
1392
1393 /* Set endianness before we access any non-PCI registers. */
1394 misc_ctl = BGE_INIT;
1395 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1396 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1397 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1398
1399 /* Clear the MAC control register */
1400 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1401
1402 /*
1403 * Clear the MAC statistics block in the NIC's
1404 * internal memory.
1405 */
1406 for (i = BGE_STATS_BLOCK;
1407 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1408 BGE_MEMWIN_WRITE(sc, i, 0);
1409
1410 for (i = BGE_STATUS_BLOCK;
1411 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1412 BGE_MEMWIN_WRITE(sc, i, 0);
1413
1414 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1415 /*
1416 * Fix data corruption caused by non-qword write with WB.
1417 * Fix master abort in PCI mode.
1418 * Fix PCI latency timer.
1419 */
1420 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1421 val |= (1 << 10) | (1 << 12) | (1 << 13);
1422 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1423 }
1424
1425 /*
1426 * Set up the PCI DMA control register.
1427 */
1428 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1429 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1430 if (sc->bge_flags & BGE_FLAG_PCIE) {
1431 /* Read watermark not used, 128 bytes for write. */
1432 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1433 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1434 if (BGE_IS_5714_FAMILY(sc)) {
1435 /* 256 bytes for read and write. */
1436 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1437 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1438 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1439 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1440 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1441 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1442 /*
1443 * In the BCM5703, the DMA read watermark should
1444 * be set to less than or equal to the maximum
1445 * memory read byte count of the PCI-X command
1446 * register.
1447 */
1448 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1449 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1450 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1451 /* 1536 bytes for read, 384 bytes for write. */
1452 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1453 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1454 } else {
1455 /* 384 bytes for read and write. */
1456 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1457 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1458 0x0F;
1459 }
1460 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1461 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1462 uint32_t tmp;
1463
1464 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1465 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1466 if (tmp == 6 || tmp == 7)
1467 dma_rw_ctl |=
1468 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1469
1470 /* Set PCI-X DMA write workaround. */
1471 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1472 }
1473 } else {
1474 /* Conventional PCI bus: 256 bytes for read and write. */
1475 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1476 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1477
1478 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1479 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1480 dma_rw_ctl |= 0x0F;
1481 }
1482 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1483 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1484 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1485 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1486 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1487 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1488 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1489 if (BGE_IS_5717_PLUS(sc)) {
1490 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1491 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1492 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1493 /*
1494 * Enable HW workaround for controllers that misinterpret
1495 * a status tag update and leave interrupts permanently
1496 * disabled.
1497 */
1498 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1499 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1500 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1501 }
1502 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1503
1504 /*
1505 * Set up general mode register.
1506 */
1507 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1508 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1509 BGE_MODECTL_TX_NO_PHDR_CSUM);
1510
1511 /*
1512	 * The BCM5701 B5 has a bug causing data corruption when using
1513 * 64-bit DMA reads, which can be terminated early and then
1514 * completed later as 32-bit accesses, in combination with
1515 * certain bridges.
1516 */
1517 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1518 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1519 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1520
1521 /*
1522 * Tell the firmware the driver is running
1523 */
1524 if (sc->bge_asf_mode & ASF_STACKUP)
1525 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1526
1527 /*
1528 * Disable memory write invalidate. Apparently it is not supported
1529 * properly by these devices. Also ensure that INTx isn't disabled,
1530 * as these chips need it even when using MSI.
1531 */
1532 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1533 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1534
1535	/* Set the timer prescaler (always 66MHz) */
1536 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1537
1538 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1539 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1540 DELAY(40); /* XXX */
1541
1542 /* Put PHY into ready state */
1543 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1544 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1545 DELAY(40);
1546 }
1547
1548 return (0);
1549}
1550
1551static int
1552bge_blockinit(struct bge_softc *sc)
1553{
1554 struct bge_rcb *rcb;
1555 bus_size_t vrcb;
1556 bge_hostaddr taddr;
1557 uint32_t dmactl, val;
1558 int i, limit;
1559
1560 /*
1561 * Initialize the memory window pointer register so that
1562 * we can access the first 32K of internal NIC RAM. This will
1563 * allow us to set up the TX send ring RCBs and the RX return
1564 * ring RCBs, plus other things which live in NIC memory.
1565 */
1566 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1567
1568 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1569
1570 if (!(BGE_IS_5705_PLUS(sc))) {
1571 /* Configure mbuf memory pool */
1572 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1573 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1574 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1575 else
1576 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1577
1578 /* Configure DMA resource pool */
1579 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1580 BGE_DMA_DESCRIPTORS);
1581 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1582 }
1583
1584 /* Configure mbuf pool watermarks */
1585 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1586 sc->bge_asicrev == BGE_ASICREV_BCM57765) {
1587 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1588 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1589 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1590 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1591 } else {
1592 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1593 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1594 }
1595 } else if (!BGE_IS_5705_PLUS(sc)) {
1596 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1597 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1598 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1599 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1600 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1601 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1602 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1603 } else {
1604 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1605 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1606 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1607 }
1608
1609 /* Configure DMA resource watermarks */
1610 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1611 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1612
1613 /* Enable buffer manager */
1614 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1615 /*
1616	 * Change the arbitration algorithm for TXMBUF read requests to
1617	 * round-robin instead of priority-based for BCM5719. When the
1618	 * TXFIFO is almost empty, RDMA will hold its request until the
1619	 * TXFIFO is no longer almost empty.
1620 */
1621 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1622 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1623 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1624
1625 /* Poll for buffer manager start indication */
1626 for (i = 0; i < BGE_TIMEOUT; i++) {
1627 DELAY(10);
1628 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1629 break;
1630 }
1631
1632 if (i == BGE_TIMEOUT) {
1633 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1634 return (ENXIO);
1635 }
1636
1637 /* Enable flow-through queues */
1638 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1639 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1640
1641 /* Wait until queue initialization is complete */
1642 for (i = 0; i < BGE_TIMEOUT; i++) {
1643 DELAY(10);
1644 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1645 break;
1646 }
1647
1648 if (i == BGE_TIMEOUT) {
1649 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1650 return (ENXIO);
1651 }
1652
1653 /*
1654 * Summary of rings supported by the controller:
1655 *
1656 * Standard Receive Producer Ring
1657 * - This ring is used to feed receive buffers for "standard"
1658 * sized frames (typically 1536 bytes) to the controller.
1659 *
1660 * Jumbo Receive Producer Ring
1661 * - This ring is used to feed receive buffers for jumbo sized
1662 * frames (i.e. anything bigger than the "standard" frames)
1663 * to the controller.
1664 *
1665 * Mini Receive Producer Ring
1666 * - This ring is used to feed receive buffers for "mini"
1667 * sized frames to the controller.
1668 * - This feature required external memory for the controller
1669 * but was never used in a production system. Should always
1670 * be disabled.
1671 *
1672 * Receive Return Ring
1673 * - After the controller has placed an incoming frame into a
1674	 *   receive buffer, that buffer is moved into a receive return
1675	 *   ring. The driver is then responsible for passing the
1676 * buffer up to the stack. Many versions of the controller
1677 * support multiple RR rings.
1678 *
1679 * Send Ring
1680 * - This ring is used for outgoing frames. Many versions of
1681 * the controller support multiple send rings.
1682 */
1683
1684 /* Initialize the standard receive producer ring control block. */
1685 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1686 rcb->bge_hostaddr.bge_addr_lo =
1687 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1688 rcb->bge_hostaddr.bge_addr_hi =
1689 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1690 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1691 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
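	/*
	 * BGE_RCB_MAXLEN_FLAGS(len, flags) presumably packs 'len' into
	 * bits 31-16 and ORs 'flags' into the low 16 bits, which is why
	 * the 5717+ case below shifts the maximum frame length left by 2
	 * so that it lands in bits 15-2.
	 */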
1692 if (BGE_IS_5717_PLUS(sc)) {
1693 /*
1694 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1695 * Bits 15-2 : Maximum RX frame size
1696	 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1697 * Bit 0 : Reserved
1698 */
1699 rcb->bge_maxlen_flags =
1700 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1701 } else if (BGE_IS_5705_PLUS(sc)) {
1702 /*
1703 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1704 * Bits 15-2 : Reserved (should be 0)
1705 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1706 * Bit 0 : Reserved
1707 */
1708 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1709 } else {
1710 /*
1711 * Ring size is always XXX entries
1712 * Bits 31-16: Maximum RX frame size
1713 * Bits 15-2 : Reserved (should be 0)
1714 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1715 * Bit 0 : Reserved
1716 */
1717 rcb->bge_maxlen_flags =
1718 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1719 }
1720 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1721 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1722 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1723 else
1724 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1725 /* Write the standard receive producer ring control block. */
1726 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1727 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1728 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1729 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1730
1731 /* Reset the standard receive producer ring producer index. */
1732 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1733
1734 /*
1735 * Initialize the jumbo RX producer ring control
1736 * block. We set the 'ring disabled' bit in the
1737 * flags field until we're actually ready to start
1738 * using this ring (i.e. once we set the MTU
1739 * high enough to require it).
1740 */
1741 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1742 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1743 /* Get the jumbo receive producer ring RCB parameters. */
1744 rcb->bge_hostaddr.bge_addr_lo =
1745 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1746 rcb->bge_hostaddr.bge_addr_hi =
1747 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1748 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1749 sc->bge_cdata.bge_rx_jumbo_ring_map,
1750 BUS_DMASYNC_PREREAD);
1751 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1752 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1753 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1754 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1755 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1756 else
1757 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1758 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1759 rcb->bge_hostaddr.bge_addr_hi);
1760 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1761 rcb->bge_hostaddr.bge_addr_lo);
1762 /* Program the jumbo receive producer ring RCB parameters. */
1763 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1764 rcb->bge_maxlen_flags);
1765 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1766 /* Reset the jumbo receive producer ring producer index. */
1767 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1768 }
1769
1770 /* Disable the mini receive producer ring RCB. */
1771 if (BGE_IS_5700_FAMILY(sc)) {
1772 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1773 rcb->bge_maxlen_flags =
1774 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1775 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1776 rcb->bge_maxlen_flags);
1777 /* Reset the mini receive producer ring producer index. */
1778 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1779 }
1780
1781 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1782 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1783 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1784 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1785 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1786 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1787 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1788 }
1789 /*
1790 * The BD ring replenish thresholds control how often the
1791	 * hardware fetches new BDs from the producer rings in host
1792	 * memory. Setting the value too low on a busy system can
1793	 * starve the hardware and reduce the throughput.
1794	 *
1795	 * Set the BD ring replenish thresholds. The recommended
1796 * values are 1/8th the number of descriptors allocated to
1797 * each ring.
1798 * XXX The 5754 requires a lower threshold, so it might be a
1799 * requirement of all 575x family chips. The Linux driver sets
1800 * the lower threshold for all 5705 family chips as well, but there
1801 * are reports that it might not need to be so strict.
1802 *
1803 * XXX Linux does some extra fiddling here for the 5906 parts as
1804 * well.
1805 */
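	/*
	 * Example: with the driver's usual BGE_STD_RX_RING_CNT of 512, the
	 * pre-5705 branch below programs a replenish threshold of
	 * 512 / 8 = 64 BDs, while 5705 and later parts simply use 8.
	 */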
1806 if (BGE_IS_5705_PLUS(sc))
1807 val = 8;
1808 else
1809 val = BGE_STD_RX_RING_CNT / 8;
1810 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1811 if (BGE_IS_JUMBO_CAPABLE(sc))
1812 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1813 BGE_JUMBO_RX_RING_CNT/8);
1814 if (BGE_IS_5717_PLUS(sc)) {
1815 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1816 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1817 }
1818
1819 /*
1820 * Disable all send rings by setting the 'ring disabled' bit
1821 * in the flags field of all the TX send ring control blocks,
1822 * located in NIC memory.
1823 */
1824 if (!BGE_IS_5705_PLUS(sc))
1825 /* 5700 to 5704 had 16 send rings. */
1826 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1827 else
1828 limit = 1;
1829 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1830 for (i = 0; i < limit; i++) {
1831 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1832 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1833 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1834 vrcb += sizeof(struct bge_rcb);
1835 }
1836
1837 /* Configure send ring RCB 0 (we use only the first ring) */
1838 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1839 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1840 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1841 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1842 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1843 sc->bge_asicrev == BGE_ASICREV_BCM5719)
1844 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1845 else
1846 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1847 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1848 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1849 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1850
1851 /*
1852 * Disable all receive return rings by setting the
1853	 * 'ring disabled' bit in the flags field of all the receive
1854 * return ring control blocks, located in NIC memory.
1855 */
1856 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1857 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
1858 /* Should be 17, use 16 until we get an SRAM map. */
1859 limit = 16;
1860 } else if (!BGE_IS_5705_PLUS(sc))
1861 limit = BGE_RX_RINGS_MAX;
1862 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1863 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1864 limit = 4;
1865 else
1866 limit = 1;
1867 /* Disable all receive return rings. */
1868 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1869 for (i = 0; i < limit; i++) {
1870 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1871 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1872 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1873 BGE_RCB_FLAG_RING_DISABLED);
1874 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1875 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1876 (i * (sizeof(uint64_t))), 0);
1877 vrcb += sizeof(struct bge_rcb);
1878 }
1879
1880 /*
1881 * Set up receive return ring 0. Note that the NIC address
1882 * for RX return rings is 0x0. The return rings live entirely
1883 * within the host, so the nicaddr field in the RCB isn't used.
1884 */
1885 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1886 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1887 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1888 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1889 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1890 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1891 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1892
1893 /* Set random backoff seed for TX */
1894 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1895 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1896 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1897 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1898 BGE_TX_BACKOFF_SEED_MASK);
1899
1900 /* Set inter-packet gap */
1901 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1902
1903 /*
1904 * Specify which ring to use for packets that don't match
1905 * any RX rules.
1906 */
1907 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1908
1909 /*
1910 * Configure number of RX lists. One interrupt distribution
1911 * list, sixteen active lists, one bad frames class.
1912 */
1913 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1914
1915	/* Initialize RX list placement stats mask. */
1916 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1917 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1918
1919 /* Disable host coalescing until we get it set up */
1920 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1921
1922 /* Poll to make sure it's shut down. */
1923 for (i = 0; i < BGE_TIMEOUT; i++) {
1924 DELAY(10);
1925 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1926 break;
1927 }
1928
1929 if (i == BGE_TIMEOUT) {
1930 device_printf(sc->bge_dev,
1931 "host coalescing engine failed to idle\n");
1932 return (ENXIO);
1933 }
1934
1935 /* Set up host coalescing defaults */
1936 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1937 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1938 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1939 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1940 if (!(BGE_IS_5705_PLUS(sc))) {
1941 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1942 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1943 }
1944 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1945 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1946
1947 /* Set up address of statistics block */
1948 if (!(BGE_IS_5705_PLUS(sc))) {
1949 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1950 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1951 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1952 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1953 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1954 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1955 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1956 }
1957
1958 /* Set up address of status block */
1959 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1960 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1961 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1962 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1963
1964 /* Set up status block size. */
1965 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1966 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1967 val = BGE_STATBLKSZ_FULL;
1968 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1969 } else {
1970 val = BGE_STATBLKSZ_32BYTE;
1971 bzero(sc->bge_ldata.bge_status_block, 32);
1972 }
1973 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1974 sc->bge_cdata.bge_status_map,
1975 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1976
1977 /* Turn on host coalescing state machine */
1978 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1979
1980 /* Turn on RX BD completion state machine and enable attentions */
1981 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1982 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1983
1984 /* Turn on RX list placement state machine */
1985 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1986
1987 /* Turn on RX list selector state machine. */
1988 if (!(BGE_IS_5705_PLUS(sc)))
1989 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1990
1991 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1992 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1993 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1994 BGE_MACMODE_FRMHDR_DMA_ENB;
1995
1996 if (sc->bge_flags & BGE_FLAG_TBI)
1997 val |= BGE_PORTMODE_TBI;
1998 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1999 val |= BGE_PORTMODE_GMII;
2000 else
2001 val |= BGE_PORTMODE_MII;
2002
2003 /* Turn on DMA, clear stats */
2004 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2005
2006 /* Set misc. local control, enable interrupts on attentions */
2007 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2008
2009#ifdef notdef
2010 /* Assert GPIO pins for PHY reset */
2011 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2012 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2013 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2014 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2015#endif
2016
2017 /* Turn on DMA completion state machine */
2018 if (!(BGE_IS_5705_PLUS(sc)))
2019 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2020
2021 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2022
2023 /* Enable host coalescing bug fix. */
2024 if (BGE_IS_5755_PLUS(sc))
2025 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2026
2027 /* Request larger DMA burst size to get better performance. */
2028 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2029 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2030
2031 /* Turn on write DMA state machine */
2032 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2033 DELAY(40);
2034
2035 /* Turn on read DMA state machine */
2036 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2037
2038 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2039 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2040
2041 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2042 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2043 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2044 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2045 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2046 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2047 if (sc->bge_flags & BGE_FLAG_PCIE)
2048 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2049 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2050 val |= BGE_RDMAMODE_TSO4_ENABLE;
2051 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2052 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2053 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2054 val |= BGE_RDMAMODE_TSO6_ENABLE;
2055 }
2056 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2057 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2058 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2059 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2060 BGE_IS_5717_PLUS(sc)) {
2061 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2062 /*
2063	 * Adjust TX margin to prevent TX data corruption and
2064 * fix internal FIFO overflow.
2065 */
2066 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2067 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2068 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2069 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2070 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2071 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2072 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2073 }
2074 /*
2075 * Enable fix for read DMA FIFO overruns.
2076 * The fix is to limit the number of RX BDs
2077	 * the hardware would fetch at a time.
2078 */
2079 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2080 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2081 }
2082
2083 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2084 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2085 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2086 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2087 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2088 }
2089
2090 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2091 DELAY(40);
2092
2093 /* Turn on RX data completion state machine */
2094 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2095
2096 /* Turn on RX BD initiator state machine */
2097 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2098
2099 /* Turn on RX data and RX BD initiator state machine */
2100 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2101
2102 /* Turn on Mbuf cluster free state machine */
2103 if (!(BGE_IS_5705_PLUS(sc)))
2104 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2105
2106 /* Turn on send BD completion state machine */
2107 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2108
2109 /* Turn on send data completion state machine */
2110 val = BGE_SDCMODE_ENABLE;
2111 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2112 val |= BGE_SDCMODE_CDELAY;
2113 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2114
2115 /* Turn on send data initiator state machine */
2116 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2117 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2118 BGE_SDIMODE_HW_LSO_PRE_DMA);
2119 else
2120 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2121
2122 /* Turn on send BD initiator state machine */
2123 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2124
2125 /* Turn on send BD selector state machine */
2126 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2127
2128 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2129 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2130 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2131
2132 /* ack/clear link change events */
2133 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2134 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2135 BGE_MACSTAT_LINK_CHANGED);
2136 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2137
2138 /*
2139 * Enable attention when the link has changed state for
2140 * devices that use auto polling.
2141 */
2142 if (sc->bge_flags & BGE_FLAG_TBI) {
2143 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2144 } else {
2145 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2146 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2147 DELAY(80);
2148 }
2149 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2150 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2151 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2152 BGE_EVTENB_MI_INTERRUPT);
2153 }
2154
2155 /*
2156 * Clear any pending link state attention.
2157 * Otherwise some link state change events may be lost until attention
2158 * is cleared by bge_intr() -> bge_link_upd() sequence.
2159 * It's not necessary on newer BCM chips - perhaps enabling link
2160 * state change attentions implies clearing pending attention.
2161 */
2162 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2163 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2164 BGE_MACSTAT_LINK_CHANGED);
2165
2166 /* Enable link state change attentions. */
2167 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2168
2169 return (0);
2170}
2171
2172const struct bge_revision *
2173bge_lookup_rev(uint32_t chipid)
2174{
2175 const struct bge_revision *br;
2176
2177 for (br = bge_revisions; br->br_name != NULL; br++) {
2178 if (br->br_chipid == chipid)
2179 return (br);
2180 }
2181
2182 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2183 if (br->br_chipid == BGE_ASICREV(chipid))
2184 return (br);
2185 }
2186
2187 return (NULL);
2188}
2189
2190const struct bge_vendor *
2191bge_lookup_vendor(uint16_t vid)
2192{
2193 const struct bge_vendor *v;
2194
2195 for (v = bge_vendors; v->v_name != NULL; v++)
2196 if (v->v_id == vid)
2197 return (v);
2198
2199 panic("%s: unknown vendor %d", __func__, vid);
2200 return (NULL);
2201}
2202
2203/*
2204 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2205 * against our list and return its name if we find a match.
2206 *
2207 * Note that since the Broadcom controller contains VPD support, we
2208 * try to get the device name string from the controller itself instead
2209 * of the compiled-in string. It guarantees we'll always announce the
2210 * right product name. We fall back to the compiled-in string when
2211 * VPD is unavailable or corrupt.
2212 */
2213static int
2214bge_probe(device_t dev)
2215{
2216 char buf[96];
2217 char model[64];
2218 const struct bge_revision *br;
2219 const char *pname;
2220 struct bge_softc *sc = device_get_softc(dev);
2221 const struct bge_type *t = bge_devs;
2222 const struct bge_vendor *v;
2223 uint32_t id;
2224 uint16_t did, vid;
2225
2226 sc->bge_dev = dev;
2227 vid = pci_get_vendor(dev);
2228 did = pci_get_device(dev);
2229 while(t->bge_vid != 0) {
2230 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2231 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2232 BGE_PCIMISCCTL_ASICREV_SHIFT;
2233 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2234 /*
2235	 * Find the ASIC revision. Different chips
2236 * use different registers.
2237 */
2238 switch (pci_get_device(dev)) {
2239 case BCOM_DEVICEID_BCM5717:
2240 case BCOM_DEVICEID_BCM5718:
2241 case BCOM_DEVICEID_BCM5719:
2242 id = pci_read_config(dev,
2243 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2244 break;
2245 case BCOM_DEVICEID_BCM57761:
2246 case BCOM_DEVICEID_BCM57765:
2247 case BCOM_DEVICEID_BCM57781:
2248 case BCOM_DEVICEID_BCM57785:
2249 case BCOM_DEVICEID_BCM57791:
2250 case BCOM_DEVICEID_BCM57795:
2251 id = pci_read_config(dev,
2252 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2253 break;
2254 default:
2255 id = pci_read_config(dev,
2256 BGE_PCI_PRODID_ASICREV, 4);
2257 }
2258 }
2259 br = bge_lookup_rev(id);
2260 v = bge_lookup_vendor(vid);
2261 if (bge_has_eaddr(sc) &&
2262 pci_get_vpd_ident(dev, &pname) == 0)
2263 snprintf(model, 64, "%s", pname);
2264 else
2265 snprintf(model, 64, "%s %s", v->v_name,
2266 br != NULL ? br->br_name :
2267 "NetXtreme Ethernet Controller");
2268 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2269 br != NULL ? "" : "unknown ", id);
2270 device_set_desc_copy(dev, buf);
2271 return (0);
2272 }
2273 t++;
2274 }
2275
2276 return (ENXIO);
2277}
2278
2279static void
2280bge_dma_free(struct bge_softc *sc)
2281{
2282 int i;
2283
2284 /* Destroy DMA maps for RX buffers. */
2285 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2286 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2287 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2288 sc->bge_cdata.bge_rx_std_dmamap[i]);
2289 }
2290 if (sc->bge_cdata.bge_rx_std_sparemap)
2291 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2292 sc->bge_cdata.bge_rx_std_sparemap);
2293
2294 /* Destroy DMA maps for jumbo RX buffers. */
2295 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2296 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2297 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2298 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2299 }
2300 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2301 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2302 sc->bge_cdata.bge_rx_jumbo_sparemap);
2303
2304 /* Destroy DMA maps for TX buffers. */
2305 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2306 if (sc->bge_cdata.bge_tx_dmamap[i])
2307 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2308 sc->bge_cdata.bge_tx_dmamap[i]);
2309 }
2310
2311 if (sc->bge_cdata.bge_rx_mtag)
2312 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2313 if (sc->bge_cdata.bge_tx_mtag)
2314 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2315
2316
2317 /* Destroy standard RX ring. */
2318 if (sc->bge_cdata.bge_rx_std_ring_map)
2319 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2320 sc->bge_cdata.bge_rx_std_ring_map);
2321 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2322 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2323 sc->bge_ldata.bge_rx_std_ring,
2324 sc->bge_cdata.bge_rx_std_ring_map);
2325
2326 if (sc->bge_cdata.bge_rx_std_ring_tag)
2327 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2328
2329 /* Destroy jumbo RX ring. */
2330 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2331 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2332 sc->bge_cdata.bge_rx_jumbo_ring_map);
2333
2334 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2335 sc->bge_ldata.bge_rx_jumbo_ring)
2336 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2337 sc->bge_ldata.bge_rx_jumbo_ring,
2338 sc->bge_cdata.bge_rx_jumbo_ring_map);
2339
2340 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2341 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2342
2343 /* Destroy RX return ring. */
2344 if (sc->bge_cdata.bge_rx_return_ring_map)
2345 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2346 sc->bge_cdata.bge_rx_return_ring_map);
2347
2348 if (sc->bge_cdata.bge_rx_return_ring_map &&
2349 sc->bge_ldata.bge_rx_return_ring)
2350 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2351 sc->bge_ldata.bge_rx_return_ring,
2352 sc->bge_cdata.bge_rx_return_ring_map);
2353
2354 if (sc->bge_cdata.bge_rx_return_ring_tag)
2355 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2356
2357 /* Destroy TX ring. */
2358 if (sc->bge_cdata.bge_tx_ring_map)
2359 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2360 sc->bge_cdata.bge_tx_ring_map);
2361
2362 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2363 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2364 sc->bge_ldata.bge_tx_ring,
2365 sc->bge_cdata.bge_tx_ring_map);
2366
2367 if (sc->bge_cdata.bge_tx_ring_tag)
2368 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2369
2370 /* Destroy status block. */
2371 if (sc->bge_cdata.bge_status_map)
2372 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2373 sc->bge_cdata.bge_status_map);
2374
2375 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2376 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2377 sc->bge_ldata.bge_status_block,
2378 sc->bge_cdata.bge_status_map);
2379
2380 if (sc->bge_cdata.bge_status_tag)
2381 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2382
2383 /* Destroy statistics block. */
2384 if (sc->bge_cdata.bge_stats_map)
2385 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2386 sc->bge_cdata.bge_stats_map);
2387
2388 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2389 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2390 sc->bge_ldata.bge_stats,
2391 sc->bge_cdata.bge_stats_map);
2392
2393 if (sc->bge_cdata.bge_stats_tag)
2394 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2395
2396 if (sc->bge_cdata.bge_buffer_tag)
2397 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2398
2399 /* Destroy the parent tag. */
2400 if (sc->bge_cdata.bge_parent_tag)
2401 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2402}
2403
2404static int
2405bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2406 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2407 bus_addr_t *paddr, const char *msg)
2408{
2409 struct bge_dmamap_arg ctx;
2410 bus_addr_t lowaddr;
2411 bus_size_t ring_end;
2412 int error;
2413
2414 lowaddr = BUS_SPACE_MAXADDR;
2415again:
2416 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2417 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2418 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2419 if (error != 0) {
2420 device_printf(sc->bge_dev,
2421 "could not create %s dma tag\n", msg);
2422 return (ENOMEM);
2423 }
2424 /* Allocate DMA'able memory for ring. */
2425 error = bus_dmamem_alloc(*tag, (void **)ring,
2426 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2427 if (error != 0) {
2428 device_printf(sc->bge_dev,
2429 "could not allocate DMA'able memory for %s\n", msg);
2430 return (ENOMEM);
2431 }
2432 /* Load the address of the ring. */
2433 ctx.bge_busaddr = 0;
2434 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2435 &ctx, BUS_DMA_NOWAIT);
2436 if (error != 0) {
2437 device_printf(sc->bge_dev,
2438 "could not load DMA'able memory for %s\n", msg);
2439 return (ENOMEM);
2440 }
2441 *paddr = ctx.bge_busaddr;
2442 ring_end = *paddr + maxsize;
2443 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2444 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2445 /*
2446 * 4GB boundary crossed. Limit maximum allowable DMA
2447 * address space to 32bit and try again.
2448 */
2449 bus_dmamap_unload(*tag, *map);
2450 bus_dmamem_free(*tag, *ring, *map);
2451 bus_dma_tag_destroy(*tag);
2452 if (bootverbose)
2453 device_printf(sc->bge_dev, "4GB boundary crossed, "
2454 "limit DMA address space to 32bit for %s\n", msg);
2455 *ring = NULL;
2456 *tag = NULL;
2457 *map = NULL;
2458 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2459 goto again;
2460 }
2461 return (0);
2462}
2463
2464static int
2465bge_dma_alloc(struct bge_softc *sc)
2466{
2467 bus_addr_t lowaddr;
2468 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2469 int i, error;
2470
2471 lowaddr = BUS_SPACE_MAXADDR;
2472 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2473 lowaddr = BGE_DMA_MAXADDR;
2474 /*
2475 * Allocate the parent bus DMA tag appropriate for PCI.
2476 */
2477 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2478 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2479 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2480 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2481 if (error != 0) {
2482 device_printf(sc->bge_dev,
2483 "could not allocate parent dma tag\n");
2484 return (ENOMEM);
2485 }
2486
2487 /* Create tag for standard RX ring. */
2488 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2489 &sc->bge_cdata.bge_rx_std_ring_tag,
2490 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2491 &sc->bge_cdata.bge_rx_std_ring_map,
2492 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2493 if (error)
2494 return (error);
2495
2496 /* Create tag for RX return ring. */
2497 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2498 &sc->bge_cdata.bge_rx_return_ring_tag,
2499 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2500 &sc->bge_cdata.bge_rx_return_ring_map,
2501 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2502 if (error)
2503 return (error);
2504
2505 /* Create tag for TX ring. */
2506 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2507 &sc->bge_cdata.bge_tx_ring_tag,
2508 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2509 &sc->bge_cdata.bge_tx_ring_map,
2510 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2511 if (error)
2512 return (error);
2513
2514 /*
2515 * Create tag for status block.
2516	 * Because we use only a single Tx/Rx/Rx return ring, use the
2517	 * minimum status block size, except on BCM5700 AX/BX, which
2518	 * seems to want to see the full status block size regardless
2519	 * of the configured number of rings.
2520 */
2521 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2522 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2523 sbsz = BGE_STATUS_BLK_SZ;
2524 else
2525 sbsz = 32;
2526 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2527 &sc->bge_cdata.bge_status_tag,
2528 (uint8_t **)&sc->bge_ldata.bge_status_block,
2529 &sc->bge_cdata.bge_status_map,
2530 &sc->bge_ldata.bge_status_block_paddr, "status block");
2531 if (error)
2532 return (error);
2533
2534 /* Create tag for statistics block. */
2535 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2536 &sc->bge_cdata.bge_stats_tag,
2537 (uint8_t **)&sc->bge_ldata.bge_stats,
2538 &sc->bge_cdata.bge_stats_map,
2539 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2540 if (error)
2541 return (error);
2542
2543 /* Create tag for jumbo RX ring. */
2544 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2545 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2546 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2547 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2548 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2549 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2550 if (error)
2551 return (error);
2552 }
2553
2554 /* Create parent tag for buffers. */
2555 boundary = 0;
2556 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2557 boundary = BGE_DMA_BNDRY;
2558 /*
2559 * XXX
2560		 * A watchdog timeout issue was observed on BCM5704 controllers
2561		 * that live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
2562		 * bridge). Limiting the DMA address space to 32 bits seems to
2563		 * address it.
2564 */
2565 if (sc->bge_flags & BGE_FLAG_PCIX)
2566 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2567 }
2568 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2569 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2570 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2571 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2572 if (error != 0) {
2573 device_printf(sc->bge_dev,
2574 "could not allocate buffer dma tag\n");
2575 return (ENOMEM);
2576 }
2577 /* Create tag for Tx mbufs. */
2578 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2579 txsegsz = BGE_TSOSEG_SZ;
2580 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2581 } else {
2582 txsegsz = MCLBYTES;
2583 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2584 }
2585 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2586 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2587 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2588 &sc->bge_cdata.bge_tx_mtag);
2589
2590 if (error) {
2591 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2592 return (ENOMEM);
2593 }
2594
2595 /* Create tag for Rx mbufs. */
2596 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2597 rxmaxsegsz = MJUM9BYTES;
2598 else
2599 rxmaxsegsz = MCLBYTES;
2600 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2601 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2602 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2603
2604 if (error) {
2605 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2606 return (ENOMEM);
2607 }
2608
2609 /* Create DMA maps for RX buffers. */
2610 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2611 &sc->bge_cdata.bge_rx_std_sparemap);
2612 if (error) {
2613 device_printf(sc->bge_dev,
2614 "can't create spare DMA map for RX\n");
2615 return (ENOMEM);
2616 }
2617 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2618 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2619 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2620 if (error) {
2621 device_printf(sc->bge_dev,
2622 "can't create DMA map for RX\n");
2623 return (ENOMEM);
2624 }
2625 }
2626
2627 /* Create DMA maps for TX buffers. */
2628 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2629 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2630 &sc->bge_cdata.bge_tx_dmamap[i]);
2631 if (error) {
2632 device_printf(sc->bge_dev,
2633 "can't create DMA map for TX\n");
2634 return (ENOMEM);
2635 }
2636 }
2637
2638 /* Create tags for jumbo RX buffers. */
2639 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2640 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2641 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2642 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2643 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2644 if (error) {
2645 device_printf(sc->bge_dev,
2646 "could not allocate jumbo dma tag\n");
2647 return (ENOMEM);
2648 }
2649 /* Create DMA maps for jumbo RX buffers. */
2650 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2651 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2652 if (error) {
2653 device_printf(sc->bge_dev,
2654 "can't create spare DMA map for jumbo RX\n");
2655 return (ENOMEM);
2656 }
2657 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2658 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2659 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2660 if (error) {
2661 device_printf(sc->bge_dev,
2662 "can't create DMA map for jumbo RX\n");
2663 return (ENOMEM);
2664 }
2665 }
2666 }
2667
2668 return (0);
2669}
2670
2671/*
2672 * Return true if this device has more than one port.
2673 */
2674static int
2675bge_has_multiple_ports(struct bge_softc *sc)
2676{
2677 device_t dev = sc->bge_dev;
2678 u_int b, d, f, fscan, s;
2679
2680 d = pci_get_domain(dev);
2681 b = pci_get_bus(dev);
2682 s = pci_get_slot(dev);
2683 f = pci_get_function(dev);
2684 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2685 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2686 return (1);
2687 return (0);
2688}
2689
2690/*
2691 * Return true if MSI can be used with this device.
2692 */
2693static int
2694bge_can_use_msi(struct bge_softc *sc)
2695{
2696 int can_use_msi = 0;
2697
2698 /* Disable MSI for polling(4). */
2699#ifdef DEVICE_POLLING
2700 return (0);
2701#endif
2702 switch (sc->bge_asicrev) {
2703 case BGE_ASICREV_BCM5714_A0:
2704 case BGE_ASICREV_BCM5714:
2705 /*
2706 * Apparently, MSI doesn't work when these chips are
2707 * configured in single-port mode.
2708 */
2709 if (bge_has_multiple_ports(sc))
2710 can_use_msi = 1;
2711 break;
2712 case BGE_ASICREV_BCM5750:
2713 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2714 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2715 can_use_msi = 1;
2716 break;
2717 default:
2718 if (BGE_IS_575X_PLUS(sc))
2719 can_use_msi = 1;
2720 }
2721 return (can_use_msi);
2722}
2723
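/*
 * Attach routine: map the control/status registers, identify the chip
 * and record its quirks, allocate the interrupt (MSI if possible) and
 * DMA resources, reset and initialize the chip, read the station
 * address, attach the PHY or TBI media, and hook up the interrupt
 * handler last.
 */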
2724static int
2725bge_attach(device_t dev)
2726{
2727 struct ifnet *ifp;
2728 struct bge_softc *sc;
2729 uint32_t hwcfg = 0, misccfg;
2730 u_char eaddr[ETHER_ADDR_LEN];
2731 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2732
2733 sc = device_get_softc(dev);
2734 sc->bge_dev = dev;
2735
2736 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2737
2738 /*
2739 * Map control/status registers.
2740 */
2741 pci_enable_busmaster(dev);
2742
2743 rid = PCIR_BAR(0);
2744 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2745 RF_ACTIVE);
2746
2747 if (sc->bge_res == NULL) {
2748 device_printf (sc->bge_dev, "couldn't map memory\n");
2749 error = ENXIO;
2750 goto fail;
2751 }
2752
2753 /* Save various chip information. */
2754 sc->bge_chipid =
2755 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2756 BGE_PCIMISCCTL_ASICREV_SHIFT;
2757 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2758 /*
2759		 * Find the ASIC revision. Different chips use different
2760 * registers.
2761 */
2762 switch (pci_get_device(dev)) {
2763 case BCOM_DEVICEID_BCM5717:
2764 case BCOM_DEVICEID_BCM5718:
2765 case BCOM_DEVICEID_BCM5719:
2766 sc->bge_chipid = pci_read_config(dev,
2767 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2768 break;
2769 case BCOM_DEVICEID_BCM57761:
2770 case BCOM_DEVICEID_BCM57765:
2771 case BCOM_DEVICEID_BCM57781:
2772 case BCOM_DEVICEID_BCM57785:
2773 case BCOM_DEVICEID_BCM57791:
2774 case BCOM_DEVICEID_BCM57795:
2775 sc->bge_chipid = pci_read_config(dev,
2776 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2777 break;
2778 default:
2779 sc->bge_chipid = pci_read_config(dev,
2780 BGE_PCI_PRODID_ASICREV, 4);
2781 }
2782 }
2783 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2784 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2785
2786 /* Set default PHY address. */
2787 phy_addr = 1;
2788 /*
2789 * PHY address mapping for various devices.
2790 *
2791 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2792 * ---------+-------+-------+-------+-------+
2793 * BCM57XX | 1 | X | X | X |
2794 * BCM5704 | 1 | X | 1 | X |
2795 * BCM5717 | 1 | 8 | 2 | 9 |
2796 * BCM5719 | 1 | 8 | 2 | 9 |
2797 *
2798 * Other addresses may respond but they are not
2799 * IEEE compliant PHYs and should be ignored.
2800 */
2801 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2802 sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2803 f = pci_get_function(dev);
2804 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2805 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2806 BGE_SGDIGSTS_IS_SERDES)
2807 phy_addr = f + 8;
2808 else
2809 phy_addr = f + 1;
2810 } else {
2811 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2812 BGE_CPMU_PHY_STRAP_IS_SERDES)
2813 phy_addr = f + 8;
2814 else
2815 phy_addr = f + 1;
2816 }
2817 }
2818
2819 /*
2820 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2821 * 5705 A0 and A1 chips.
2822 */
2823 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2824 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2825 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2826 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2827 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2828 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2829
2830 if (bge_has_eaddr(sc))
2831 sc->bge_flags |= BGE_FLAG_EADDR;
2832
2833 /* Save chipset family. */
2834 switch (sc->bge_asicrev) {
2835 case BGE_ASICREV_BCM5717:
2836 case BGE_ASICREV_BCM5719:
2837 case BGE_ASICREV_BCM57765:
2838 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2839 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2840 BGE_FLAG_JUMBO_FRAME;
2841 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2842 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2843 /* Jumbo frame on BCM5719 A0 does not work. */
2844 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2845 }
2846 break;
2847 case BGE_ASICREV_BCM5755:
2848 case BGE_ASICREV_BCM5761:
2849 case BGE_ASICREV_BCM5784:
2850 case BGE_ASICREV_BCM5785:
2851 case BGE_ASICREV_BCM5787:
2852 case BGE_ASICREV_BCM57780:
2853 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2854 BGE_FLAG_5705_PLUS;
2855 break;
2856 case BGE_ASICREV_BCM5700:
2857 case BGE_ASICREV_BCM5701:
2858 case BGE_ASICREV_BCM5703:
2859 case BGE_ASICREV_BCM5704:
2860 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2861 break;
2862 case BGE_ASICREV_BCM5714_A0:
2863 case BGE_ASICREV_BCM5780:
2864 case BGE_ASICREV_BCM5714:
2865 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2866 /* FALLTHROUGH */
2867 case BGE_ASICREV_BCM5750:
2868 case BGE_ASICREV_BCM5752:
2869 case BGE_ASICREV_BCM5906:
2870 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2871 /* FALLTHROUGH */
2872 case BGE_ASICREV_BCM5705:
2873 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2874 break;
2875 }
2876
2877 /* Set various PHY bug flags. */
2878 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2879 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2880 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2881 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2882 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2883 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2884 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2885 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2886 if (pci_get_subvendor(dev) == DELL_VENDORID)
2887 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2888 if ((BGE_IS_5705_PLUS(sc)) &&
2889 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2890 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2891 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2892 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2893 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2894 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2895 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2896 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2897 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2898 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2899 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2900 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2901 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2902 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2903 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2904 } else
2905 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2906 }
2907
2908	/* Identify the chips that use a CPMU. */
2909 if (BGE_IS_5717_PLUS(sc) ||
2910 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2911 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2912 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2913 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2914 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2915 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2916 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2917 else
2918 sc->bge_mi_mode = BGE_MIMODE_BASE;
2919 /* Enable auto polling for BCM570[0-5]. */
2920 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2921 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2922
2923 /*
2924	 * All Broadcom controllers have the 4GB boundary DMA bug.
2925	 * Whenever an address crosses a multiple of the 4GB boundary
2926	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2927	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2928	 * state machine will lock up and cause the device to hang.
2929 */
2930 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2931
2932 /* BCM5755 or higher and BCM5906 have short DMA bug. */
2933 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2934 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2935
2936 /*
2937	 * BCM5719 cannot handle DMA requests for DMA segments that
2938	 * are larger than 4KB. However, the maximum DMA segment size
2939	 * created in the DMA tag is 4KB for TSO, so we shouldn't
2940	 * encounter the issue here.
2941 */
2942 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2943 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2944
2945 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2946 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2947 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2948 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2949 sc->bge_flags |= BGE_FLAG_5788;
2950 }
2951
2952 capmask = BMSR_DEFCAPMASK;
2953 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2954 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2955 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2956 pci_get_vendor(dev) == BCOM_VENDORID &&
2957 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2958 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2959 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2960 (pci_get_vendor(dev) == BCOM_VENDORID &&
2961 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2962 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2963 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2964 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2965 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2966 /* These chips are 10/100 only. */
2967 capmask &= ~BMSR_EXTSTAT;
2968 }
2969
2970 /*
2971	 * Some controllers seem to require special firmware to use
2972	 * TSO. But the firmware is not available to FreeBSD, and Linux
2973	 * claims that the TSO performed by the firmware is slower than
2974	 * hardware-based TSO. Moreover, the firmware-based TSO has a
2975	 * known bug: it can't handle TSO if the Ethernet header plus
2976	 * IP/TCP header is greater than 80 bytes. A workaround for the
2977	 * TSO bug exists, but it seems more expensive than not using
2978	 * TSO at all. Some hardware also has the TSO bug, so limit
2979	 * TSO to the controllers that are not affected by TSO issues
2980	 * (e.g. 5755 or higher).
2981 */
2982 if (BGE_IS_5717_PLUS(sc)) {
2983 /* BCM5717 requires different TSO configuration. */
2984 sc->bge_flags |= BGE_FLAG_TSO3;
2985 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2986 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2987 /* TSO on BCM5719 A0 does not work. */
2988 sc->bge_flags &= ~BGE_FLAG_TSO3;
2989 }
2990 } else if (BGE_IS_5755_PLUS(sc)) {
2991 /*
2992		 * BCM5754 and BCM5787 share the same ASIC id, so an
2993		 * explicit device id check is required.
2994		 * For unknown reasons, TSO does not work on BCM5755M.
2995 */
2996 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2997 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2998 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2999 sc->bge_flags |= BGE_FLAG_TSO;
3000 }
3001
3002 /*
3003 * Check if this is a PCI-X or PCI Express device.
3004 */
3005 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3006 /*
3007		 * Found a PCI Express capabilities register; this
3008 * must be a PCI Express device.
3009 */
3010 sc->bge_flags |= BGE_FLAG_PCIE;
3011 sc->bge_expcap = reg;
3012 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3013 pci_set_max_read_req(dev, 2048);
3014 else if (pci_get_max_read_req(dev) != 4096)
3015 pci_set_max_read_req(dev, 4096);
3016 } else {
3017 /*
3018 * Check if the device is in PCI-X Mode.
3019 * (This bit is not valid on PCI Express controllers.)
3020 */
3021 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3022 sc->bge_pcixcap = reg;
3023 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3024 BGE_PCISTATE_PCI_BUSMODE) == 0)
3025 sc->bge_flags |= BGE_FLAG_PCIX;
3026 }
3027
3028 /*
3029 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3030 * not actually a MAC controller bug but an issue with the embedded
3031 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3032 */
3033 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3034 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3035 /*
3036 * Allocate the interrupt, using MSI if possible. These devices
3037 * support 8 MSI messages, but only the first one is used in
3038 * normal operation.
3039 */
3040 rid = 0;
3041 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3042 sc->bge_msicap = reg;
3043 if (bge_can_use_msi(sc)) {
3044 msicount = pci_msi_count(dev);
3045 if (msicount > 1)
3046 msicount = 1;
3047 } else
3048 msicount = 0;
3049 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3050 rid = 1;
3051 sc->bge_flags |= BGE_FLAG_MSI;
3052 }
3053 }
3054
3055 /*
3056	 * All controllers except the BCM5700 support tagged status, but
3057	 * we use tagged status only for the MSI case on BCM5717. Otherwise
3058	 * MSI on BCM5717 does not work.
3059 */
3060#ifndef DEVICE_POLLING
3061 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3062 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3063#endif
3064
3065 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3066 RF_SHAREABLE | RF_ACTIVE);
3067
3068 if (sc->bge_irq == NULL) {
3069 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3070 error = ENXIO;
3071 goto fail;
3072 }
3073
3074 device_printf(dev,
3075 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3076 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3077 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3078 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3079
3080 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3081
3082 /* Try to reset the chip. */
3083 if (bge_reset(sc)) {
3084 device_printf(sc->bge_dev, "chip reset failed\n");
3085 error = ENXIO;
3086 goto fail;
3087 }
3088
3089 sc->bge_asf_mode = 0;
3090 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3091 BGE_SRAM_DATA_SIG_MAGIC)) {
3092 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3093 & BGE_HWCFG_ASF) {
3094 sc->bge_asf_mode |= ASF_ENABLE;
3095 sc->bge_asf_mode |= ASF_STACKUP;
3096 if (BGE_IS_575X_PLUS(sc))
3097 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3098 }
3099 }
3100
3101 /* Try to reset the chip again the nice way. */
3102 bge_stop_fw(sc);
3103 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3104 if (bge_reset(sc)) {
3105 device_printf(sc->bge_dev, "chip reset failed\n");
3106 error = ENXIO;
3107 goto fail;
3108 }
3109
3110 bge_sig_legacy(sc, BGE_RESET_STOP);
3111 bge_sig_post_reset(sc, BGE_RESET_STOP);
3112
3113 if (bge_chipinit(sc)) {
3114 device_printf(sc->bge_dev, "chip initialization failed\n");
3115 error = ENXIO;
3116 goto fail;
3117 }
3118
3119 error = bge_get_eaddr(sc, eaddr);
3120 if (error) {
3121 device_printf(sc->bge_dev,
3122 "failed to read station address\n");
3123 error = ENXIO;
3124 goto fail;
3125 }
3126
3127 /* 5705 limits RX return ring to 512 entries. */
3128 if (BGE_IS_5717_PLUS(sc))
3129 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3130 else if (BGE_IS_5705_PLUS(sc))
3131 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3132 else
3133 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3134
3135 if (bge_dma_alloc(sc)) {
3136 device_printf(sc->bge_dev,
3137 "failed to allocate DMA resources\n");
3138 error = ENXIO;
3139 goto fail;
3140 }
3141
3142 bge_add_sysctls(sc);
3143
3144 /* Set default tuneable values. */
3145 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3146 sc->bge_rx_coal_ticks = 150;
3147 sc->bge_tx_coal_ticks = 150;
3148 sc->bge_rx_max_coal_bds = 10;
3149 sc->bge_tx_max_coal_bds = 10;
3150
3151 /* Initialize checksum features to use. */
3152 sc->bge_csum_features = BGE_CSUM_FEATURES;
3153 if (sc->bge_forced_udpcsum != 0)
3154 sc->bge_csum_features |= CSUM_UDP;
3155
3156 /* Set up ifnet structure */
3157 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3158 if (ifp == NULL) {
3159 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3160 error = ENXIO;
3161 goto fail;
3162 }
3163 ifp->if_softc = sc;
3164 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3165 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3166 ifp->if_ioctl = bge_ioctl;
3167 ifp->if_start = bge_start;
3168 ifp->if_init = bge_init;
3169 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3170 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3171 IFQ_SET_READY(&ifp->if_snd);
3172 ifp->if_hwassist = sc->bge_csum_features;
3173 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3174 IFCAP_VLAN_MTU;
3175 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3176 ifp->if_hwassist |= CSUM_TSO;
3177 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3178 }
3179#ifdef IFCAP_VLAN_HWCSUM
3180 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3181#endif
3182 ifp->if_capenable = ifp->if_capabilities;
3183#ifdef DEVICE_POLLING
3184 ifp->if_capabilities |= IFCAP_POLLING;
3185#endif
3186
3187 /*
3188 * 5700 B0 chips do not support checksumming correctly due
3189 * to hardware bugs.
3190 */
3191 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3192 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3193 ifp->if_capenable &= ~IFCAP_HWCSUM;
3194 ifp->if_hwassist = 0;
3195 }
3196
3197 /*
3198 * Figure out what sort of media we have by checking the
3199 * hardware config word in the first 32k of NIC internal memory,
3200 * or fall back to examining the EEPROM if necessary.
3201 * Note: on some BCM5700 cards, this value appears to be unset.
3202 * If that's the case, we have to rely on identifying the NIC
3203 * by its PCI subsystem ID, as we do below for the SysKonnect
3204 * SK-9D41.
3205 */
3206 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3207 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3208 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3209 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3210 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3211 sizeof(hwcfg))) {
3212 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3213 error = ENXIO;
3214 goto fail;
3215 }
3216 hwcfg = ntohl(hwcfg);
3217 }
3218
3219 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3220 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3221 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3222 if (BGE_IS_5714_FAMILY(sc))
3223 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3224 else
3225 sc->bge_flags |= BGE_FLAG_TBI;
3226 }
3227
3228 if (sc->bge_flags & BGE_FLAG_TBI) {
3229 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3230 bge_ifmedia_sts);
3231 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3232 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3233 0, NULL);
3234 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3235 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3236 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3237 } else {
3238 /*
3239 * Do transceiver setup and tell the firmware the
3240		 * driver is down so we can try to get access to probe
3241		 * the PHY if ASF is running. Retry a couple of times
3242 * if we get a conflict with the ASF firmware accessing
3243 * the PHY.
3244 */
3245 trys = 0;
3246 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3247again:
3248 bge_asf_driver_up(sc);
3249
3250 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3251 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3252 MIIF_DOPAUSE);
3253 if (error != 0) {
3254 if (trys++ < 4) {
3255 device_printf(sc->bge_dev, "Try again\n");
3256 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3257 BMCR_RESET);
3258 goto again;
3259 }
3260 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3261 goto fail;
3262 }
3263
3264 /*
3265		 * Now tell the firmware we are going up after probing the PHY.
3266 */
3267 if (sc->bge_asf_mode & ASF_STACKUP)
3268 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3269 }
3270
3271 /*
3272 * When using the BCM5701 in PCI-X mode, data corruption has
3273 * been observed in the first few bytes of some received packets.
3274 * Aligning the packet buffer in memory eliminates the corruption.
3275 * Unfortunately, this misaligns the packet payloads. On platforms
3276 * which do not support unaligned accesses, we will realign the
3277 * payloads by copying the received packets.
3278 */
3279 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3280 sc->bge_flags & BGE_FLAG_PCIX)
3281 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3282
3283 /*
3284 * Call MI attach routine.
3285 */
3286 ether_ifattach(ifp, eaddr);
3287 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3288
3289 /* Tell upper layer we support long frames. */
3290 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3291
3292 /*
3293 * Hookup IRQ last.
3294 */
3295 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3296 /* Take advantage of single-shot MSI. */
3297 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3298 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3299 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3300 taskqueue_thread_enqueue, &sc->bge_tq);
3301 if (sc->bge_tq == NULL) {
3302 device_printf(dev, "could not create taskqueue.\n");
3303 ether_ifdetach(ifp);
3304 error = ENXIO;
3305 goto fail;
3306 }
3307 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3308 device_get_nameunit(sc->bge_dev));
3309 error = bus_setup_intr(dev, sc->bge_irq,
3310 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3311 &sc->bge_intrhand);
3312 if (error)
3313 ether_ifdetach(ifp);
3314 } else
3315 error = bus_setup_intr(dev, sc->bge_irq,
3316 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3317 &sc->bge_intrhand);
3318
3319 if (error) {
3320 bge_detach(dev);
3321 device_printf(sc->bge_dev, "couldn't set up irq\n");
3322 }
3323
3324 return (0);
3325
3326fail:
3327 bge_release_resources(sc);
3328
3329 return (error);
3330}
3331
3332static int
3333bge_detach(device_t dev)
3334{
3335 struct bge_softc *sc;
3336 struct ifnet *ifp;
3337
3338 sc = device_get_softc(dev);
3339 ifp = sc->bge_ifp;
3340
3341#ifdef DEVICE_POLLING
3342 if (ifp->if_capenable & IFCAP_POLLING)
3343 ether_poll_deregister(ifp);
3344#endif
3345
3346 BGE_LOCK(sc);
3347 bge_stop(sc);
3348 bge_reset(sc);
3349 BGE_UNLOCK(sc);
3350
3351 callout_drain(&sc->bge_stat_ch);
3352
3353 if (sc->bge_tq)
3354 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3355 ether_ifdetach(ifp);
3356
3357 if (sc->bge_flags & BGE_FLAG_TBI) {
3358 ifmedia_removeall(&sc->bge_ifmedia);
3359 } else {
3360 bus_generic_detach(dev);
3361 device_delete_child(dev, sc->bge_miibus);
3362 }
3363
3364 bge_release_resources(sc);
3365
3366 return (0);
3367}
3368
3369static void
3370bge_release_resources(struct bge_softc *sc)
3371{
3372 device_t dev;
3373
3374 dev = sc->bge_dev;
3375
3376 if (sc->bge_tq != NULL)
3377 taskqueue_free(sc->bge_tq);
3378
3379 if (sc->bge_intrhand != NULL)
3380 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3381
3382 if (sc->bge_irq != NULL)
3383 bus_release_resource(dev, SYS_RES_IRQ,
3384 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3385
3386 if (sc->bge_flags & BGE_FLAG_MSI)
3387 pci_release_msi(dev);
3388
3389 if (sc->bge_res != NULL)
3390 bus_release_resource(dev, SYS_RES_MEMORY,
3391 PCIR_BAR(0), sc->bge_res);
3392
3393 if (sc->bge_ifp != NULL)
3394 if_free(sc->bge_ifp);
3395
3396 bge_dma_free(sc);
3397
3398 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3399 BGE_LOCK_DESTROY(sc);
3400}
3401
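/*
 * Reset the chip: save the important PCI configuration state, issue a
 * global reset, restore the saved state, and wait for the firmware
 * handshake (or the VCPU on BCM5906) to indicate that initialization
 * has completed.
 */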
3402static int
3403bge_reset(struct bge_softc *sc)
3404{
3405 device_t dev;
3406 uint32_t cachesize, command, pcistate, reset, val;
3407 void (*write_op)(struct bge_softc *, int, int);
3408 uint16_t devctl;
3409 int i;
3410
3411 dev = sc->bge_dev;
3412
3413 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3414 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3415 if (sc->bge_flags & BGE_FLAG_PCIE)
3416 write_op = bge_writemem_direct;
3417 else
3418 write_op = bge_writemem_ind;
3419 } else
3420 write_op = bge_writereg_ind;
3421
3422 /* Save some important PCI state. */
3423 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3424 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3425 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3426
3427 pci_write_config(dev, BGE_PCI_MISC_CTL,
3428 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3429 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3430
3431 /* Disable fastboot on controllers that support it. */
3432 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3433 BGE_IS_5755_PLUS(sc)) {
3434 if (bootverbose)
3435 device_printf(dev, "Disabling fastboot\n");
3436 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3437 }
3438
3439 /*
3440 * Write the magic number to SRAM at offset 0xB50.
3441 * When firmware finishes its initialization it will
3442 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3443 */
3444 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3445
3446 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3447
3448 /* XXX: Broadcom Linux driver. */
3449 if (sc->bge_flags & BGE_FLAG_PCIE) {
3450 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3451 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3452 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3453 /* Prevent PCIE link training during global reset */
3454 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3455 reset |= 1 << 29;
3456 }
3457 }
3458
3459 /*
3460 * Set GPHY Power Down Override to leave GPHY
3461 * powered up in D0 uninitialized.
3462 */
3463 if (BGE_IS_5705_PLUS(sc) &&
3464 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3465 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3466
3467 /* Issue global reset */
3468 write_op(sc, BGE_MISC_CFG, reset);
3469
3470 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3471 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3472 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3473 val | BGE_VCPU_STATUS_DRV_RESET);
3474 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3475 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3476 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3477 }
3478
3479 DELAY(1000);
3480
3481 /* XXX: Broadcom Linux driver. */
3482 if (sc->bge_flags & BGE_FLAG_PCIE) {
3483 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3484 DELAY(500000); /* wait for link training to complete */
3485 val = pci_read_config(dev, 0xC4, 4);
3486 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3487 }
3488 devctl = pci_read_config(dev,
3489 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3490 /* Clear enable no snoop and disable relaxed ordering. */
3491 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3492 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3493 /* Set PCIE max payload size to 128. */
3494 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3495 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3496 devctl, 2);
3497 /* Clear error status. */
3498 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3499 PCIM_EXP_STA_CORRECTABLE_ERROR |
3500 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3501 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3502 }
3503
3504 /* Reset some of the PCI state that got zapped by reset. */
3505 pci_write_config(dev, BGE_PCI_MISC_CTL,
3506 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3507 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3508 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3509 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3510 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3511 /*
3512	 * Disable PCI-X relaxed ordering to ensure the status block update
3513	 * arrives before the packet buffer DMA. Otherwise the driver may
3514	 * read a stale status block.
3515 */
3516 if (sc->bge_flags & BGE_FLAG_PCIX) {
3517 devctl = pci_read_config(dev,
3518 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3519 devctl &= ~PCIXM_COMMAND_ERO;
3520 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3521 devctl &= ~PCIXM_COMMAND_MAX_READ;
3522 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3523 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3524 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3525 PCIXM_COMMAND_MAX_READ);
3526 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3527 }
3528 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3529 devctl, 2);
3530 }
3531 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3532 if (BGE_IS_5714_FAMILY(sc)) {
3533 /* This chip disables MSI on reset. */
3534 if (sc->bge_flags & BGE_FLAG_MSI) {
3535 val = pci_read_config(dev,
3536 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3537 pci_write_config(dev,
3538 sc->bge_msicap + PCIR_MSI_CTRL,
3539 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3540 val = CSR_READ_4(sc, BGE_MSI_MODE);
3541 CSR_WRITE_4(sc, BGE_MSI_MODE,
3542 val | BGE_MSIMODE_ENABLE);
3543 }
3544 val = CSR_READ_4(sc, BGE_MARB_MODE);
3545 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3546 } else
3547 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3548
3549 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3550 for (i = 0; i < BGE_TIMEOUT; i++) {
3551 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3552 if (val & BGE_VCPU_STATUS_INIT_DONE)
3553 break;
3554 DELAY(100);
3555 }
3556 if (i == BGE_TIMEOUT) {
3557 device_printf(dev, "reset timed out\n");
3558 return (1);
3559 }
3560 } else {
3561 /*
3562 * Poll until we see the 1's complement of the magic number.
3563 * This indicates that the firmware initialization is complete.
3564 * We expect this to fail if no chip containing the Ethernet
3565 * address is fitted though.
3566 */
3567 for (i = 0; i < BGE_TIMEOUT; i++) {
3568 DELAY(10);
3569 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3570 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3571 break;
3572 }
3573
3574 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3575 device_printf(dev,
3576 "firmware handshake timed out, found 0x%08x\n",
3577 val);
3578 /* BCM57765 A0 needs additional time before accessing. */
3579 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3580 DELAY(10 * 1000); /* XXX */
3581 }
3582
3583 /*
3584 * XXX Wait for the value of the PCISTATE register to
3585 * return to its original pre-reset state. This is a
3586 * fairly good indicator of reset completion. If we don't
3587 * wait for the reset to fully complete, trying to read
3588 * from the device's non-PCI registers may yield garbage
3589 * results.
3590 */
3591 for (i = 0; i < BGE_TIMEOUT; i++) {
3592 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3593 break;
3594 DELAY(10);
3595 }
3596
3597 /* Fix up byte swapping. */
3598 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3599 BGE_MODECTL_BYTESWAP_DATA);
3600
3601 /* Tell the ASF firmware we are up */
3602 if (sc->bge_asf_mode & ASF_STACKUP)
3603 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3604
3605 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3606
3607 /*
3608 * The 5704 in TBI mode apparently needs some special
3609	 * adjustment to ensure the SERDES drive level is set
3610 * to 1.2V.
3611 */
3612 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3613 sc->bge_flags & BGE_FLAG_TBI) {
3614 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3615 val = (val & ~0xFFF) | 0x880;
3616 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3617 }
3618
3619 /* XXX: Broadcom Linux driver. */
3620 if (sc->bge_flags & BGE_FLAG_PCIE &&
3621 !BGE_IS_5717_PLUS(sc) &&
3622 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3623 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3624 /* Enable Data FIFO protection. */
3625 val = CSR_READ_4(sc, 0x7C00);
3626 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3627 }
3628 DELAY(10000);
3629
3630 return (0);
3631}
3632
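/*
 * Re-post an existing RX buffer to the standard ring; used when the
 * received frame had an error or a replacement mbuf could not be
 * allocated. bge_rxreuse_jumbo() below does the same for the jumbo
 * ring.
 */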
3633static __inline void
3634bge_rxreuse_std(struct bge_softc *sc, int i)
3635{
3636 struct bge_rx_bd *r;
3637
3638 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3639 r->bge_flags = BGE_RXBDFLAG_END;
3640 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3641 r->bge_idx = i;
3642 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3643}
3644
3645static __inline void
3646bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3647{
3648 struct bge_extrx_bd *r;
3649
3650 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3651 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3652 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3653 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3654 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3655 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3656 r->bge_idx = i;
3657 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3658}
3659
3660/*
3661 * Frame reception handling. This is called if there's a frame
3662 * on the receive return list.
3663 *
3664 * Note: we have to be able to handle two possibilities here:
3665 * 1) the frame is from the jumbo receive ring
3666 * 2) the frame is from the standard receive ring
3667 */
3668
3669static int
3670bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3671{
3672 struct ifnet *ifp;
3673 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3674 uint16_t rx_cons;
3675
3676 rx_cons = sc->bge_rx_saved_considx;
3677
3678 /* Nothing to do. */
3679 if (rx_cons == rx_prod)
3680 return (rx_npkts);
3681
3682 ifp = sc->bge_ifp;
3683
3684 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3685 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3686 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3687 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3688 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3689 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3690 (MCLBYTES - ETHER_ALIGN))
3691 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3692 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3693
3694 while (rx_cons != rx_prod) {
3695 struct bge_rx_bd *cur_rx;
3696 uint32_t rxidx;
3697 struct mbuf *m = NULL;
3698 uint16_t vlan_tag = 0;
3699 int have_tag = 0;
3700
3701#ifdef DEVICE_POLLING
3702 if (ifp->if_capenable & IFCAP_POLLING) {
3703 if (sc->rxcycles <= 0)
3704 break;
3705 sc->rxcycles--;
3706 }
3707#endif
3708
3709 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3710
3711 rxidx = cur_rx->bge_idx;
3712 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3713
3714 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3715 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3716 have_tag = 1;
3717 vlan_tag = cur_rx->bge_vlan_tag;
3718 }
3719
3720 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3721 jumbocnt++;
3722 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3723 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3724 bge_rxreuse_jumbo(sc, rxidx);
3725 continue;
3726 }
3727 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3728 bge_rxreuse_jumbo(sc, rxidx);
3729 ifp->if_iqdrops++;
3730 continue;
3731 }
3732 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3733 } else {
3734 stdcnt++;
3735 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3736 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3737 bge_rxreuse_std(sc, rxidx);
3738 continue;
3739 }
3740 if (bge_newbuf_std(sc, rxidx) != 0) {
3741 bge_rxreuse_std(sc, rxidx);
3742 ifp->if_iqdrops++;
3743 continue;
3744 }
3745 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3746 }
3747
3748 ifp->if_ipackets++;
3749#ifndef __NO_STRICT_ALIGNMENT
3750 /*
3751 * For architectures with strict alignment we must make sure
3752 * the payload is aligned.
3753 */
3754 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3755 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3756 cur_rx->bge_len);
3757 m->m_data += ETHER_ALIGN;
3758 }
3759#endif
3760 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3761 m->m_pkthdr.rcvif = ifp;
3762
3763 if (ifp->if_capenable & IFCAP_RXCSUM)
3764 bge_rxcsum(sc, cur_rx, m);
3765
3766 /*
3767 * If we received a packet with a vlan tag,
3768 * attach that information to the packet.
3769 */
3770 if (have_tag) {
3771 m->m_pkthdr.ether_vtag = vlan_tag;
3772 m->m_flags |= M_VLANTAG;
3773 }
3774
3775 if (holdlck != 0) {
3776 BGE_UNLOCK(sc);
3777 (*ifp->if_input)(ifp, m);
3778 BGE_LOCK(sc);
3779 } else
3780 (*ifp->if_input)(ifp, m);
3781 rx_npkts++;
3782
3783 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3784 return (rx_npkts);
3785 }
3786
3787 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3788 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3789 if (stdcnt > 0)
3790 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3791 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3792
3793 if (jumbocnt > 0)
3794 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3795 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3796
3797 sc->bge_rx_saved_considx = rx_cons;
3798 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3799 if (stdcnt)
3800 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3801 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3802 if (jumbocnt)
3803 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3804 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3805#ifdef notyet
3806 /*
3807 * This register wraps very quickly under heavy packet drops.
3808 * If you need correct statistics, you can enable this check.
3809 */
3810 if (BGE_IS_5705_PLUS(sc))
3811 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3812#endif
3813 return (rx_npkts);
3814}
3815
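/*
 * Translate the hardware RX checksum status from the RX descriptor
 * into mbuf checksum flags for the stack.
 */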
3816static void
3817bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3818{
3819
3820 if (BGE_IS_5717_PLUS(sc)) {
3821 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3822 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3823 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3824 if ((cur_rx->bge_error_flag &
3825 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3826 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3827 }
3828 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3829 m->m_pkthdr.csum_data =
3830 cur_rx->bge_tcp_udp_csum;
3831 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3832 CSUM_PSEUDO_HDR;
3833 }
3834 }
3835 } else {
3836 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3837 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3838 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3839 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3840 }
3841 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3842 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3843 m->m_pkthdr.csum_data =
3844 cur_rx->bge_tcp_udp_csum;
3845 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3846 CSUM_PSEUDO_HDR;
3847 }
3848 }
3849}
3850
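/*
 * TX completion handling: walk the TX ring up to the hardware consumer
 * index, unloading DMA maps and freeing the mbufs of frames that have
 * been transmitted.
 */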
3851static void
3852bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3853{
3854 struct bge_tx_bd *cur_tx;
3855 struct ifnet *ifp;
3856
3857 BGE_LOCK_ASSERT(sc);
3858
3859 /* Nothing to do. */
3860 if (sc->bge_tx_saved_considx == tx_cons)
3861 return;
3862
3863 ifp = sc->bge_ifp;
3864
3865 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3866 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3867 /*
3868 * Go through our tx ring and free mbufs for those
3869 * frames that have been sent.
3870 */
3871 while (sc->bge_tx_saved_considx != tx_cons) {
3872 uint32_t idx;
3873
3874 idx = sc->bge_tx_saved_considx;
3875 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3876 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3877 ifp->if_opackets++;
3878 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3879 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3880 sc->bge_cdata.bge_tx_dmamap[idx],
3881 BUS_DMASYNC_POSTWRITE);
3882 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3883 sc->bge_cdata.bge_tx_dmamap[idx]);
3884 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3885 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3886 }
3887 sc->bge_txcnt--;
3888 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3889 }
3890
3891 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3892 if (sc->bge_txcnt == 0)
3893 sc->bge_timer = 0;
3894}
3895
3896#ifdef DEVICE_POLLING
3897static int
3898bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3899{
3900 struct bge_softc *sc = ifp->if_softc;
3901 uint16_t rx_prod, tx_cons;
3902 uint32_t statusword;
3903 int rx_npkts = 0;
3904
3905 BGE_LOCK(sc);
3906 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3907 BGE_UNLOCK(sc);
3908 return (rx_npkts);
3909 }
3910
3911 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3912 sc->bge_cdata.bge_status_map,
3913 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3914 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3915 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3916
3917 statusword = sc->bge_ldata.bge_status_block->bge_status;
3918 sc->bge_ldata.bge_status_block->bge_status = 0;
3919
3920 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3921 sc->bge_cdata.bge_status_map,
3922 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3923
3924 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3925 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3926 sc->bge_link_evt++;
3927
3928 if (cmd == POLL_AND_CHECK_STATUS)
3929 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3930 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3931 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3932 bge_link_upd(sc);
3933
3934 sc->rxcycles = count;
3935 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3936 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3937 BGE_UNLOCK(sc);
3938 return (rx_npkts);
3939 }
3940 bge_txeof(sc, tx_cons);
3941 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3942 bge_start_locked(ifp);
3943
3944 BGE_UNLOCK(sc);
3945 return (rx_npkts);
3946}
3947#endif /* DEVICE_POLLING */
3948
3949static int
3950bge_msi_intr(void *arg)
3951{
3952 struct bge_softc *sc;
3953
3954 sc = (struct bge_softc *)arg;
3955 /*
3956	 * This interrupt is not shared, and the controller has already
3957	 * disabled further interrupts.
3958 */
3959 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3960 return (FILTER_HANDLED);
3961}
3962
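/*
 * Taskqueue handler used for MSI interrupts: read the updated status
 * block, handle link state changes, acknowledge the interrupt and then
 * process RX/TX completions.
 */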
3963static void
3964bge_intr_task(void *arg, int pending)
3965{
3966 struct bge_softc *sc;
3967 struct ifnet *ifp;
3968 uint32_t status, status_tag;
3969 uint16_t rx_prod, tx_cons;
3970
3971 sc = (struct bge_softc *)arg;
3972 ifp = sc->bge_ifp;
3973
3974 BGE_LOCK(sc);
3975 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3976 BGE_UNLOCK(sc);
3977 return;
3978 }
3979
3980 /* Get updated status block. */
3981 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3982 sc->bge_cdata.bge_status_map,
3983 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3984
3985	/* Save producer/consumer indexes. */
3986 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3987 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3988 status = sc->bge_ldata.bge_status_block->bge_status;
3989 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3990 sc->bge_ldata.bge_status_block->bge_status = 0;
3991 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3992 sc->bge_cdata.bge_status_map,
3993 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3994 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3995 status_tag = 0;
3996
3997 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3998 bge_link_upd(sc);
3999
4000 /* Let controller work. */
4001 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4002
4003 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4004 sc->bge_rx_saved_considx != rx_prod) {
4005 /* Check RX return ring producer/consumer. */
4006 BGE_UNLOCK(sc);
4007 bge_rxeof(sc, rx_prod, 0);
4008 BGE_LOCK(sc);
4009 }
4010 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4011 /* Check TX ring producer/consumer. */
4012 bge_txeof(sc, tx_cons);
4013 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4014 bge_start_locked(ifp);
4015 }
4016 BGE_UNLOCK(sc);
4017}
4018
4019static void
4020bge_intr(void *xsc)
4021{
4022 struct bge_softc *sc;
4023 struct ifnet *ifp;
4024 uint32_t statusword;
4025 uint16_t rx_prod, tx_cons;
4026
4027 sc = xsc;
4028
4029 BGE_LOCK(sc);
4030
4031 ifp = sc->bge_ifp;
4032
4033#ifdef DEVICE_POLLING
4034 if (ifp->if_capenable & IFCAP_POLLING) {
4035 BGE_UNLOCK(sc);
4036 return;
4037 }
4038#endif
4039
4040 /*
4041 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4042 * disable interrupts by writing nonzero like we used to, since with
4043 * our current organization this just gives complications and
4044 * pessimizations for re-enabling interrupts. We used to have races
4045 * instead of the necessary complications. Disabling interrupts
4046 * would just reduce the chance of a status update while we are
4047 * running (by switching to the interrupt-mode coalescence
4048 * parameters), but this chance is already very low so it is more
4049 * efficient to get another interrupt than prevent it.
4050 *
4051 * We do the ack first to ensure another interrupt if there is a
4052 * status update after the ack. We don't check for the status
4053 * changing later because it is more efficient to get another
4054 * interrupt than prevent it, not quite as above (not checking is
4055 * a smaller optimization than not toggling the interrupt enable,
4056	 * since checking doesn't involve PCI accesses and toggling requires
4057 * the status check). So toggling would probably be a pessimization
4058 * even with MSI. It would only be needed for using a task queue.
4059 */
4060 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4061
4062 /*
4063 * Do the mandatory PCI flush as well as get the link status.
4064 */
4065 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4066
4067 /* Make sure the descriptor ring indexes are coherent. */
4068 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4069 sc->bge_cdata.bge_status_map,
4070 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4071 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4072 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4073 sc->bge_ldata.bge_status_block->bge_status = 0;
4074 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4075 sc->bge_cdata.bge_status_map,
4076 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4077
4078 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4079 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4080 statusword || sc->bge_link_evt)
4081 bge_link_upd(sc);
4082
4083 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4084 /* Check RX return ring producer/consumer. */
4085 bge_rxeof(sc, rx_prod, 1);
4086 }
4087
4088 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4089 /* Check TX ring producer/consumer. */
4090 bge_txeof(sc, tx_cons);
4091 }
4092
4093 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4094 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4095 bge_start_locked(ifp);
4096
4097 BGE_UNLOCK(sc);
4098}
4099
4100static void
4101bge_asf_driver_up(struct bge_softc *sc)
4102{
4103 if (sc->bge_asf_mode & ASF_STACKUP) {
4104		/* Send ASF heartbeat approx. every 2s */
4105 if (sc->bge_asf_count)
4106			sc->bge_asf_count--;
4107 else {
4108 sc->bge_asf_count = 2;
4109 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4110 BGE_FW_DRV_ALIVE);
4111 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4112 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 3);
4113 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4114 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | (1 << 14));
4115 }
4116 }
4117}
4118
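/*
 * Once-per-second callout: update statistics, tick the MII state (or
 * poll link status manually in TBI mode), send ASF heartbeats and run
 * the watchdog.
 */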
4119static void
4120bge_tick(void *xsc)
4121{
4122 struct bge_softc *sc = xsc;
4123 struct mii_data *mii = NULL;
4124
4125 BGE_LOCK_ASSERT(sc);
4126
4127 /* Synchronize with possible callout reset/stop. */
4128 if (callout_pending(&sc->bge_stat_ch) ||
4129 !callout_active(&sc->bge_stat_ch))
4130 return;
4131
4132 if (BGE_IS_5705_PLUS(sc))
4133 bge_stats_update_regs(sc);
4134 else
4135 bge_stats_update(sc);
4136
4137 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4138 mii = device_get_softc(sc->bge_miibus);
4139 /*
4140		 * Do not touch the PHY if we have link up. This could break
4141		 * IPMI/ASF mode or produce extra input errors
4142		 * (extra errors were reported for bcm5701 & bcm5704).
4143 */
4144 if (!sc->bge_link)
4145 mii_tick(mii);
4146 } else {
4147 /*
4148		 * Since auto-polling can't be used in TBI mode, we have to poll
4149		 * link status manually. Here we register a pending link event
4150		 * and trigger an interrupt.
4151 */
4152#ifdef DEVICE_POLLING
4153 /* In polling mode we poll link state in bge_poll(). */
4154 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4155#endif
4156 {
4157 sc->bge_link_evt++;
4158 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4159 sc->bge_flags & BGE_FLAG_5788)
4160 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4161 else
4162 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4163 }
4164 }
4165
4166 bge_asf_driver_up(sc);
4167 bge_watchdog(sc);
4168
4169 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4170}
4171
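/*
 * Update interface statistics from the MAC statistics registers.
 * Used on 5705 and newer controllers (see bge_tick()).
 */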
4172static void
4173bge_stats_update_regs(struct bge_softc *sc)
4174{
4175 struct ifnet *ifp;
4176 struct bge_mac_stats *stats;
4177
4178 ifp = sc->bge_ifp;
4179 stats = &sc->bge_mac_stats;
4180
4181 stats->ifHCOutOctets +=
4182 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4183 stats->etherStatsCollisions +=
4184 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4185 stats->outXonSent +=
4186 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4187 stats->outXoffSent +=
4188 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4189 stats->dot3StatsInternalMacTransmitErrors +=
4190 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4191 stats->dot3StatsSingleCollisionFrames +=
4192 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4193 stats->dot3StatsMultipleCollisionFrames +=
4194 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4195 stats->dot3StatsDeferredTransmissions +=
4196 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4197 stats->dot3StatsExcessiveCollisions +=
4198 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4199 stats->dot3StatsLateCollisions +=
4200 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4201 stats->ifHCOutUcastPkts +=
4202 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4203 stats->ifHCOutMulticastPkts +=
4204 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4205 stats->ifHCOutBroadcastPkts +=
4206 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4207
4208 stats->ifHCInOctets +=
4209 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4210 stats->etherStatsFragments +=
4211 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4212 stats->ifHCInUcastPkts +=
4213 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4214 stats->ifHCInMulticastPkts +=
4215 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4216 stats->ifHCInBroadcastPkts +=
4217 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4218 stats->dot3StatsFCSErrors +=
4219 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4220 stats->dot3StatsAlignmentErrors +=
4221 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4222 stats->xonPauseFramesReceived +=
4223 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4224 stats->xoffPauseFramesReceived +=
4225 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4226 stats->macControlFramesReceived +=
4227 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4228 stats->xoffStateEntered +=
4229 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4230 stats->dot3StatsFramesTooLong +=
4231 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4232 stats->etherStatsJabbers +=
4233 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4234 stats->etherStatsUndersizePkts +=
4235 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4236
4237 stats->FramesDroppedDueToFilters +=
4238 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4239 stats->DmaWriteQueueFull +=
4240 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4241 stats->DmaWriteHighPriQueueFull +=
4242 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4243 stats->NoMoreRxBDs +=
4244 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4245 stats->InputDiscards +=
4246 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4247 stats->InputErrors +=
4248 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4249 stats->RecvThresholdHit +=
4250 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4251
4252 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4253 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4254 stats->InputErrors);
4255}
4256
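/*
 * Clear the MAC statistics by reading every counter once; the hardware
 * statistics registers are assumed to clear on read, so the values are
 * simply discarded here.
 */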
4257static void
4258bge_stats_clear_regs(struct bge_softc *sc)
4259{
4260
4261 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4262 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4263 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4265 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4267 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4268 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4269 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4270 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4271 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4272 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4273 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4274
4275 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4276 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4277 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4283 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4284 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4285 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4286 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4287 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4288 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4289
4290 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4291 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4292 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4293 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4294 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4295 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4296 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4297}
4298
4299static void
4300bge_stats_update(struct bge_softc *sc)
4301{
4302 struct ifnet *ifp;
4303 bus_size_t stats;
4304 uint32_t cnt; /* current register value */
4305
4306 ifp = sc->bge_ifp;
4307
4308 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4309
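	/*
	 * READ_STAT fetches one 32-bit word of a counter from the
	 * NIC-resident statistics block (laid out as struct bge_stats)
	 * through the PCI memory window; the callers below read the
	 * low-order word and accumulate deltas into the ifnet counters.
	 */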
4310#define READ_STAT(sc, stats, stat) \
4311 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4312
4313 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4314 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4315 sc->bge_tx_collisions = cnt;
4316
4317 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4318 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4319 sc->bge_rx_discards = cnt;
4320
4321 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4322 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4323 sc->bge_tx_discards = cnt;
4324
4325#undef READ_STAT
4326}
4327
4328/*
4329 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4330 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4331 * but when such padded frames employ the bge IP/TCP checksum offload,
4332 * the hardware checksum assist gives incorrect results (possibly
4333 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4334 * If we pad such runts with zeros, the onboard checksum comes out correct.
4335 */
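/*
 * For example, a bare 54-byte TCP ACK (14-byte Ethernet + 20-byte IP +
 * 20-byte TCP header) gets 6 bytes of zero padding appended here so the
 * frame reaches ETHER_MIN_NOPAD (60 bytes) before the checksum engine
 * sees it.
 */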
4336static __inline int
4337bge_cksum_pad(struct mbuf *m)
4338{
4339 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4340 struct mbuf *last;
4341
4342 /* If there's only the packet-header and we can pad there, use it. */
4343 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4344 M_TRAILINGSPACE(m) >= padlen) {
4345 last = m;
4346 } else {
4347 /*
4348 * Walk packet chain to find last mbuf. We will either
4349 * pad there, or append a new mbuf and pad it.
4350 */
4351 for (last = m; last->m_next != NULL; last = last->m_next);
4352 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4353 /* Allocate new empty mbuf, pad it. Compact later. */
4354 struct mbuf *n;
4355
4356 MGET(n, M_DONTWAIT, MT_DATA);
4357 if (n == NULL)
4358 return (ENOBUFS);
4359 n->m_len = 0;
4360 last->m_next = n;
4361 last = n;
4362 }
4363 }
4364
4365 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4366 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4367 last->m_len += padlen;
4368 m->m_pkthdr.len += padlen;
4369
4370 return (0);
4371}
4372
4373static struct mbuf *
4374bge_check_short_dma(struct mbuf *m)
4375{
4376 struct mbuf *n;
4377 int found;
4378
4379 /*
4380	 * If the device receives two back-to-back send BDs with less
4381	 * than or equal to 8 total bytes then the device may hang. The
4382	 * two back-to-back send BDs must be in the same frame for this
4383	 * failure to occur. Scan the mbuf chain and see whether two
4384	 * back-to-back send BDs are present. If so, allocate a new mbuf
4385	 * and copy the frame to work around the silicon bug.
4386 */
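	/*
	 * For example, a chain with a 4-byte mbuf immediately followed by
	 * a 3-byte mbuf would produce two consecutive short send BDs and
	 * is therefore defragmented into a contiguous copy below.
	 */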
4387 for (n = m, found = 0; n != NULL; n = n->m_next) {
4388 if (n->m_len < 8) {
4389 found++;
4390 if (found > 1)
4391 break;
4392 continue;
4393 }
4394 found = 0;
4395 }
4396
4397 if (found > 1) {
4398 n = m_defrag(m, M_DONTWAIT);
4399 if (n == NULL)
4400 m_freem(m);
4401 } else
4402 n = m;
4403 return (n);
4404}
4405
4406static struct mbuf *
4407bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4408 uint16_t *flags)
4409{
4410 struct ip *ip;
4411 struct tcphdr *tcp;
4412 struct mbuf *n;
4413 uint16_t hlen;
4414 uint32_t poff;
4415
4416 if (M_WRITABLE(m) == 0) {
4417 /* Get a writable copy. */
4418 n = m_dup(m, M_DONTWAIT);
4419 m_freem(m);
4420 if (n == NULL)
4421 return (NULL);
4422 m = n;
4423 }
4424 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4425 if (m == NULL)
4426 return (NULL);
4427 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4428 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4429 m = m_pullup(m, poff + sizeof(struct tcphdr));
4430 if (m == NULL)
4431 return (NULL);
4432 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4433 m = m_pullup(m, poff + (tcp->th_off << 2));
4434 if (m == NULL)
4435 return (NULL);
4436 /*
4437	 * It seems the controller doesn't modify the IP length and TCP pseudo
4438	 * checksum. These checksums, computed by the upper stack, should be 0.
4439 */
4440 *mss = m->m_pkthdr.tso_segsz;
4441 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4442 ip->ip_sum = 0;
4443 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4444 /* Clear pseudo checksum computed by TCP stack. */
4445 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4446 tcp->th_sum = 0;
4447 /*
4448	 * Broadcom controllers use different descriptor formats for
4449	 * TSO depending on ASIC revision. Due to the TSO-capable firmware
4450	 * licensing issue and the lower performance of firmware-based TSO,
4451	 * we only support hardware-based TSO.
4452 */
4453 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4454 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4455 if (sc->bge_flags & BGE_FLAG_TSO3) {
4456 /*
4457 * For BCM5717 and newer controllers, hardware based TSO
4458 * uses the 14 lower bits of the bge_mss field to store the
4459 * MSS and the upper 2 bits to store the lowest 2 bits of
4460 * the IP/TCP header length. The upper 6 bits of the header
4461 * length are stored in the bge_flags[14:10,4] field. Jumbo
4462 * frames are supported.
4463 */
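		/*
		 * Worked example (illustrative): with 20-byte IP and TCP
		 * headers and no options, hlen = 40 / 4 = 10 (0b01010), so
		 * the assignments below set bit 15 of bge_mss (hlen bits
		 * 1:0 = 0b10) and bit 10 of bge_flags (hlen bit 3); hlen
		 * bits 2 and 7:4 are zero in this case.
		 */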
4464 *mss |= ((hlen & 0x3) << 14);
4465 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4466 } else {
4467 /*
4468 * For BCM5755 and newer controllers, hardware based TSO uses
4469 * the lower 11 bits to store the MSS and the upper 5 bits to
4470 * store the IP/TCP header length. Jumbo frames are not
4471 * supported.
4472 */
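		/* e.g. (illustrative): with hlen = 10, bge_mss becomes (10 << 11) | MSS. */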
4473 *mss |= (hlen << 11);
4474 }
4475 return (m);
4476}
4477
4478/*
4479 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4480 * pointers to descriptors.
4481 */
4482static int
4483bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4484{
4485 bus_dma_segment_t segs[BGE_NSEG_NEW];
4486 bus_dmamap_t map;
4487 struct bge_tx_bd *d;
4488 struct mbuf *m = *m_head;
4489 uint32_t idx = *txidx;
4490 uint16_t csum_flags, mss, vlan_tag;
4491 int nsegs, i, error;
4492
4493 csum_flags = 0;
4494 mss = 0;
4495 vlan_tag = 0;
4496 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4497 m->m_next != NULL) {
4498 *m_head = bge_check_short_dma(m);
4499 if (*m_head == NULL)
4500 return (ENOBUFS);
4501 m = *m_head;
4502 }
4503 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4504 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4505 if (*m_head == NULL)
4506 return (ENOBUFS);
4507 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4508 BGE_TXBDFLAG_CPU_POST_DMA;
4509 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4510 if (m->m_pkthdr.csum_flags & CSUM_IP)
4511 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4512 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4513 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4514 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4515 (error = bge_cksum_pad(m)) != 0) {
4516 m_freem(m);
4517 *m_head = NULL;
4518 return (error);
4519 }
4520 }
4521 if (m->m_flags & M_LASTFRAG)
4522 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4523 else if (m->m_flags & M_FRAG)
4524 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4525 }
4526
4527 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4528 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4529 m->m_pkthdr.len > ETHER_MAX_LEN)
4530 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4531 if (sc->bge_forced_collapse > 0 &&
4532 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4533 /*
4534			 * Forcibly collapse mbuf chains to overcome a hardware
4535			 * limitation which only supports a single outstanding
4536			 * DMA read operation.
4537 */
4538 if (sc->bge_forced_collapse == 1)
4539 m = m_defrag(m, M_DONTWAIT);
4540 else
4541 m = m_collapse(m, M_DONTWAIT,
4542 sc->bge_forced_collapse);
4543 if (m == NULL)
4544 m = *m_head;
4545 *m_head = m;
4546 }
4547 }
4548
4549 map = sc->bge_cdata.bge_tx_dmamap[idx];
4550 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4551 &nsegs, BUS_DMA_NOWAIT);
4552 if (error == EFBIG) {
4553 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4554 if (m == NULL) {
4555 m_freem(*m_head);
4556 *m_head = NULL;
4557 return (ENOBUFS);
4558 }
4559 *m_head = m;
4560 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4561 m, segs, &nsegs, BUS_DMA_NOWAIT);
4562 if (error) {
4563 m_freem(m);
4564 *m_head = NULL;
4565 return (error);
4566 }
4567 } else if (error != 0)
4568 return (error);
4569
4570 /* Check if we have enough free send BDs. */
4571 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4572 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4573 return (ENOBUFS);
4574 }
4575
4576 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4577
4578 if (m->m_flags & M_VLANTAG) {
4579 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4580 vlan_tag = m->m_pkthdr.ether_vtag;
4581 }
4582 for (i = 0; ; i++) {
4583 d = &sc->bge_ldata.bge_tx_ring[idx];
4584 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4585 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4586 d->bge_len = segs[i].ds_len;
4587 d->bge_flags = csum_flags;
4588 d->bge_vlan_tag = vlan_tag;
4589 d->bge_mss = mss;
4590 if (i == nsegs - 1)
4591 break;
4592 BGE_INC(idx, BGE_TX_RING_CNT);
4593 }
4594
4595 /* Mark the last segment as end of packet... */
4596 d->bge_flags |= BGE_TXBDFLAG_END;
4597
4598 /*
4599	 * Ensure that the map for this transmission
4600 * is placed at the array index of the last descriptor
4601 * in this chain.
4602 */
4603 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4604 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4605 sc->bge_cdata.bge_tx_chain[idx] = m;
4606 sc->bge_txcnt += nsegs;
4607
4608 BGE_INC(idx, BGE_TX_RING_CNT);
4609 *txidx = idx;
4610
4611 return (0);
4612}
4613
4614/*
4615 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4616 * to the mbuf data regions directly in the transmit descriptors.
4617 */
4618static void
4619bge_start_locked(struct ifnet *ifp)
4620{
4621 struct bge_softc *sc;
4622 struct mbuf *m_head;
4623 uint32_t prodidx;
4624 int count;
4625
4626 sc = ifp->if_softc;
4627 BGE_LOCK_ASSERT(sc);
4628
4629 if (!sc->bge_link ||
4630 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4631 IFF_DRV_RUNNING)
4632 return;
4633
4634 prodidx = sc->bge_tx_prodidx;
4635
4636 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4637 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4638 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4639 break;
4640 }
4641 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4642 if (m_head == NULL)
4643 break;
4644
4645 /*
4646 * XXX
4647 * The code inside the if() block is never reached since we
4648 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4649 * requests to checksum TCP/UDP in a fragmented packet.
4650 *
4651 * XXX
4652 * safety overkill. If this is a fragmented packet chain
4653 * with delayed TCP/UDP checksums, then only encapsulate
4654 * it if we have enough descriptors to handle the entire
4655 * chain at once.
4656 * (paranoia -- may not actually be needed)
4657 */
4658 if (m_head->m_flags & M_FIRSTFRAG &&
4659 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4660 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4661 m_head->m_pkthdr.csum_data + 16) {
4662 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4663 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4664 break;
4665 }
4666 }
4667
4668 /*
4669 * Pack the data into the transmit ring. If we
4670 * don't have room, set the OACTIVE flag and wait
4671 * for the NIC to drain the ring.
4672 */
4673 if (bge_encap(sc, &m_head, &prodidx)) {
4674 if (m_head == NULL)
4675 break;
4676 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4677 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4678 break;
4679 }
4680 ++count;
4681
4682 /*
4683 * If there's a BPF listener, bounce a copy of this frame
4684 * to him.
4685 */
4686#ifdef ETHER_BPF_MTAP
4687 ETHER_BPF_MTAP(ifp, m_head);
4688#else
4689 BPF_MTAP(ifp, m_head);
4690#endif
4691 }
4692
4693 if (count > 0) {
4694 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4695 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4696 /* Transmit. */
4697 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4698 /* 5700 b2 errata */
4699 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4700 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4701
4702 sc->bge_tx_prodidx = prodidx;
4703
4704 /*
4705 * Set a timeout in case the chip goes out to lunch.
4706 */
4707 sc->bge_timer = 5;
4708 }
4709}
4710
4711/*
4712 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4713 * to the mbuf data regions directly in the transmit descriptors.
4714 */
4715static void
4716bge_start(struct ifnet *ifp)
4717{
4718 struct bge_softc *sc;
4719
4720 sc = ifp->if_softc;
4721 BGE_LOCK(sc);
4722 bge_start_locked(ifp);
4723 BGE_UNLOCK(sc);
4724}
4725
4726static void
4727bge_init_locked(struct bge_softc *sc)
4728{
4729 struct ifnet *ifp;
4730 uint16_t *m;
4731 uint32_t mode;
4732
4733 BGE_LOCK_ASSERT(sc);
4734
4735 ifp = sc->bge_ifp;
4736
4737 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4738 return;
4739
4740 /* Cancel pending I/O and flush buffers. */
4741 bge_stop(sc);
4742
4743 bge_stop_fw(sc);
4744 bge_sig_pre_reset(sc, BGE_RESET_START);
4745 bge_reset(sc);
4746 bge_sig_legacy(sc, BGE_RESET_START);
4747 bge_sig_post_reset(sc, BGE_RESET_START);
4748
4749 bge_chipinit(sc);
4750
4751 /*
4752 * Init the various state machines, ring
4753 * control blocks and firmware.
4754 */
4755 if (bge_blockinit(sc)) {
4756 device_printf(sc->bge_dev, "initialization failure\n");
4757 return;
4758 }
4759
4760 ifp = sc->bge_ifp;
4761
4762 /* Specify MTU. */
4763 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4764 ETHER_HDR_LEN + ETHER_CRC_LEN +
4765 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4766
4767 /* Load our MAC address. */
4768 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4769 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4770 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4771
4772 /* Program promiscuous mode. */
4773 bge_setpromisc(sc);
4774
4775 /* Program multicast filter. */
4776 bge_setmulti(sc);
4777
4778 /* Program VLAN tag stripping. */
4779 bge_setvlan(sc);
4780
4781 /* Override UDP checksum offloading. */
4782 if (sc->bge_forced_udpcsum == 0)
4783 sc->bge_csum_features &= ~CSUM_UDP;
4784 else
4785 sc->bge_csum_features |= CSUM_UDP;
4786 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4787 ifp->if_capenable & IFCAP_TXCSUM) {
4788 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4789 ifp->if_hwassist |= sc->bge_csum_features;
4790 }
4791
4792 /* Init RX ring. */
4793 if (bge_init_rx_ring_std(sc) != 0) {
4794 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4795 bge_stop(sc);
4796 return;
4797 }
4798
4799 /*
4800 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4801	 * memory to ensure that the chip has in fact read the first
4802 * entry of the ring.
4803 */
4804 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4805 uint32_t v, i;
4806 for (i = 0; i < 10; i++) {
4807 DELAY(20);
4808 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4809 if (v == (MCLBYTES - ETHER_ALIGN))
4810 break;
4811 }
4812 if (i == 10)
4813 device_printf (sc->bge_dev,
4814 "5705 A0 chip failed to load RX ring\n");
4815 }
4816
4817 /* Init jumbo RX ring. */
4818 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4819 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4820 (MCLBYTES - ETHER_ALIGN)) {
4821 if (bge_init_rx_ring_jumbo(sc) != 0) {
4822 device_printf(sc->bge_dev,
4823 "no memory for jumbo Rx buffers.\n");
4824 bge_stop(sc);
4825 return;
4826 }
4827 }
4828
4829 /* Init our RX return ring index. */
4830 sc->bge_rx_saved_considx = 0;
4831
4832 /* Init our RX/TX stat counters. */
4833 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4834
4835 /* Init TX ring. */
4836 bge_init_tx_ring(sc);
4837
4838 /* Enable TX MAC state machine lockup fix. */
4839 mode = CSR_READ_4(sc, BGE_TX_MODE);
4840 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4841 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4842 /* Turn on transmitter. */
4843 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4844
4845 /* Turn on receiver. */
4846 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4847
4848 /*
4849 * Set the number of good frames to receive after RX MBUF
4850 * Low Watermark has been reached. After the RX MAC receives
4851 * this number of frames, it will drop subsequent incoming
4852 * frames until the MBUF High Watermark is reached.
4853 */
4854 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4855 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4856 else
4857 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4858
4859 /* Clear MAC statistics. */
4860 if (BGE_IS_5705_PLUS(sc))
4861 bge_stats_clear_regs(sc);
4862
4863 /* Tell firmware we're alive. */
4864 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4865
4866#ifdef DEVICE_POLLING
4867 /* Disable interrupts if we are polling. */
4868 if (ifp->if_capenable & IFCAP_POLLING) {
4869 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4870 BGE_PCIMISCCTL_MASK_PCI_INTR);
4871 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4872 } else
4873#endif
4874
4875 /* Enable host interrupts. */
4876 {
4877 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4878 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4879 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4880 }
4881
4882 bge_ifmedia_upd_locked(ifp);
4883
4884 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4885 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4886
4887 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4888}
4889
4890static void
4891bge_init(void *xsc)
4892{
4893 struct bge_softc *sc = xsc;
4894
4895 BGE_LOCK(sc);
4896 bge_init_locked(sc);
4897 BGE_UNLOCK(sc);
4898}
4899
4900/*
4901 * Set media options.
4902 */
4903static int
4904bge_ifmedia_upd(struct ifnet *ifp)
4905{
4906 struct bge_softc *sc = ifp->if_softc;
4907 int res;
4908
4909 BGE_LOCK(sc);
4910 res = bge_ifmedia_upd_locked(ifp);
4911 BGE_UNLOCK(sc);
4912
4913 return (res);
4914}
4915
4916static int
4917bge_ifmedia_upd_locked(struct ifnet *ifp)
4918{
4919 struct bge_softc *sc = ifp->if_softc;
4920 struct mii_data *mii;
4921 struct mii_softc *miisc;
4922 struct ifmedia *ifm;
4923
4924 BGE_LOCK_ASSERT(sc);
4925
4926 ifm = &sc->bge_ifmedia;
4927
4928 /* If this is a 1000baseX NIC, enable the TBI port. */
4929 if (sc->bge_flags & BGE_FLAG_TBI) {
4930 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4931 return (EINVAL);
4932 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4933 case IFM_AUTO:
4934 /*
4935 * The BCM5704 ASIC appears to have a special
4936 * mechanism for programming the autoneg
4937 * advertisement registers in TBI mode.
4938 */
4939 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4940 uint32_t sgdig;
4941 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4942 if (sgdig & BGE_SGDIGSTS_DONE) {
4943 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4944 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4945 sgdig |= BGE_SGDIGCFG_AUTO |
4946 BGE_SGDIGCFG_PAUSE_CAP |
4947 BGE_SGDIGCFG_ASYM_PAUSE;
4948 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4949 sgdig | BGE_SGDIGCFG_SEND);
4950 DELAY(5);
4951 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4952 }
4953 }
4954 break;
4955 case IFM_1000_SX:
4956 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4957 BGE_CLRBIT(sc, BGE_MAC_MODE,
4958 BGE_MACMODE_HALF_DUPLEX);
4959 } else {
4960 BGE_SETBIT(sc, BGE_MAC_MODE,
4961 BGE_MACMODE_HALF_DUPLEX);
4962 }
4963 break;
4964 default:
4965 return (EINVAL);
4966 }
4967 return (0);
4968 }
4969
4970 sc->bge_link_evt++;
4971 mii = device_get_softc(sc->bge_miibus);
4972 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4973 PHY_RESET(miisc);
4974 mii_mediachg(mii);
4975
4976 /*
4977 * Force an interrupt so that we will call bge_link_upd
4978 * if needed and clear any pending link state attention.
4979	 * Without this we would not get any further interrupts
4980	 * for link state changes, and thus would not bring the link
4981	 * UP or be able to send in bge_start_locked. The only
4982	 * way to get things working was to receive a packet and
4983	 * get an RX interrupt.
4984	 * bge_tick should help for fiber cards and we might not
4985	 * need to do this here if BGE_FLAG_TBI is set, but as
4986	 * we poll for fiber anyway it should do no harm.
4987 */
4988 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4989 sc->bge_flags & BGE_FLAG_5788)
4990 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4991 else
4992 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4993
4994 return (0);
4995}
4996
4997/*
4998 * Report current media status.
4999 */
5000static void
5001bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5002{
5003 struct bge_softc *sc = ifp->if_softc;
5004 struct mii_data *mii;
5005
5006 BGE_LOCK(sc);
5007
5008 if (sc->bge_flags & BGE_FLAG_TBI) {
5009 ifmr->ifm_status = IFM_AVALID;
5010 ifmr->ifm_active = IFM_ETHER;
5011 if (CSR_READ_4(sc, BGE_MAC_STS) &
5012 BGE_MACSTAT_TBI_PCS_SYNCHED)
5013 ifmr->ifm_status |= IFM_ACTIVE;
5014 else {
5015 ifmr->ifm_active |= IFM_NONE;
5016 BGE_UNLOCK(sc);
5017 return;
5018 }
5019 ifmr->ifm_active |= IFM_1000_SX;
5020 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5021 ifmr->ifm_active |= IFM_HDX;
5022 else
5023 ifmr->ifm_active |= IFM_FDX;
5024 BGE_UNLOCK(sc);
5025 return;
5026 }
5027
5028 mii = device_get_softc(sc->bge_miibus);
5029 mii_pollstat(mii);
5030 ifmr->ifm_active = mii->mii_media_active;
5031 ifmr->ifm_status = mii->mii_media_status;
5032
5033 BGE_UNLOCK(sc);
5034}
5035
5036static int
5037bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5038{
5039 struct bge_softc *sc = ifp->if_softc;
5040 struct ifreq *ifr = (struct ifreq *) data;
5041 struct mii_data *mii;
5042 int flags, mask, error = 0;
5043
5044 switch (command) {
5045 case SIOCSIFMTU:
5046 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5047 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5048 if (ifr->ifr_mtu < ETHERMIN ||
5049 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5050 error = EINVAL;
5051 break;
5052 }
5053 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5054 error = EINVAL;
5055 break;
5056 }
5057 BGE_LOCK(sc);
5058 if (ifp->if_mtu != ifr->ifr_mtu) {
5059 ifp->if_mtu = ifr->ifr_mtu;
5060 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5061 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5062 bge_init_locked(sc);
5063 }
5064 }
5065 BGE_UNLOCK(sc);
5066 break;
5067 case SIOCSIFFLAGS:
5068 BGE_LOCK(sc);
5069 if (ifp->if_flags & IFF_UP) {
5070 /*
5071 * If only the state of the PROMISC flag changed,
5072 * then just use the 'set promisc mode' command
5073 * instead of reinitializing the entire NIC. Doing
5074 * a full re-init means reloading the firmware and
5075 * waiting for it to start up, which may take a
5076 * second or two. Similarly for ALLMULTI.
5077 */
5078 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5079 flags = ifp->if_flags ^ sc->bge_if_flags;
5080 if (flags & IFF_PROMISC)
5081 bge_setpromisc(sc);
5082 if (flags & IFF_ALLMULTI)
5083 bge_setmulti(sc);
5084 } else
5085 bge_init_locked(sc);
5086 } else {
5087 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5088 bge_stop(sc);
5089 }
5090 }
5091 sc->bge_if_flags = ifp->if_flags;
5092 BGE_UNLOCK(sc);
5093 error = 0;
5094 break;
5095 case SIOCADDMULTI:
5096 case SIOCDELMULTI:
5097 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5098 BGE_LOCK(sc);
5099 bge_setmulti(sc);
5100 BGE_UNLOCK(sc);
5101 error = 0;
5102 }
5103 break;
5104 case SIOCSIFMEDIA:
5105 case SIOCGIFMEDIA:
5106 if (sc->bge_flags & BGE_FLAG_TBI) {
5107 error = ifmedia_ioctl(ifp, ifr,
5108 &sc->bge_ifmedia, command);
5109 } else {
5110 mii = device_get_softc(sc->bge_miibus);
5111 error = ifmedia_ioctl(ifp, ifr,
5112 &mii->mii_media, command);
5113 }
5114 break;
5115 case SIOCSIFCAP:
5116 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5117#ifdef DEVICE_POLLING
5118 if (mask & IFCAP_POLLING) {
5119 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5120 error = ether_poll_register(bge_poll, ifp);
5121 if (error)
5122 return (error);
5123 BGE_LOCK(sc);
5124 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5125 BGE_PCIMISCCTL_MASK_PCI_INTR);
5126 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5127 ifp->if_capenable |= IFCAP_POLLING;
5128 BGE_UNLOCK(sc);
5129 } else {
5130 error = ether_poll_deregister(ifp);
5131				/* Enable interrupts even in the error case. */
5132 BGE_LOCK(sc);
5133 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5134 BGE_PCIMISCCTL_MASK_PCI_INTR);
5135 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5136 ifp->if_capenable &= ~IFCAP_POLLING;
5137 BGE_UNLOCK(sc);
5138 }
5139 }
5140#endif
5141 if ((mask & IFCAP_TXCSUM) != 0 &&
5142 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5143 ifp->if_capenable ^= IFCAP_TXCSUM;
5144 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5145 ifp->if_hwassist |= sc->bge_csum_features;
5146 else
5147 ifp->if_hwassist &= ~sc->bge_csum_features;
5148 }
5149
5150 if ((mask & IFCAP_RXCSUM) != 0 &&
5151 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5152 ifp->if_capenable ^= IFCAP_RXCSUM;
5153
5154 if ((mask & IFCAP_TSO4) != 0 &&
5155 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5156 ifp->if_capenable ^= IFCAP_TSO4;
5157 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5158 ifp->if_hwassist |= CSUM_TSO;
5159 else
5160 ifp->if_hwassist &= ~CSUM_TSO;
5161 }
5162
5163 if (mask & IFCAP_VLAN_MTU) {
5164 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5165 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5166 bge_init(sc);
5167 }
5168
5169 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5170 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5171 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5172 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5173 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5174 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5175 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5176 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5177 BGE_LOCK(sc);
5178 bge_setvlan(sc);
5179 BGE_UNLOCK(sc);
5180 }
5181#ifdef VLAN_CAPABILITIES
5182 VLAN_CAPABILITIES(ifp);
5183#endif
5184 break;
5185 default:
5186 error = ether_ioctl(ifp, command, data);
5187 break;
5188 }
5189
5190 return (error);
5191}
5192
5193static void
5194bge_watchdog(struct bge_softc *sc)
5195{
5196 struct ifnet *ifp;
5197
5198 BGE_LOCK_ASSERT(sc);
5199
5200 if (sc->bge_timer == 0 || --sc->bge_timer)
5201 return;
5202
5203 ifp = sc->bge_ifp;
5204
5205 if_printf(ifp, "watchdog timeout -- resetting\n");
5206
5207 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5208 bge_init_locked(sc);
5209
5210 ifp->if_oerrors++;
5211}
5212
5213static void
5214bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5215{
5216 int i;
5217
5218 BGE_CLRBIT(sc, reg, bit);
5219
5220 for (i = 0; i < BGE_TIMEOUT; i++) {
5221 if ((CSR_READ_4(sc, reg) & bit) == 0)
5222 return;
5223 DELAY(100);
5224 }
5225}
5226
5227/*
5228 * Stop the adapter and free any mbufs allocated to the
5229 * RX and TX lists.
5230 */
5231static void
5232bge_stop(struct bge_softc *sc)
5233{
5234 struct ifnet *ifp;
5235
5236 BGE_LOCK_ASSERT(sc);
5237
5238 ifp = sc->bge_ifp;
5239
5240 callout_stop(&sc->bge_stat_ch);
5241
5242 /* Disable host interrupts. */
5243 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5244 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5245
5246 /*
5247 * Tell firmware we're shutting down.
5248 */
5249 bge_stop_fw(sc);
5250 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5251
5252 /*
5253 * Disable all of the receiver blocks.
5254 */
5255 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5256 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5257 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5258 if (BGE_IS_5700_FAMILY(sc))
5259 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5260 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5261 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5262 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5263
5264 /*
5265 * Disable all of the transmit blocks.
5266 */
5267 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5268 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5269 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5270 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5271 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5272 if (BGE_IS_5700_FAMILY(sc))
5273 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5274 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5275
5276 /*
5277 * Shut down all of the memory managers and related
5278 * state machines.
5279 */
5280 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5281 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5282 if (BGE_IS_5700_FAMILY(sc))
5283 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5284
5285 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5286 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5287 if (!(BGE_IS_5705_PLUS(sc))) {
5288 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5289 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5290 }
5291 /* Update MAC statistics. */
5292 if (BGE_IS_5705_PLUS(sc))
5293 bge_stats_update_regs(sc);
5294
5295 bge_reset(sc);
5296 bge_sig_legacy(sc, BGE_RESET_STOP);
5297 bge_sig_post_reset(sc, BGE_RESET_STOP);
5298
5299 /*
5300 * Keep the ASF firmware running if up.
5301 */
5302 if (sc->bge_asf_mode & ASF_STACKUP)
5303 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5304 else
5305 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5306
5307 /* Free the RX lists. */
5308 bge_free_rx_ring_std(sc);
5309
5310 /* Free jumbo RX list. */
5311 if (BGE_IS_JUMBO_CAPABLE(sc))
5312 bge_free_rx_ring_jumbo(sc);
5313
5314 /* Free TX buffers. */
5315 bge_free_tx_ring(sc);
5316
5317 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5318
5319 /* Clear MAC's link state (PHY may still have link UP). */
5320 if (bootverbose && sc->bge_link)
5321 if_printf(sc->bge_ifp, "link DOWN\n");
5322 sc->bge_link = 0;
5323
5324 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5325}
5326
5327/*
5328 * Stop all chip I/O so that the kernel's probe routines don't
5329 * get confused by errant DMAs when rebooting.
5330 */
5331static int
5332bge_shutdown(device_t dev)
5333{
5334 struct bge_softc *sc;
5335
5336 sc = device_get_softc(dev);
5337 BGE_LOCK(sc);
5338 bge_stop(sc);
5339 bge_reset(sc);
5340 BGE_UNLOCK(sc);
5341
5342 return (0);
5343}
5344
5345static int
5346bge_suspend(device_t dev)
5347{
5348 struct bge_softc *sc;
5349
5350 sc = device_get_softc(dev);
5351 BGE_LOCK(sc);
5352 bge_stop(sc);
5353 BGE_UNLOCK(sc);
5354
5355 return (0);
5356}
5357
5358static int
5359bge_resume(device_t dev)
5360{
5361 struct bge_softc *sc;
5362 struct ifnet *ifp;
5363
5364 sc = device_get_softc(dev);
5365 BGE_LOCK(sc);
5366 ifp = sc->bge_ifp;
5367 if (ifp->if_flags & IFF_UP) {
5368 bge_init_locked(sc);
5369 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5370 bge_start_locked(ifp);
5371 }
5372 BGE_UNLOCK(sc);
5373
5374 return (0);
5375}
5376
5377static void
5378bge_link_upd(struct bge_softc *sc)
5379{
5380 struct mii_data *mii;
5381 uint32_t link, status;
5382
5383 BGE_LOCK_ASSERT(sc);
5384
5385 /* Clear 'pending link event' flag. */
5386 sc->bge_link_evt = 0;
5387
5388 /*
5389 * Process link state changes.
5390 * Grrr. The link status word in the status block does
5391 * not work correctly on the BCM5700 rev AX and BX chips,
5392 * according to all available information. Hence, we have
5393 * to enable MII interrupts in order to properly obtain
5394 * async link changes. Unfortunately, this also means that
5395 * we have to read the MAC status register to detect link
5396 * changes, thereby adding an additional register access to
5397 * the interrupt handler.
5398 *
5399	 * XXX: perhaps the link state detection procedure used for
5400	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5401 */
5402
5403 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5404 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5405 status = CSR_READ_4(sc, BGE_MAC_STS);
5406 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5407 mii = device_get_softc(sc->bge_miibus);
5408 mii_pollstat(mii);
5409 if (!sc->bge_link &&
5410 mii->mii_media_status & IFM_ACTIVE &&
5411 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5412 sc->bge_link++;
5413 if (bootverbose)
5414 if_printf(sc->bge_ifp, "link UP\n");
5415 } else if (sc->bge_link &&
5416 (!(mii->mii_media_status & IFM_ACTIVE) ||
5417 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5418 sc->bge_link = 0;
5419 if (bootverbose)
5420 if_printf(sc->bge_ifp, "link DOWN\n");
5421 }
5422
5423 /* Clear the interrupt. */
5424 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5425 BGE_EVTENB_MI_INTERRUPT);
5426 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5427 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5428 BRGPHY_INTRS);
5429 }
5430 return;
5431 }
5432
5433 if (sc->bge_flags & BGE_FLAG_TBI) {
5434 status = CSR_READ_4(sc, BGE_MAC_STS);
5435 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5436 if (!sc->bge_link) {
5437 sc->bge_link++;
5438 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5439 BGE_CLRBIT(sc, BGE_MAC_MODE,
5440 BGE_MACMODE_TBI_SEND_CFGS);
5441 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5442 if (bootverbose)
5443 if_printf(sc->bge_ifp, "link UP\n");
5444 if_link_state_change(sc->bge_ifp,
5445 LINK_STATE_UP);
5446 }
5447 } else if (sc->bge_link) {
5448 sc->bge_link = 0;
5449 if (bootverbose)
5450 if_printf(sc->bge_ifp, "link DOWN\n");
5451 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5452 }
5453 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5454 /*
5455		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
5456		 * in the status word always set. Work around this bug by reading
5457		 * the PHY link status directly.
5458 */
5459 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5460
5461 if (link != sc->bge_link ||
5462 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5463 mii = device_get_softc(sc->bge_miibus);
5464 mii_pollstat(mii);
5465 if (!sc->bge_link &&
5466 mii->mii_media_status & IFM_ACTIVE &&
5467 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5468 sc->bge_link++;
5469 if (bootverbose)
5470 if_printf(sc->bge_ifp, "link UP\n");
5471 } else if (sc->bge_link &&
5472 (!(mii->mii_media_status & IFM_ACTIVE) ||
5473 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5474 sc->bge_link = 0;
5475 if (bootverbose)
5476 if_printf(sc->bge_ifp, "link DOWN\n");
5477 }
5478 }
5479 } else {
5480 /*
5481 * For controllers that call mii_tick, we have to poll
5482 * link status.
5483 */
5484 mii = device_get_softc(sc->bge_miibus);
5485 mii_pollstat(mii);
5486 bge_miibus_statchg(sc->bge_dev);
5487 }
5488
5489 /* Clear the attention. */
5490 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5491 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5492 BGE_MACSTAT_LINK_CHANGED);
5493}
5494
5495static void
5496bge_add_sysctls(struct bge_softc *sc)
5497{
5498 struct sysctl_ctx_list *ctx;
5499 struct sysctl_oid_list *children;
5500 char tn[32];
5501 int unit;
5502
5503 ctx = device_get_sysctl_ctx(sc->bge_dev);
5504 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5505
5506#ifdef BGE_REGISTER_DEBUG
5507 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5508 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5509 "Debug Information");
5510
5511 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5512 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5513 "Register Read");
5514
5515 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5516 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5517 "Memory Read");
5518
5519#endif
5520
5521 unit = device_get_unit(sc->bge_dev);
5522 /*
5523 * A common design characteristic for many Broadcom client controllers
5524 * is that they only support a single outstanding DMA read operation
5525 * on the PCIe bus. This means that it will take twice as long to fetch
5526 * a TX frame that is split into header and payload buffers as it does
5527 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5528 * these controllers, coalescing buffers to reduce the number of memory
5529	 * reads is an effective way to get maximum performance (about 940Mbps).
5530	 * Without collapsing TX buffers the maximum TCP bulk transfer
5531	 * performance is about 850Mbps. However, forcing mbuf coalescing
5532	 * consumes a lot of CPU cycles, so leave it off by default.
5533 */
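	/*
	 * Usage note: setting the dev.bge.N.forced_collapse tunable/sysctl
	 * to 1 makes bge_encap() defragment multi-buffer frames with
	 * m_defrag(); values greater than 1 use m_collapse() with that
	 * value as the segment limit. Collapsing is only applied on
	 * PCI Express controllers.
	 */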
5534 sc->bge_forced_collapse = 0;
5535 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5536 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5537 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5538 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5539 "Number of fragmented TX buffers of a frame allowed before "
5540 "forced collapsing");
5541
5542 /*
5543 * It seems all Broadcom controllers have a bug that can generate UDP
5544 * datagrams with checksum value 0 when TX UDP checksum offloading is
5545	 * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation.
5546	 * Even though the probability of generating such UDP datagrams is
5547	 * low, I don't want FreeBSD boxes injecting such datagrams into the
5548	 * network, so disable UDP checksum offloading by default. Users can
5549	 * still override this behavior by setting a sysctl variable,
5550 * dev.bge.0.forced_udpcsum.
5551 */
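	/*
	 * Usage note: setting dev.bge.N.forced_udpcsum to a non-zero value
	 * adds CSUM_UDP back to the controller's checksum features; the
	 * change takes effect the next time the interface is initialized.
	 */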
5552 sc->bge_forced_udpcsum = 0;
5553 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5554 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5555 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5556 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5557 "Enable UDP checksum offloading even if controller can "
5558 "generate UDP checksum value 0");
5559
5560 if (BGE_IS_5705_PLUS(sc))
5561 bge_add_sysctl_stats_regs(sc, ctx, children);
5562 else
5563 bge_add_sysctl_stats(sc, ctx, children);
5564}
5565
5566#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5567 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5568 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5569 desc)
5570
5571static void
5572bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5573 struct sysctl_oid_list *parent)
5574{
5575 struct sysctl_oid *tree;
5576 struct sysctl_oid_list *children, *schildren;
5577
5578 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5579 NULL, "BGE Statistics");
5580 schildren = children = SYSCTL_CHILDREN(tree);
5581 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5582 children, COSFramesDroppedDueToFilters,
5583 "FramesDroppedDueToFilters");
5584 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5585 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5586 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5587 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5588 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5589 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5590 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5591 children, ifInDiscards, "InputDiscards");
5592 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5593 children, ifInErrors, "InputErrors");
5594 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5595 children, nicRecvThresholdHit, "RecvThresholdHit");
5596 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5597 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5598 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5599 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5600 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5601 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5602 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5603 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5604 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5605 children, nicRingStatusUpdate, "RingStatusUpdate");
5606 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5607 children, nicInterrupts, "Interrupts");
5608 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5609 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5610 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5611 children, nicSendThresholdHit, "SendThresholdHit");
5612
5613 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5614 NULL, "BGE RX Statistics");
5615 children = SYSCTL_CHILDREN(tree);
5616 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5617 children, rxstats.ifHCInOctets, "ifHCInOctets");
5618 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5619 children, rxstats.etherStatsFragments, "Fragments");
5620 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5621 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5622 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5623 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5624 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5625 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5626 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5627 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5628 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5629 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5630 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5631 children, rxstats.xoffPauseFramesReceived,
5632 "xoffPauseFramesReceived");
5633 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5634 children, rxstats.macControlFramesReceived,
5635 "ControlFramesReceived");
5636 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5637 children, rxstats.xoffStateEntered, "xoffStateEntered");
5638 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5639 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5640 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5641 children, rxstats.etherStatsJabbers, "Jabbers");
5642 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5643 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5644 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5645 children, rxstats.inRangeLengthError, "inRangeLengthError");
5646 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5647 children, rxstats.outRangeLengthError, "outRangeLengthError");
5648
5649 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5650 NULL, "BGE TX Statistics");
5651 children = SYSCTL_CHILDREN(tree);
5652 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5653 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5654 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5655 children, txstats.etherStatsCollisions, "Collisions");
5656 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5657 children, txstats.outXonSent, "XonSent");
5658 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5659 children, txstats.outXoffSent, "XoffSent");
5660 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5661 children, txstats.flowControlDone, "flowControlDone");
5662 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5663 children, txstats.dot3StatsInternalMacTransmitErrors,
5664 "InternalMacTransmitErrors");
5665 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5666 children, txstats.dot3StatsSingleCollisionFrames,
5667 "SingleCollisionFrames");
5668 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5669 children, txstats.dot3StatsMultipleCollisionFrames,
5670 "MultipleCollisionFrames");
5671 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5672 children, txstats.dot3StatsDeferredTransmissions,
5673 "DeferredTransmissions");
5674 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5675 children, txstats.dot3StatsExcessiveCollisions,
5676 "ExcessiveCollisions");
5677 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5678 children, txstats.dot3StatsLateCollisions,
5679 "LateCollisions");
5680 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5681 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5682 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5683 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5684 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5685 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5686 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5687 children, txstats.dot3StatsCarrierSenseErrors,
5688 "CarrierSenseErrors");
5689 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5690 children, txstats.ifOutDiscards, "Discards");
5691 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5692 children, txstats.ifOutErrors, "Errors");
5693}
5694
5695#undef BGE_SYSCTL_STAT
5696
5697#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5698 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5699
5700static void
5701bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5702 struct sysctl_oid_list *parent)
5703{
5704 struct sysctl_oid *tree;
5705 struct sysctl_oid_list *child, *schild;
5706 struct bge_mac_stats *stats;
5707
5708 stats = &sc->bge_mac_stats;
5709 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5710 NULL, "BGE Statistics");
5711 schild = child = SYSCTL_CHILDREN(tree);
5712 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5713 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5714 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5715 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5716 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5717 &stats->DmaWriteHighPriQueueFull,
5718 "NIC DMA Write High Priority Queue Full");
5719 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5720 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5721 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5722 &stats->InputDiscards, "Discarded Input Frames");
5723 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5724 &stats->InputErrors, "Input Errors");
5725 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5726 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5727
5728 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5729 NULL, "BGE RX Statistics");
5730 child = SYSCTL_CHILDREN(tree);
5731 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5732 &stats->ifHCInOctets, "Inbound Octets");
5733 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5734 &stats->etherStatsFragments, "Fragments");
5735 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5736 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5737 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5738 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5739 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5740 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5741 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5742 &stats->dot3StatsFCSErrors, "FCS Errors");
5743 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5744 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5745 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5746 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5747 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5748 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5749 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5750 &stats->macControlFramesReceived, "MAC Control Frames Received");
5751 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5752 &stats->xoffStateEntered, "XOFF State Entered");
5753 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5754 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5755 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5756 &stats->etherStatsJabbers, "Jabbers");
5757 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5758 &stats->etherStatsUndersizePkts, "Undersized Packets");
5759
5760 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5761 NULL, "BGE TX Statistics");
5762 child = SYSCTL_CHILDREN(tree);
5763 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5764 &stats->ifHCOutOctets, "Outbound Octets");
5765 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5766 &stats->etherStatsCollisions, "TX Collisions");
5767 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5768 &stats->outXonSent, "XON Sent");
5769 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5770 &stats->outXoffSent, "XOFF Sent");
5771 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5772 &stats->dot3StatsInternalMacTransmitErrors,
5773 "Internal MAC TX Errors");
5774 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5775 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5776 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5777 &stats->dot3StatsMultipleCollisionFrames,
5778 "Multiple Collision Frames");
5779 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5780 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5781 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5782 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5783 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5784 &stats->dot3StatsLateCollisions, "Late Collisions");
5785 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5786 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5787 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5788 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5789 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5790 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5791}
5792
5793#undef BGE_SYSCTL_STAT_ADD64
5794
5795static int
5796bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5797{
5798 struct bge_softc *sc;
5799 uint32_t result;
5800 int offset;
5801
5802 sc = (struct bge_softc *)arg1;
5803 offset = arg2;
5804 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5805 offsetof(bge_hostaddr, bge_addr_lo));
5806 return (sysctl_handle_int(oidp, &result, 0, req));
5807}
5808
5809#ifdef BGE_REGISTER_DEBUG
5810static int
5811bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5812{
5813 struct bge_softc *sc;
5814 uint16_t *sbdata;
5815 int error, result, sbsz;
5816 int i, j;
5817
5818 result = -1;
5819 error = sysctl_handle_int(oidp, &result, 0, req);
5820 if (error || (req->newptr == NULL))
5821 return (error);
5822
5823 if (result == 1) {
5824 sc = (struct bge_softc *)arg1;
5825
5826 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5827 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5828 sbsz = BGE_STATUS_BLK_SZ;
5829 else
5830 sbsz = 32;
5831 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5832 printf("Status Block:\n");
5833 BGE_LOCK(sc);
5834 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5835 sc->bge_cdata.bge_status_map,
5836 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5837 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5838 printf("%06x:", i);
5839 for (j = 0; j < 8; j++)
5840 printf(" %04x", sbdata[i++]);
5841 printf("\n");
5842 }
5843
5844 printf("Registers:\n");
5845 for (i = 0x800; i < 0xA00; ) {
5846 printf("%06x:", i);
5847 for (j = 0; j < 8; j++) {
5848 printf(" %08x", CSR_READ_4(sc, i));
5849 i += 4;
5850 }
5851 printf("\n");
5852 }
5853 BGE_UNLOCK(sc);
5854
5855 printf("Hardware Flags:\n");
5856 if (BGE_IS_5717_PLUS(sc))
5857 printf(" - 5717 Plus\n");
5858 if (BGE_IS_5755_PLUS(sc))
5859 printf(" - 5755 Plus\n");
5860 if (BGE_IS_575X_PLUS(sc))
5861 printf(" - 575X Plus\n");
5862 if (BGE_IS_5705_PLUS(sc))
5863 printf(" - 5705 Plus\n");
5864 if (BGE_IS_5714_FAMILY(sc))
5865 printf(" - 5714 Family\n");
5866 if (BGE_IS_5700_FAMILY(sc))
5867 printf(" - 5700 Family\n");
5868 if (sc->bge_flags & BGE_FLAG_JUMBO)
5869 printf(" - Supports Jumbo Frames\n");
5870 if (sc->bge_flags & BGE_FLAG_PCIX)
5871 printf(" - PCI-X Bus\n");
5872 if (sc->bge_flags & BGE_FLAG_PCIE)
5873 printf(" - PCI Express Bus\n");
5874 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5875 printf(" - No 3 LEDs\n");
5876 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5877 printf(" - RX Alignment Bug\n");
5878 }
5879
5880 return (error);
5881}
5882
5883static int
5884bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5885{
5886 struct bge_softc *sc;
5887 int error;
5888 uint16_t result;
5889 uint32_t val;
5890
5891 result = -1;
5892 error = sysctl_handle_int(oidp, &result, 0, req);
5893 if (error || (req->newptr == NULL))
5894 return (error);
5895
5896 if (result < 0x8000) {
5897 sc = (struct bge_softc *)arg1;
5898 val = CSR_READ_4(sc, result);
5899 printf("reg 0x%06X = 0x%08X\n", result, val);
5900 }
5901
5902 return (error);
5903}
5904
5905static int
5906bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5907{
5908 struct bge_softc *sc;
5909 int error;
5910 uint16_t result;
5911 uint32_t val;
5912
5913 result = -1;
5914 error = sysctl_handle_int(oidp, &result, 0, req);
5915 if (error || (req->newptr == NULL))
5916 return (error);
5917
5918 if (result < 0x8000) {
5919 sc = (struct bge_softc *)arg1;
5920 val = bge_readmem_ind(sc, result);
5921 printf("mem 0x%06X = 0x%08X\n", result, val);
5922 }
5923
5924 return (error);
5925}
5926#endif
5927
5928static int
5929bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5930{
5931
5932 if (sc->bge_flags & BGE_FLAG_EADDR)
5933 return (1);
5934
5935#ifdef __sparc64__
5936 OF_getetheraddr(sc->bge_dev, ether_addr);
5937 return (0);
5938#endif
5939 return (1);
5940}
5941
5942static int
5943bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5944{
5945 uint32_t mac_addr;
5946
5947 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
5948 if ((mac_addr >> 16) == 0x484b) {
5949 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5950 ether_addr[1] = (uint8_t)mac_addr;
5951 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
5952 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5953 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5954 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5955 ether_addr[5] = (uint8_t)mac_addr;
5956 return (0);
5957 }
5958 return (1);
5959}
5960
5961static int
5962bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5963{
5964 int mac_offset = BGE_EE_MAC_OFFSET;
5965
5966 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5967 mac_offset = BGE_EE_MAC_OFFSET_5906;
5968
5969 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5970 ETHER_ADDR_LEN));
5971}
5972
5973static int
5974bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5975{
5976
5977 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5978 return (1);
5979
5980 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5981 ETHER_ADDR_LEN));
5982}
5983
5984static int
5985bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5986{
5987 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5988 /* NOTE: Order is critical */
5989 bge_get_eaddr_fw,
5990 bge_get_eaddr_mem,
5991 bge_get_eaddr_nvram,
5992 bge_get_eaddr_eeprom,
5993 NULL
5994 };
5995 const bge_eaddr_fcn_t *func;
5996
5997 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5998 if ((*func)(sc, eaddr) == 0)
5999 break;
6000 }
6001 return (*func == NULL ? ENXIO : 0);
6002}