if_bge.c (230286 -> 230337)
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 230286 2012-01-17 22:15:33Z yongari $");
35__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 230337 2012-01-19 20:21:59Z yongari $");
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
 136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
323
324 { 0, NULL }
325};
326
327/*
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
330 */
331static const struct bge_revision const bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
354
355 { 0, NULL }
356};
357
358#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
365
366const struct bge_revision * bge_lookup_rev(uint32_t);
367const struct bge_vendor * bge_lookup_vendor(uint16_t);
368
369typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
370
371static int bge_probe(device_t);
372static int bge_attach(device_t);
373static int bge_detach(device_t);
374static int bge_suspend(device_t);
375static int bge_resume(device_t);
376static void bge_release_resources(struct bge_softc *);
377static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378static int bge_dma_alloc(struct bge_softc *);
379static void bge_dma_free(struct bge_softc *);
380static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
382
383static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
384static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
385static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
386static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
387static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
388
389static void bge_txeof(struct bge_softc *, uint16_t);
390static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
391static int bge_rxeof(struct bge_softc *, uint16_t, int);
392
393static void bge_asf_driver_up (struct bge_softc *);
394static void bge_tick(void *);
395static void bge_stats_clear_regs(struct bge_softc *);
396static void bge_stats_update(struct bge_softc *);
397static void bge_stats_update_regs(struct bge_softc *);
398static struct mbuf *bge_check_short_dma(struct mbuf *);
399static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
400 uint16_t *, uint16_t *);
401static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
402
403static void bge_intr(void *);
404static int bge_msi_intr(void *);
405static void bge_intr_task(void *, int);
406static void bge_start_locked(struct ifnet *);
407static void bge_start(struct ifnet *);
408static int bge_ioctl(struct ifnet *, u_long, caddr_t);
409static void bge_init_locked(struct bge_softc *);
410static void bge_init(void *);
411static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
412static void bge_stop(struct bge_softc *);
413static void bge_watchdog(struct bge_softc *);
414static int bge_shutdown(device_t);
415static int bge_ifmedia_upd_locked(struct ifnet *);
416static int bge_ifmedia_upd(struct ifnet *);
417static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
418
419static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
420static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
421
422static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
423static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
424
425static void bge_setpromisc(struct bge_softc *);
426static void bge_setmulti(struct bge_softc *);
427static void bge_setvlan(struct bge_softc *);
428
429static __inline void bge_rxreuse_std(struct bge_softc *, int);
430static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
431static int bge_newbuf_std(struct bge_softc *, int);
432static int bge_newbuf_jumbo(struct bge_softc *, int);
433static int bge_init_rx_ring_std(struct bge_softc *);
434static void bge_free_rx_ring_std(struct bge_softc *);
435static int bge_init_rx_ring_jumbo(struct bge_softc *);
436static void bge_free_rx_ring_jumbo(struct bge_softc *);
437static void bge_free_tx_ring(struct bge_softc *);
438static int bge_init_tx_ring(struct bge_softc *);
439
440static int bge_chipinit(struct bge_softc *);
441static int bge_blockinit(struct bge_softc *);
442static uint32_t bge_dma_swap_options(struct bge_softc *);
443
444static int bge_has_eaddr(struct bge_softc *);
445static uint32_t bge_readmem_ind(struct bge_softc *, int);
446static void bge_writemem_ind(struct bge_softc *, int, int);
447static void bge_writembx(struct bge_softc *, int, int);
448#ifdef notdef
449static uint32_t bge_readreg_ind(struct bge_softc *, int);
450#endif
451static void bge_writemem_direct(struct bge_softc *, int, int);
452static void bge_writereg_ind(struct bge_softc *, int, int);
453
454static int bge_miibus_readreg(device_t, int, int);
455static int bge_miibus_writereg(device_t, int, int, int);
456static void bge_miibus_statchg(device_t);
457#ifdef DEVICE_POLLING
458static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
459#endif
460
461#define BGE_RESET_START 1
462#define BGE_RESET_STOP 2
463static void bge_sig_post_reset(struct bge_softc *, int);
464static void bge_sig_legacy(struct bge_softc *, int);
465static void bge_sig_pre_reset(struct bge_softc *, int);
466static void bge_stop_fw(struct bge_softc *);
467static int bge_reset(struct bge_softc *);
468static void bge_link_upd(struct bge_softc *);
469
470/*
471 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
472 * leak information to untrusted users. It is also known to cause alignment
473 * traps on certain architectures.
474 */
475#ifdef BGE_REGISTER_DEBUG
476static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
477static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
478static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
479#endif
480static void bge_add_sysctls(struct bge_softc *);
481static void bge_add_sysctl_stats_regs(struct bge_softc *,
482 struct sysctl_ctx_list *, struct sysctl_oid_list *);
483static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
484 struct sysctl_oid_list *);
485static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
486
487static device_method_t bge_methods[] = {
488 /* Device interface */
489 DEVMETHOD(device_probe, bge_probe),
490 DEVMETHOD(device_attach, bge_attach),
491 DEVMETHOD(device_detach, bge_detach),
492 DEVMETHOD(device_shutdown, bge_shutdown),
493 DEVMETHOD(device_suspend, bge_suspend),
494 DEVMETHOD(device_resume, bge_resume),
495
496 /* MII interface */
497 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
498 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
499 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
500
501 DEVMETHOD_END
502};
503
504static driver_t bge_driver = {
505 "bge",
506 bge_methods,
507 sizeof(struct bge_softc)
508};
509
510static devclass_t bge_devclass;
511
512DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
513DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
514
515static int bge_allow_asf = 1;
516
517TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
518
519static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
520SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
521 "Allow ASF mode if available");
522
523#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
524#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
525#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
526#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
527#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
528
529static int
530bge_has_eaddr(struct bge_softc *sc)
531{
532#ifdef __sparc64__
533 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
534 device_t dev;
535 uint32_t subvendor;
536
537 dev = sc->bge_dev;
538
539 /*
540 * The on-board BGEs found in sun4u machines aren't fitted with
541 * an EEPROM which means that we have to obtain the MAC address
542 * via OFW and that some tests will always fail. We distinguish
543 * such BGEs by the subvendor ID, which also has to be obtained
544 * from OFW instead of the PCI configuration space as the latter
545 * indicates Broadcom as the subvendor of the netboot interface.
546 * For early Blade 1500 and 2500 we even have to check the OFW
547 * device path as the subvendor ID always defaults to Broadcom
548 * there.
549 */
550 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
551 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
552 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
553 return (0);
554 memset(buf, 0, sizeof(buf));
555 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
556 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
557 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
558 return (0);
559 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
560 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
561 return (0);
562 }
563#endif
564 return (1);
565}
566
567static uint32_t
568bge_readmem_ind(struct bge_softc *sc, int off)
569{
570 device_t dev;
571 uint32_t val;
572
573 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
574 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
575 return (0);
576
577 dev = sc->bge_dev;
578
579 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
580 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
581 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
582 return (val);
583}
584
585static void
586bge_writemem_ind(struct bge_softc *sc, int off, int val)
587{
588 device_t dev;
589
590 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
591 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
592 return;
593
594 dev = sc->bge_dev;
595
596 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
597 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
599}
600
601#ifdef notdef
602static uint32_t
603bge_readreg_ind(struct bge_softc *sc, int off)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
611}
612#endif
613
614static void
615bge_writereg_ind(struct bge_softc *sc, int off, int val)
616{
617 device_t dev;
618
619 dev = sc->bge_dev;
620
621 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
622 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
623}
624
625static void
626bge_writemem_direct(struct bge_softc *sc, int off, int val)
627{
628 CSR_WRITE_4(sc, off, val);
629}
630
631static void
632bge_writembx(struct bge_softc *sc, int off, int val)
633{
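    /*
     * The BCM5906 accesses its mailboxes through the low-priority
     * mailbox window (per the BGE_LPMBX_* register names), so rebase
     * the offset from the standard mailbox region before writing.
     */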
634 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
635 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
636
637 CSR_WRITE_4(sc, off, val);
638}
639
640/*
641 * Map a single buffer address.
642 */
643
644static void
645bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
646{
647 struct bge_dmamap_arg *ctx;
648
649 if (error)
650 return;
651
652 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
653
654 ctx = arg;
655 ctx->bge_busaddr = segs->ds_addr;
656}
657
658static uint8_t
659bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
660{
661 uint32_t access, byte = 0;
662 int i;
663
664 /* Lock. */
665 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
666 for (i = 0; i < 8000; i++) {
667 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
668 break;
669 DELAY(20);
670 }
671 if (i == 8000)
672 return (1);
673
674 /* Enable access. */
675 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
676 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
677
678 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
679 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
680 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
681 DELAY(10);
682 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
683 DELAY(10);
684 break;
685 }
686 }
687
688 if (i == BGE_TIMEOUT * 10) {
689 if_printf(sc->bge_ifp, "nvram read timed out\n");
690 return (1);
691 }
692
693 /* Get result. */
694 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
695
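    /*
     * The controller returns the whole 32-bit word containing the
     * requested address; byte-swap it and shift out the byte selected
     * by the low two address bits (e.g. addr % 4 == 1 yields bits
     * 15:8 of the swapped word).
     */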
696 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
697
698 /* Disable access. */
699 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
700
701 /* Unlock. */
702 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
703 CSR_READ_4(sc, BGE_NVRAM_SWARB);
704
705 return (0);
706}
707
708/*
709 * Read a sequence of bytes from NVRAM.
710 */
711static int
712bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
713{
714 int err = 0, i;
715 uint8_t byte = 0;
716
717 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
718 return (1);
719
720 for (i = 0; i < cnt; i++) {
721 err = bge_nvram_getbyte(sc, off + i, &byte);
722 if (err)
723 break;
724 *(dest + i) = byte;
725 }
726
727 return (err ? 1 : 0);
728}
729
730/*
731 * Read a byte of data stored in the EEPROM at address 'addr.' The
732 * BCM570x supports both the traditional bitbang interface and an
733 * auto access interface for reading the EEPROM. We use the auto
734 * access method.
735 */
736static uint8_t
737bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
738{
739 int i;
740 uint32_t byte = 0;
741
742 /*
743 * Enable use of auto EEPROM access so we can avoid
744 * having to use the bitbang method.
745 */
746 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
747
748 /* Reset the EEPROM, load the clock period. */
749 CSR_WRITE_4(sc, BGE_EE_ADDR,
750 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
751 DELAY(20);
752
753 /* Issue the read EEPROM command. */
754 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
755
756 /* Wait for completion */
757 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
758 DELAY(10);
759 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
760 break;
761 }
762
763 if (i == BGE_TIMEOUT * 10) {
764 device_printf(sc->bge_dev, "EEPROM read timed out\n");
765 return (1);
766 }
767
768 /* Get result. */
769 byte = CSR_READ_4(sc, BGE_EE_DATA);
770
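    /*
     * Unlike the NVRAM path above, no byte swap is applied here; the
     * byte is selected from the data word by the low two address bits
     * alone.
     */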
771 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
772
773 return (0);
774}
775
776/*
777 * Read a sequence of bytes from the EEPROM.
778 */
779static int
780bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
781{
782 int i, error = 0;
783 uint8_t byte = 0;
784
785 for (i = 0; i < cnt; i++) {
786 error = bge_eeprom_getbyte(sc, off + i, &byte);
787 if (error)
788 break;
789 *(dest + i) = byte;
790 }
791
792 return (error ? 1 : 0);
793}
794
795static int
796bge_miibus_readreg(device_t dev, int phy, int reg)
797{
798 struct bge_softc *sc;
799 uint32_t val;
800 int i;
801
802 sc = device_get_softc(dev);
803
804 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
805 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
806 CSR_WRITE_4(sc, BGE_MI_MODE,
807 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
808 DELAY(80);
809 }
810
811 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
812 BGE_MIPHY(phy) | BGE_MIREG(reg));
813
814 /* Poll for the PHY register access to complete. */
815 for (i = 0; i < BGE_TIMEOUT; i++) {
816 DELAY(10);
817 val = CSR_READ_4(sc, BGE_MI_COMM);
818 if ((val & BGE_MICOMM_BUSY) == 0) {
819 DELAY(5);
820 val = CSR_READ_4(sc, BGE_MI_COMM);
821 break;
822 }
823 }
824
825 if (i == BGE_TIMEOUT) {
826 device_printf(sc->bge_dev,
827 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
828 phy, reg, val);
829 val = 0;
830 }
831
832 /* Restore the autopoll bit if necessary. */
833 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
834 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
835 DELAY(80);
836 }
837
838 if (val & BGE_MICOMM_READFAIL)
839 return (0);
840
841 return (val & 0xFFFF);
842}
843
844static int
845bge_miibus_writereg(device_t dev, int phy, int reg, int val)
846{
847 struct bge_softc *sc;
848 int i;
849
850 sc = device_get_softc(dev);
851
852 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
853 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
854 return (0);
855
856 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
857 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
858 CSR_WRITE_4(sc, BGE_MI_MODE,
859 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
860 DELAY(80);
861 }
862
863 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
864 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
865
866 for (i = 0; i < BGE_TIMEOUT; i++) {
867 DELAY(10);
868 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
869 DELAY(5);
870 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
871 break;
872 }
873 }
874
875 /* Restore the autopoll bit if necessary. */
876 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
877 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
878 DELAY(80);
879 }
880
881 if (i == BGE_TIMEOUT)
882 device_printf(sc->bge_dev,
883 "PHY write timed out (phy %d, reg %d, val %d)\n",
884 phy, reg, val);
885
886 return (0);
887}
888
889static void
890bge_miibus_statchg(device_t dev)
891{
892 struct bge_softc *sc;
893 struct mii_data *mii;
894 sc = device_get_softc(dev);
895 mii = device_get_softc(sc->bge_miibus);
896
897 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
898 (IFM_ACTIVE | IFM_AVALID)) {
899 switch (IFM_SUBTYPE(mii->mii_media_active)) {
900 case IFM_10_T:
901 case IFM_100_TX:
902 sc->bge_link = 1;
903 break;
904 case IFM_1000_T:
905 case IFM_1000_SX:
906 case IFM_2500_SX:
907 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
908 sc->bge_link = 1;
909 else
910 sc->bge_link = 0;
911 break;
912 default:
913 sc->bge_link = 0;
914 break;
915 }
916 } else
917 sc->bge_link = 0;
918 if (sc->bge_link == 0)
919 return;
920 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
921 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
922 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
923 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
924 else
925 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
926
 927 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
928 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
929 if ((IFM_OPTIONS(mii->mii_media_active) &
930 IFM_ETH_TXPAUSE) != 0)
931 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
932 else
933 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
934 if ((IFM_OPTIONS(mii->mii_media_active) &
935 IFM_ETH_RXPAUSE) != 0)
936 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
937 else
938 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
939 } else {
940 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
941 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
942 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
943 }
944}
945
946/*
 947 * Initialize a standard receive ring descriptor.
948 */
949static int
950bge_newbuf_std(struct bge_softc *sc, int i)
951{
952 struct mbuf *m;
953 struct bge_rx_bd *r;
954 bus_dma_segment_t segs[1];
955 bus_dmamap_t map;
956 int error, nsegs;
957
958 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
959 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
960 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
961 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
962 if (m == NULL)
963 return (ENOBUFS);
964 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
965 } else {
966 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
967 if (m == NULL)
968 return (ENOBUFS);
969 m->m_len = m->m_pkthdr.len = MCLBYTES;
970 }
971 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
972 m_adj(m, ETHER_ALIGN);
973
974 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
975 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
976 if (error != 0) {
977 m_freem(m);
978 return (error);
979 }
980 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
981 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
982 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
983 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
984 sc->bge_cdata.bge_rx_std_dmamap[i]);
985 }
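    /*
     * The new mbuf was loaded into the spare map above, so swap it
     * with the slot's map; the old map becomes the new spare.  A load
     * failure therefore never leaves the slot without a mapped buffer.
     */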
986 map = sc->bge_cdata.bge_rx_std_dmamap[i];
987 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
988 sc->bge_cdata.bge_rx_std_sparemap = map;
989 sc->bge_cdata.bge_rx_std_chain[i] = m;
990 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
991 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
992 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
993 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
994 r->bge_flags = BGE_RXBDFLAG_END;
995 r->bge_len = segs[0].ds_len;
996 r->bge_idx = i;
997
998 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
999 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1000
1001 return (0);
1002}
1003
1004/*
1005 * Initialize a jumbo receive ring descriptor. This allocates
1006 * a jumbo buffer from the pool managed internally by the driver.
1007 */
1008static int
1009bge_newbuf_jumbo(struct bge_softc *sc, int i)
1010{
1011 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1012 bus_dmamap_t map;
1013 struct bge_extrx_bd *r;
1014 struct mbuf *m;
1015 int error, nsegs;
1016
1017 MGETHDR(m, M_DONTWAIT, MT_DATA);
1018 if (m == NULL)
1019 return (ENOBUFS);
1020
1021 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1022 if (!(m->m_flags & M_EXT)) {
1023 m_freem(m);
1024 return (ENOBUFS);
1025 }
1026 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1027 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1028 m_adj(m, ETHER_ALIGN);
1029
1030 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1031 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1032 if (error != 0) {
1033 m_freem(m);
1034 return (error);
1035 }
1036
1037 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1038 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1039 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1040 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1041 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1042 }
1043 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1044 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1045 sc->bge_cdata.bge_rx_jumbo_sparemap;
1046 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1047 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1048 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1052
1053 /*
1054 * Fill in the extended RX buffer descriptor.
1055 */
1056 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1057 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1058 r->bge_idx = i;
1059 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
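    /*
     * The cases below intentionally fall through so that a buffer
     * split into N segments fills descriptors N-1 down to 0.
     */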
1060 switch (nsegs) {
1061 case 4:
1062 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1063 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1064 r->bge_len3 = segs[3].ds_len;
1065 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1066 case 3:
1067 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1068 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1069 r->bge_len2 = segs[2].ds_len;
1070 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1071 case 2:
1072 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1073 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1074 r->bge_len1 = segs[1].ds_len;
1075 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1076 case 1:
1077 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1078 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1079 r->bge_len0 = segs[0].ds_len;
1080 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1081 break;
1082 default:
1083 panic("%s: %d segments\n", __func__, nsegs);
1084 }
1085
1086 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1087 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1088
1089 return (0);
1090}
1091
1092static int
1093bge_init_rx_ring_std(struct bge_softc *sc)
1094{
1095 int error, i;
1096
1097 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1098 sc->bge_std = 0;
1099 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1100 if ((error = bge_newbuf_std(sc, i)) != 0)
1101 return (error);
1102 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1103 }
1104
1105 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1106 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1107
1108 sc->bge_std = 0;
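    /* The producer index handed to the chip is the last initialized descriptor. */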
1109 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1110
1111 return (0);
1112}
1113
1114static void
1115bge_free_rx_ring_std(struct bge_softc *sc)
1116{
1117 int i;
1118
1119 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1120 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1121 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1122 sc->bge_cdata.bge_rx_std_dmamap[i],
1123 BUS_DMASYNC_POSTREAD);
1124 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1125 sc->bge_cdata.bge_rx_std_dmamap[i]);
1126 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1127 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1128 }
1129 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1130 sizeof(struct bge_rx_bd));
1131 }
1132}
1133
1134static int
1135bge_init_rx_ring_jumbo(struct bge_softc *sc)
1136{
1137 struct bge_rcb *rcb;
1138 int error, i;
1139
1140 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1141 sc->bge_jumbo = 0;
1142 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1143 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1144 return (error);
1145 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1146 }
1147
1148 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1149 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1150
1151 sc->bge_jumbo = 0;
1152
1153 /* Enable the jumbo receive producer ring. */
1154 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1155 rcb->bge_maxlen_flags =
1156 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1157 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1158
1159 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1160
1161 return (0);
1162}
1163
1164static void
1165bge_free_rx_ring_jumbo(struct bge_softc *sc)
1166{
1167 int i;
1168
1169 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1170 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1171 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1172 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1173 BUS_DMASYNC_POSTREAD);
1174 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1175 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1176 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1177 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1178 }
1179 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1180 sizeof(struct bge_extrx_bd));
1181 }
1182}
1183
1184static void
1185bge_free_tx_ring(struct bge_softc *sc)
1186{
1187 int i;
1188
1189 if (sc->bge_ldata.bge_tx_ring == NULL)
1190 return;
1191
1192 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1193 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1194 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1195 sc->bge_cdata.bge_tx_dmamap[i],
1196 BUS_DMASYNC_POSTWRITE);
1197 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1198 sc->bge_cdata.bge_tx_dmamap[i]);
1199 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1200 sc->bge_cdata.bge_tx_chain[i] = NULL;
1201 }
1202 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1203 sizeof(struct bge_tx_bd));
1204 }
1205}
1206
1207static int
1208bge_init_tx_ring(struct bge_softc *sc)
1209{
1210 sc->bge_txcnt = 0;
1211 sc->bge_tx_saved_considx = 0;
1212
1213 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1214 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1215 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1216
1217 /* Initialize transmit producer index for host-memory send ring. */
1218 sc->bge_tx_prodidx = 0;
1219 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1220
1221 /* 5700 b2 errata */
1222 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1223 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1224
1225 /* NIC-memory send ring not used; initialize to zero. */
1226 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1227 /* 5700 b2 errata */
1228 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1229 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1230
1231 return (0);
1232}
1233
1234static void
1235bge_setpromisc(struct bge_softc *sc)
1236{
1237 struct ifnet *ifp;
1238
1239 BGE_LOCK_ASSERT(sc);
1240
1241 ifp = sc->bge_ifp;
1242
1243 /* Enable or disable promiscuous mode as needed. */
1244 if (ifp->if_flags & IFF_PROMISC)
1245 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1246 else
1247 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1248}
1249
1250static void
1251bge_setmulti(struct bge_softc *sc)
1252{
1253 struct ifnet *ifp;
1254 struct ifmultiaddr *ifma;
1255 uint32_t hashes[4] = { 0, 0, 0, 0 };
1256 int h, i;
1257
1258 BGE_LOCK_ASSERT(sc);
1259
1260 ifp = sc->bge_ifp;
1261
1262 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1263 for (i = 0; i < 4; i++)
1264 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1265 return;
1266 }
1267
1268 /* First, zot all the existing filters. */
1269 for (i = 0; i < 4; i++)
1270 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1271
1272 /* Now program new ones. */
1273 if_maddr_rlock(ifp);
1274 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1275 if (ifma->ifma_addr->sa_family != AF_LINK)
1276 continue;
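        /*
         * The low 7 bits of the little-endian CRC index a 128-bit
         * hash table: bits 6-5 select one of the four MAR registers,
         * bits 4-0 the bit within it.
         */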
1277 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1278 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1279 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1280 }
1281 if_maddr_runlock(ifp);
1282
1283 for (i = 0; i < 4; i++)
1284 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1285}
1286
1287static void
1288bge_setvlan(struct bge_softc *sc)
1289{
1290 struct ifnet *ifp;
1291
1292 BGE_LOCK_ASSERT(sc);
1293
1294 ifp = sc->bge_ifp;
1295
1296 /* Enable or disable VLAN tag stripping as needed. */
1297 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1298 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1299 else
1300 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1301}
1302
1303static void
1304bge_sig_pre_reset(struct bge_softc *sc, int type)
1305{
1306
1307 /*
1308 * Some chips don't like this so only do this if ASF is enabled
1309 */
1310 if (sc->bge_asf_mode)
1311 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1312
1313 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1314 switch (type) {
1315 case BGE_RESET_START:
1316 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1317 BGE_FW_DRV_STATE_START);
1318 break;
1319 case BGE_RESET_STOP:
1320 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1321 BGE_FW_DRV_STATE_UNLOAD);
1322 break;
1323 }
1324 }
1325}
1326
1327static void
1328bge_sig_post_reset(struct bge_softc *sc, int type)
1329{
1330
1331 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1332 switch (type) {
1333 case BGE_RESET_START:
1334 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1335 BGE_FW_DRV_STATE_START_DONE);
1336 /* START DONE */
1337 break;
1338 case BGE_RESET_STOP:
1339 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1340 BGE_FW_DRV_STATE_UNLOAD_DONE);
1341 break;
1342 }
1343 }
1344}
1345
1346static void
1347bge_sig_legacy(struct bge_softc *sc, int type)
1348{
1349
1350 if (sc->bge_asf_mode) {
1351 switch (type) {
1352 case BGE_RESET_START:
1353 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1354 BGE_FW_DRV_STATE_START);
1355 break;
1356 case BGE_RESET_STOP:
1357 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1358 BGE_FW_DRV_STATE_UNLOAD);
1359 break;
1360 }
1361 }
1362}
1363
1364static void
1365bge_stop_fw(struct bge_softc *sc)
1366{
1367 int i;
1368
1369 if (sc->bge_asf_mode) {
1370 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1371 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1372 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1373
1374 for (i = 0; i < 100; i++ ) {
1375 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1376 BGE_RX_CPU_DRV_EVENT))
1377 break;
1378 DELAY(10);
1379 }
1380 }
1381}
1382
1383static uint32_t
1384bge_dma_swap_options(struct bge_softc *sc)
1385{
1386 uint32_t dma_options;
1387
1388 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1389 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1390#if BYTE_ORDER == BIG_ENDIAN
1391 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1392#endif
1393 if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
1394 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1395 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1396 BGE_MODECTL_HTX2B_ENABLE;
1397
1398 return (dma_options);
1399}
1400
1401/*
1402 * Do endian, PCI and DMA initialization.
1403 */
1404static int
1405bge_chipinit(struct bge_softc *sc)
1406{
1407 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1408 uint16_t val;
1409 int i;
1410
1411 /* Set endianness before we access any non-PCI registers. */
1412 misc_ctl = BGE_INIT;
1413 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1414 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1415 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1416
1417 /* Clear the MAC control register */
1418 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1419
1420 /*
1421 * Clear the MAC statistics block in the NIC's
1422 * internal memory.
1423 */
1424 for (i = BGE_STATS_BLOCK;
1425 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1426 BGE_MEMWIN_WRITE(sc, i, 0);
1427
1428 for (i = BGE_STATUS_BLOCK;
1429 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1430 BGE_MEMWIN_WRITE(sc, i, 0);
1431
1432 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1433 /*
1434 * Fix data corruption caused by non-qword write with WB.
1435 * Fix master abort in PCI mode.
1436 * Fix PCI latency timer.
1437 */
1438 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1439 val |= (1 << 10) | (1 << 12) | (1 << 13);
1440 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1441 }
1442
1443 /*
1444 * Set up the PCI DMA control register.
1445 */
1446 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1447 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1448 if (sc->bge_flags & BGE_FLAG_PCIE) {
1449 /* Read watermark not used, 128 bytes for write. */
1450 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1451 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1452 if (BGE_IS_5714_FAMILY(sc)) {
1453 /* 256 bytes for read and write. */
1454 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1455 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1456 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1457 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1458 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1459 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1460 /*
1461 * In the BCM5703, the DMA read watermark should
1462 * be set to less than or equal to the maximum
1463 * memory read byte count of the PCI-X command
1464 * register.
1465 */
1466 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1467 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1468 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1469 /* 1536 bytes for read, 384 bytes for write. */
1470 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1471 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1472 } else {
1473 /* 384 bytes for read and write. */
1474 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1475 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1476 0x0F;
1477 }
1478 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1479 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1480 uint32_t tmp;
1481
1482 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1483 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1484 if (tmp == 6 || tmp == 7)
1485 dma_rw_ctl |=
1486 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1487
1488 /* Set PCI-X DMA write workaround. */
1489 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1490 }
1491 } else {
1492 /* Conventional PCI bus: 256 bytes for read and write. */
1493 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1494 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1495
1496 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1497 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1498 dma_rw_ctl |= 0x0F;
1499 }
1500 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1501 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1502 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1503 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1504 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1505 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1506 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1507 if (BGE_IS_5717_PLUS(sc)) {
1508 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1509 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1510 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1511 /*
1512 * Enable HW workaround for controllers that misinterpret
1513 * a status tag update and leave interrupts permanently
1514 * disabled.
1515 */
1516 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1517 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1518 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1519 }
1520 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1521
1522 /*
1523 * Set up general mode register.
1524 */
1525 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1526 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1527
1528 /*
 1529 * The BCM5701 B5 has a bug causing data corruption when using
1530 * 64-bit DMA reads, which can be terminated early and then
1531 * completed later as 32-bit accesses, in combination with
1532 * certain bridges.
1533 */
1534 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1535 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1536 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1537
1538 /*
1539 * Tell the firmware the driver is running
1540 */
1541 if (sc->bge_asf_mode & ASF_STACKUP)
1542 mode_ctl |= BGE_MODECTL_STACKUP;
1543
1544 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1545
1546 /*
1547 * Disable memory write invalidate. Apparently it is not supported
1548 * properly by these devices. Also ensure that INTx isn't disabled,
1549 * as these chips need it even when using MSI.
1550 */
1551 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1552 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1553
 1554 /* Set the timer prescaler (always 66MHz) */
1555 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1556
1557 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1558 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1559 DELAY(40); /* XXX */
1560
1561 /* Put PHY into ready state */
1562 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1563 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1564 DELAY(40);
1565 }
1566
1567 return (0);
1568}
1569
1570static int
1571bge_blockinit(struct bge_softc *sc)
1572{
1573 struct bge_rcb *rcb;
1574 bus_size_t vrcb;
1575 bge_hostaddr taddr;
1576 uint32_t dmactl, val;
1577 int i, limit;
1578
1579 /*
1580 * Initialize the memory window pointer register so that
1581 * we can access the first 32K of internal NIC RAM. This will
1582 * allow us to set up the TX send ring RCBs and the RX return
1583 * ring RCBs, plus other things which live in NIC memory.
1584 */
1585 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1586
1587 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1588
1589 if (!(BGE_IS_5705_PLUS(sc))) {
1590 /* Configure mbuf memory pool */
1591 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1592 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1593 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1594 else
1595 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1596
1597 /* Configure DMA resource pool */
1598 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1599 BGE_DMA_DESCRIPTORS);
1600 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1601 }
1602
1603 /* Configure mbuf pool watermarks */
1604 if (BGE_IS_5717_PLUS(sc)) {
1605 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1606 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1607 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1608 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1609 } else {
1610 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1611 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1612 }
1613 } else if (!BGE_IS_5705_PLUS(sc)) {
1614 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1615 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1616 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1617 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1618 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1619 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1620 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1621 } else {
1622 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1623 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1624 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1625 }
1626
1627 /* Configure DMA resource watermarks */
1628 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1629 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1630
1631 /* Enable buffer manager */
1632 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1633 /*
1634 * Change the arbitration algorithm of TXMBUF read request to
1635 * round-robin instead of priority based for BCM5719. When
1636 * TXFIFO is almost empty, RDMA will hold its request until
1637 * TXFIFO is not almost empty.
1638 */
1639 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1640 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1641 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1642
1643 /* Poll for buffer manager start indication */
1644 for (i = 0; i < BGE_TIMEOUT; i++) {
1645 DELAY(10);
1646 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1647 break;
1648 }
1649
1650 if (i == BGE_TIMEOUT) {
1651 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1652 return (ENXIO);
1653 }
1654
1655 /* Enable flow-through queues */
1656 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1657 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1658
1659 /* Wait until queue initialization is complete */
1660 for (i = 0; i < BGE_TIMEOUT; i++) {
1661 DELAY(10);
1662 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1663 break;
1664 }
1665
1666 if (i == BGE_TIMEOUT) {
1667 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1668 return (ENXIO);
1669 }
1670
1671 /*
1672 * Summary of rings supported by the controller:
1673 *
1674 * Standard Receive Producer Ring
1675 * - This ring is used to feed receive buffers for "standard"
1676 * sized frames (typically 1536 bytes) to the controller.
1677 *
1678 * Jumbo Receive Producer Ring
1679 * - This ring is used to feed receive buffers for jumbo sized
1680 * frames (i.e. anything bigger than the "standard" frames)
1681 * to the controller.
1682 *
1683 * Mini Receive Producer Ring
1684 * - This ring is used to feed receive buffers for "mini"
1685 * sized frames to the controller.
1686 * - This feature required external memory for the controller
1687 * but was never used in a production system. Should always
1688 * be disabled.
1689 *
1690 * Receive Return Ring
1691 * - After the controller has placed an incoming frame into a
1692 * receive buffer that buffer is moved into a receive return
1693	 * ring. The driver is then responsible for passing the
1694 * buffer up to the stack. Many versions of the controller
1695 * support multiple RR rings.
1696 *
1697 * Send Ring
1698 * - This ring is used for outgoing frames. Many versions of
1699 * the controller support multiple send rings.
1700 */
1701
1702 /* Initialize the standard receive producer ring control block. */
1703 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1704 rcb->bge_hostaddr.bge_addr_lo =
1705 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1706 rcb->bge_hostaddr.bge_addr_hi =
1707 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1708 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1709 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1710 if (BGE_IS_5717_PLUS(sc)) {
1711 /*
1712 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1713 * Bits 15-2 : Maximum RX frame size
1714	 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1715 * Bit 0 : Reserved
1716 */
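		/* 512-entry ring; the shift puts the frame size in bits 15-2. */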
1717 rcb->bge_maxlen_flags =
1718 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1719 } else if (BGE_IS_5705_PLUS(sc)) {
1720 /*
1721 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1722 * Bits 15-2 : Reserved (should be 0)
1723 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1724 * Bit 0 : Reserved
1725 */
1726 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1727 } else {
1728 /*
1729 * Ring size is always XXX entries
1730 * Bits 31-16: Maximum RX frame size
1731 * Bits 15-2 : Reserved (should be 0)
1732 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1733 * Bit 0 : Reserved
1734 */
1735 rcb->bge_maxlen_flags =
1736 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1737 }
1738 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1739 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1740 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1741 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1742 else
1743 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1744 /* Write the standard receive producer ring control block. */
1745 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1746 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1747 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1748 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1749
1750 /* Reset the standard receive producer ring producer index. */
1751 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1752
1753 /*
1754 * Initialize the jumbo RX producer ring control
1755 * block. We set the 'ring disabled' bit in the
1756 * flags field until we're actually ready to start
1757 * using this ring (i.e. once we set the MTU
1758 * high enough to require it).
1759 */
1760 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1761 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1762 /* Get the jumbo receive producer ring RCB parameters. */
1763 rcb->bge_hostaddr.bge_addr_lo =
1764 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1765 rcb->bge_hostaddr.bge_addr_hi =
1766 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1767 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1768 sc->bge_cdata.bge_rx_jumbo_ring_map,
1769 BUS_DMASYNC_PREREAD);
1770 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1771 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1772 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1773 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1774 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1775 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1776 else
1777 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1778 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1779 rcb->bge_hostaddr.bge_addr_hi);
1780 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1781 rcb->bge_hostaddr.bge_addr_lo);
1782 /* Program the jumbo receive producer ring RCB parameters. */
1783 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1784 rcb->bge_maxlen_flags);
1785 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1786 /* Reset the jumbo receive producer ring producer index. */
1787 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1788 }
1789
1790 /* Disable the mini receive producer ring RCB. */
1791 if (BGE_IS_5700_FAMILY(sc)) {
1792 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1793 rcb->bge_maxlen_flags =
1794 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1795 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1796 rcb->bge_maxlen_flags);
1797 /* Reset the mini receive producer ring producer index. */
1798 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1799 }
1800
1801 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1802 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1803 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1804 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1805 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1806 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1807 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1808 }
1809 /*
1810 * The BD ring replenish thresholds control how often the
1811 * hardware fetches new BD's from the producer rings in host
1812 * memory. Setting the value too low on a busy system can
1813	 * starve the hardware and reduce the throughput.
1814	 *
1815	 * Set the BD ring replenish thresholds. The recommended
1816 * values are 1/8th the number of descriptors allocated to
1817 * each ring.
1818 * XXX The 5754 requires a lower threshold, so it might be a
1819 * requirement of all 575x family chips. The Linux driver sets
1820 * the lower threshold for all 5705 family chips as well, but there
1821 * are reports that it might not need to be so strict.
1822 *
1823 * XXX Linux does some extra fiddling here for the 5906 parts as
1824 * well.
1825 */
1826 if (BGE_IS_5705_PLUS(sc))
1827 val = 8;
1828 else
1829 val = BGE_STD_RX_RING_CNT / 8;
1830 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1831 if (BGE_IS_JUMBO_CAPABLE(sc))
1832 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1833 BGE_JUMBO_RX_RING_CNT/8);
1834 if (BGE_IS_5717_PLUS(sc)) {
1835 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1836 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1837 }
1838
1839 /*
1840 * Disable all send rings by setting the 'ring disabled' bit
1841 * in the flags field of all the TX send ring control blocks,
1842 * located in NIC memory.
1843 */
1844 if (!BGE_IS_5705_PLUS(sc))
1845 /* 5700 to 5704 had 16 send rings. */
1846 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1847 else
1848 limit = 1;
1849 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1850 for (i = 0; i < limit; i++) {
1851 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1852 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1853 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1854 vrcb += sizeof(struct bge_rcb);
1855 }
1856
1857 /* Configure send ring RCB 0 (we use only the first ring) */
1858 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1859 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1860 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1861 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1862 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1863 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1864 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1865 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1866 else
1867 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1868 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1869 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1870 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1871
1872 /*
1873 * Disable all receive return rings by setting the
1874	 * 'ring disabled' bit in the flags field of all the receive
1875 * return ring control blocks, located in NIC memory.
1876 */
1877 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1878 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1879 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1880 /* Should be 17, use 16 until we get an SRAM map. */
1881 limit = 16;
1882 } else if (!BGE_IS_5705_PLUS(sc))
1883 limit = BGE_RX_RINGS_MAX;
1884 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1885 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1886 limit = 4;
1887 else
1888 limit = 1;
1889 /* Disable all receive return rings. */
1890 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1891 for (i = 0; i < limit; i++) {
1892 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1893 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1894 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1895 BGE_RCB_FLAG_RING_DISABLED);
1896 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
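		/* Consumer mailboxes sit sizeof(uint64_t) apart; clear each index. */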
1897 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1898 (i * (sizeof(uint64_t))), 0);
1899 vrcb += sizeof(struct bge_rcb);
1900 }
1901
1902 /*
1903 * Set up receive return ring 0. Note that the NIC address
1904 * for RX return rings is 0x0. The return rings live entirely
1905 * within the host, so the nicaddr field in the RCB isn't used.
1906 */
1907 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1908 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1909 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1910 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1911 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1912 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1913 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1914
1915 /* Set random backoff seed for TX */
1916 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1917 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1918 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1919 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1920 BGE_TX_BACKOFF_SEED_MASK);
1921
1922 /* Set inter-packet gap */
1923 val = 0x2620;
1924 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1925 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1926 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1927 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1928
1929 /*
1930 * Specify which ring to use for packets that don't match
1931 * any RX rules.
1932 */
1933 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1934
1935 /*
1936 * Configure number of RX lists. One interrupt distribution
1937 * list, sixteen active lists, one bad frames class.
1938 */
1939 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1940
1941	/* Initialize RX list placement stats mask. */
1942 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1943 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1944
1945 /* Disable host coalescing until we get it set up */
1946 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1947
1948 /* Poll to make sure it's shut down. */
1949 for (i = 0; i < BGE_TIMEOUT; i++) {
1950 DELAY(10);
1951 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1952 break;
1953 }
1954
1955 if (i == BGE_TIMEOUT) {
1956 device_printf(sc->bge_dev,
1957 "host coalescing engine failed to idle\n");
1958 return (ENXIO);
1959 }
1960
1961 /* Set up host coalescing defaults */
1962 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1963 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1964 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1965 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1966 if (!(BGE_IS_5705_PLUS(sc))) {
1967 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1968 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1969 }
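	/* Coalesce at most one BD while an interrupt is pending. */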
1970 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1971 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1972
1973 /* Set up address of statistics block */
1974 if (!(BGE_IS_5705_PLUS(sc))) {
1975 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1976 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1977 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1978 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1979 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1980 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1981 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1982 }
1983
1984 /* Set up address of status block */
1985 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1986 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1987 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1988 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1989
1990 /* Set up status block size. */
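	/* BCM5700 steppings other than C0 need the full-sized status block. */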
1991 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1992 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1993 val = BGE_STATBLKSZ_FULL;
1994 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1995 } else {
1996 val = BGE_STATBLKSZ_32BYTE;
1997 bzero(sc->bge_ldata.bge_status_block, 32);
1998 }
1999 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2000 sc->bge_cdata.bge_status_map,
2001 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2002
2003 /* Turn on host coalescing state machine */
2004 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2005
2006 /* Turn on RX BD completion state machine and enable attentions */
2007 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2008 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2009
2010 /* Turn on RX list placement state machine */
2011 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2012
2013 /* Turn on RX list selector state machine. */
2014 if (!(BGE_IS_5705_PLUS(sc)))
2015 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2016
2017 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2018 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2019 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2020 BGE_MACMODE_FRMHDR_DMA_ENB;
2021
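	/* Pick the port mode to match the PHY attachment: TBI, GMII (SerDes) or MII. */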
2022 if (sc->bge_flags & BGE_FLAG_TBI)
2023 val |= BGE_PORTMODE_TBI;
2024 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2025 val |= BGE_PORTMODE_GMII;
2026 else
2027 val |= BGE_PORTMODE_MII;
2028
2029 /* Turn on DMA, clear stats */
2030 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2031
2032 /* Set misc. local control, enable interrupts on attentions */
2033 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2034
2035#ifdef notdef
2036 /* Assert GPIO pins for PHY reset */
2037 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2038 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2039 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2040 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2041#endif
2042
2043 /* Turn on DMA completion state machine */
2044 if (!(BGE_IS_5705_PLUS(sc)))
2045 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2046
2047 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2048
2049 /* Enable host coalescing bug fix. */
2050 if (BGE_IS_5755_PLUS(sc))
2051 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2052
2053 /* Request larger DMA burst size to get better performance. */
2054 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2055 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2056
2057 /* Turn on write DMA state machine */
2058 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2059 DELAY(40);
2060
2061 /* Turn on read DMA state machine */
2062 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2063
2064 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2065 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2066
2067 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2068 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2069 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2070 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2071 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2072 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2073 if (sc->bge_flags & BGE_FLAG_PCIE)
2074 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2075 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2076 val |= BGE_RDMAMODE_TSO4_ENABLE;
2077 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2078 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2079 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2080 val |= BGE_RDMAMODE_TSO6_ENABLE;
2081 }
2082
2083 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2084 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2085 BGE_RDMAMODE_H2BNC_VLAN_DET;
2086 /*
2087 * Allow multiple outstanding read requests from
2088	 * the non-LSO read DMA engine.
2089 */
2090 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2091 }
2092
2093 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2094 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2095 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2096 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2097 BGE_IS_5717_PLUS(sc)) {
2098 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2099 /*
2100 * Adjust tx margin to prevent TX data corruption and
2101 * fix internal FIFO overflow.
2102 */
2103 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2104 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2105 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2106 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2107 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2108 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2109 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2110 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2111 }
2112 /*
2113 * Enable fix for read DMA FIFO overruns.
2114 * The fix is to limit the number of RX BDs
2115	 * the hardware would fetch at a time.
2116 */
2117 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2118 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2119 }
2120
2121 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2122 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2123 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2124 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2125 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2126 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2127 /*
2128 * Allow 4KB burst length reads for non-LSO frames.
2129 * Enable 512B burst length reads for buffer descriptors.
2130 */
2131 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2132 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2133 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2134 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2135 }
2136
2137 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2138 DELAY(40);
2139
2140 /* Turn on RX data completion state machine */
2141 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2142
2143 /* Turn on RX BD initiator state machine */
2144 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2145
2146 /* Turn on RX data and RX BD initiator state machine */
2147 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2148
2149 /* Turn on Mbuf cluster free state machine */
2150 if (!(BGE_IS_5705_PLUS(sc)))
2151 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2152
2153 /* Turn on send BD completion state machine */
2154 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2155
2156 /* Turn on send data completion state machine */
2157 val = BGE_SDCMODE_ENABLE;
2158 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2159 val |= BGE_SDCMODE_CDELAY;
2160 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2161
2162 /* Turn on send data initiator state machine */
2163 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2164 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2165 BGE_SDIMODE_HW_LSO_PRE_DMA);
2166 else
2167 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2168
2169 /* Turn on send BD initiator state machine */
2170 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2171
2172 /* Turn on send BD selector state machine */
2173 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2174
2175 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2176 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2177 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2178
2179 /* ack/clear link change events */
2180 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2181 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2182 BGE_MACSTAT_LINK_CHANGED);
2183 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2184
2185 /*
2186 * Enable attention when the link has changed state for
2187 * devices that use auto polling.
2188 */
2189 if (sc->bge_flags & BGE_FLAG_TBI) {
2190 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2191 } else {
2192 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2193 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2194 DELAY(80);
2195 }
2196 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2197 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2198 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2199 BGE_EVTENB_MI_INTERRUPT);
2200 }
2201
2202 /*
2203 * Clear any pending link state attention.
2204 * Otherwise some link state change events may be lost until attention
2205 * is cleared by bge_intr() -> bge_link_upd() sequence.
2206 * It's not necessary on newer BCM chips - perhaps enabling link
2207 * state change attentions implies clearing pending attention.
2208 */
2209 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2210 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2211 BGE_MACSTAT_LINK_CHANGED);
2212
2213 /* Enable link state change attentions. */
2214 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2215
2216 return (0);
2217}
2218
2219const struct bge_revision *
2220bge_lookup_rev(uint32_t chipid)
2221{
2222 const struct bge_revision *br;
2223
2224 for (br = bge_revisions; br->br_name != NULL; br++) {
2225 if (br->br_chipid == chipid)
2226 return (br);
2227 }
2228
2229 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2230 if (br->br_chipid == BGE_ASICREV(chipid))
2231 return (br);
2232 }
2233
2234 return (NULL);
2235}
2236
2237const struct bge_vendor *
2238bge_lookup_vendor(uint16_t vid)
2239{
2240 const struct bge_vendor *v;
2241
2242 for (v = bge_vendors; v->v_name != NULL; v++)
2243 if (v->v_id == vid)
2244 return (v);
2245
2246 panic("%s: unknown vendor %d", __func__, vid);
2247 return (NULL);
2248}
2249
2250/*
2251 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2252 * against our list and return its name if we find a match.
2253 *
2254 * Note that since the Broadcom controller contains VPD support, we
2255 * try to get the device name string from the controller itself instead
2256 * of the compiled-in string. It guarantees we'll always announce the
2257 * right product name. We fall back to the compiled-in string when
2258 * VPD is unavailable or corrupt.
2259 */
2260static int
2261bge_probe(device_t dev)
2262{
2263 char buf[96];
2264 char model[64];
2265 const struct bge_revision *br;
2266 const char *pname;
2267 struct bge_softc *sc = device_get_softc(dev);
2268 const struct bge_type *t = bge_devs;
2269 const struct bge_vendor *v;
2270 uint32_t id;
2271 uint16_t did, vid;
2272
2273 sc->bge_dev = dev;
2274 vid = pci_get_vendor(dev);
2275 did = pci_get_device(dev);
2276	while (t->bge_vid != 0) {
2277 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2278 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2279 BGE_PCIMISCCTL_ASICREV_SHIFT;
2280 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2281 /*
2282	 * Find the ASIC revision. Different chips
2283 * use different registers.
2284 */
2285 switch (pci_get_device(dev)) {
2286 case BCOM_DEVICEID_BCM5717:
2287 case BCOM_DEVICEID_BCM5718:
2288 case BCOM_DEVICEID_BCM5719:
2289 case BCOM_DEVICEID_BCM5720:
2290 id = pci_read_config(dev,
2291 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2292 break;
2293 case BCOM_DEVICEID_BCM57761:
2294 case BCOM_DEVICEID_BCM57765:
2295 case BCOM_DEVICEID_BCM57781:
2296 case BCOM_DEVICEID_BCM57785:
2297 case BCOM_DEVICEID_BCM57791:
2298 case BCOM_DEVICEID_BCM57795:
2299 id = pci_read_config(dev,
2300 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2301 break;
2302 default:
2303 id = pci_read_config(dev,
2304 BGE_PCI_PRODID_ASICREV, 4);
2305 }
2306 }
2307 br = bge_lookup_rev(id);
2308 v = bge_lookup_vendor(vid);
2309 if (bge_has_eaddr(sc) &&
2310 pci_get_vpd_ident(dev, &pname) == 0)
2311 snprintf(model, 64, "%s", pname);
2312 else
2313 snprintf(model, 64, "%s %s", v->v_name,
2314 br != NULL ? br->br_name :
2315 "NetXtreme Ethernet Controller");
2316 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2317 br != NULL ? "" : "unknown ", id);
2318 device_set_desc_copy(dev, buf);
2319 return (0);
2320 }
2321 t++;
2322 }
2323
2324 return (ENXIO);
2325}
2326
2327static void
2328bge_dma_free(struct bge_softc *sc)
2329{
2330 int i;
2331
2332 /* Destroy DMA maps for RX buffers. */
2333 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2334 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2335 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2336 sc->bge_cdata.bge_rx_std_dmamap[i]);
2337 }
2338 if (sc->bge_cdata.bge_rx_std_sparemap)
2339 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2340 sc->bge_cdata.bge_rx_std_sparemap);
2341
2342 /* Destroy DMA maps for jumbo RX buffers. */
2343 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2344 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2345 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2346 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2347 }
2348 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2349 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2350 sc->bge_cdata.bge_rx_jumbo_sparemap);
2351
2352 /* Destroy DMA maps for TX buffers. */
2353 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2354 if (sc->bge_cdata.bge_tx_dmamap[i])
2355 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2356 sc->bge_cdata.bge_tx_dmamap[i]);
2357 }
2358
2359 if (sc->bge_cdata.bge_rx_mtag)
2360 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2361 if (sc->bge_cdata.bge_mtag_jumbo)
2362 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2363 if (sc->bge_cdata.bge_tx_mtag)
2364 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2365
2367 /* Destroy standard RX ring. */
2368 if (sc->bge_cdata.bge_rx_std_ring_map)
2369 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2370 sc->bge_cdata.bge_rx_std_ring_map);
2371 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2372 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2373 sc->bge_ldata.bge_rx_std_ring,
2374 sc->bge_cdata.bge_rx_std_ring_map);
2375
2376 if (sc->bge_cdata.bge_rx_std_ring_tag)
2377 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2378
2379 /* Destroy jumbo RX ring. */
2380 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2381 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2382 sc->bge_cdata.bge_rx_jumbo_ring_map);
2383
2384 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2385 sc->bge_ldata.bge_rx_jumbo_ring)
2386 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2387 sc->bge_ldata.bge_rx_jumbo_ring,
2388 sc->bge_cdata.bge_rx_jumbo_ring_map);
2389
2390 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2391 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2392
2393 /* Destroy RX return ring. */
2394 if (sc->bge_cdata.bge_rx_return_ring_map)
2395 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2396 sc->bge_cdata.bge_rx_return_ring_map);
2397
2398 if (sc->bge_cdata.bge_rx_return_ring_map &&
2399 sc->bge_ldata.bge_rx_return_ring)
2400 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2401 sc->bge_ldata.bge_rx_return_ring,
2402 sc->bge_cdata.bge_rx_return_ring_map);
2403
2404 if (sc->bge_cdata.bge_rx_return_ring_tag)
2405 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2406
2407 /* Destroy TX ring. */
2408 if (sc->bge_cdata.bge_tx_ring_map)
2409 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2410 sc->bge_cdata.bge_tx_ring_map);
2411
2412 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2413 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2414 sc->bge_ldata.bge_tx_ring,
2415 sc->bge_cdata.bge_tx_ring_map);
2416
2417 if (sc->bge_cdata.bge_tx_ring_tag)
2418 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2419
2420 /* Destroy status block. */
2421 if (sc->bge_cdata.bge_status_map)
2422 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2423 sc->bge_cdata.bge_status_map);
2424
2425 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2426 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2427 sc->bge_ldata.bge_status_block,
2428 sc->bge_cdata.bge_status_map);
2429
2430 if (sc->bge_cdata.bge_status_tag)
2431 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2432
2433 /* Destroy statistics block. */
2434 if (sc->bge_cdata.bge_stats_map)
2435 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2436 sc->bge_cdata.bge_stats_map);
2437
2438 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2439 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2440 sc->bge_ldata.bge_stats,
2441 sc->bge_cdata.bge_stats_map);
2442
2443 if (sc->bge_cdata.bge_stats_tag)
2444 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2445
2446 if (sc->bge_cdata.bge_buffer_tag)
2447 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2448
2449 /* Destroy the parent tag. */
2450 if (sc->bge_cdata.bge_parent_tag)
2451 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2452}
2453
2454static int
2455bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2456 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2457 bus_addr_t *paddr, const char *msg)
2458{
2459 struct bge_dmamap_arg ctx;
2460 bus_addr_t lowaddr;
2461 bus_size_t ring_end;
2462 int error;
2463
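	/* Try the full DMA address range first; retry below with a 32-bit limit if needed. */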
2464 lowaddr = BUS_SPACE_MAXADDR;
2465again:
2466 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2467 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2468 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2469 if (error != 0) {
2470 device_printf(sc->bge_dev,
2471 "could not create %s dma tag\n", msg);
2472 return (ENOMEM);
2473 }
2474 /* Allocate DMA'able memory for ring. */
2475 error = bus_dmamem_alloc(*tag, (void **)ring,
2476 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2477 if (error != 0) {
2478 device_printf(sc->bge_dev,
2479 "could not allocate DMA'able memory for %s\n", msg);
2480 return (ENOMEM);
2481 }
2482 /* Load the address of the ring. */
2483 ctx.bge_busaddr = 0;
2484 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2485 &ctx, BUS_DMA_NOWAIT);
2486 if (error != 0) {
2487 device_printf(sc->bge_dev,
2488 "could not load DMA'able memory for %s\n", msg);
2489 return (ENOMEM);
2490 }
2491 *paddr = ctx.bge_busaddr;
2492 ring_end = *paddr + maxsize;
2493 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2494 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2495 /*
2496 * 4GB boundary crossed. Limit maximum allowable DMA
2497 * address space to 32bit and try again.
2498 */
2499 bus_dmamap_unload(*tag, *map);
2500 bus_dmamem_free(*tag, *ring, *map);
2501 bus_dma_tag_destroy(*tag);
2502 if (bootverbose)
2503 device_printf(sc->bge_dev, "4GB boundary crossed, "
2504 "limit DMA address space to 32bit for %s\n", msg);
2505 *ring = NULL;
2506 *tag = NULL;
2507 *map = NULL;
2508 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2509 goto again;
2510 }
2511 return (0);
2512}
2513
2514static int
2515bge_dma_alloc(struct bge_softc *sc)
2516{
2517 bus_addr_t lowaddr;
2518 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2519 int i, error;
2520
2521 lowaddr = BUS_SPACE_MAXADDR;
2522 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2523 lowaddr = BGE_DMA_MAXADDR;
2524 /*
2525 * Allocate the parent bus DMA tag appropriate for PCI.
2526 */
2527 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2528 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2529 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2530 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2531 if (error != 0) {
2532 device_printf(sc->bge_dev,
2533 "could not allocate parent dma tag\n");
2534 return (ENOMEM);
2535 }
2536
2537 /* Create tag for standard RX ring. */
2538 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2539 &sc->bge_cdata.bge_rx_std_ring_tag,
2540 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2541 &sc->bge_cdata.bge_rx_std_ring_map,
2542 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2543 if (error)
2544 return (error);
2545
2546 /* Create tag for RX return ring. */
2547 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2548 &sc->bge_cdata.bge_rx_return_ring_tag,
2549 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2550 &sc->bge_cdata.bge_rx_return_ring_map,
2551 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2552 if (error)
2553 return (error);
2554
2555 /* Create tag for TX ring. */
2556 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2557 &sc->bge_cdata.bge_tx_ring_tag,
2558 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2559 &sc->bge_cdata.bge_tx_ring_map,
2560 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2561 if (error)
2562 return (error);
2563
2564 /*
2565 * Create tag for status block.
2566	 * Because we use only a single TX/RX/RX return ring, use the
2567	 * minimum status block size, except for BCM5700 AX/BX, which
2568	 * seems to want to see the full status block size regardless
2569	 * of the configured number of rings.
2570 */
2571 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2572 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2573 sbsz = BGE_STATUS_BLK_SZ;
2574 else
2575 sbsz = 32;
2576 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2577 &sc->bge_cdata.bge_status_tag,
2578 (uint8_t **)&sc->bge_ldata.bge_status_block,
2579 &sc->bge_cdata.bge_status_map,
2580 &sc->bge_ldata.bge_status_block_paddr, "status block");
2581 if (error)
2582 return (error);
2583
2584 /* Create tag for statistics block. */
2585 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2586 &sc->bge_cdata.bge_stats_tag,
2587 (uint8_t **)&sc->bge_ldata.bge_stats,
2588 &sc->bge_cdata.bge_stats_map,
2589 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2590 if (error)
2591 return (error);
2592
2593 /* Create tag for jumbo RX ring. */
2594 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2595 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2596 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2597 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2598 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2599 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2600 if (error)
2601 return (error);
2602 }
2603
2604 /* Create parent tag for buffers. */
2605 boundary = 0;
2606 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2607 boundary = BGE_DMA_BNDRY;
2608 /*
2609 * XXX
2610	 * A watchdog timeout issue was observed on BCM5704 when it
2611	 * lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2612	 * Limiting the DMA address space to 32 bits seems to address
2613	 * it.
2614 */
2615 if (sc->bge_flags & BGE_FLAG_PCIX)
2616 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2617 }
2618 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2619 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2620 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2621 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2622 if (error != 0) {
2623 device_printf(sc->bge_dev,
2624 "could not allocate buffer dma tag\n");
2625 return (ENOMEM);
2626 }
2627 /* Create tag for Tx mbufs. */
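	/* A TSO frame may carry up to 64KB of payload plus an Ethernet/VLAN header. */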
2628 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2629 txsegsz = BGE_TSOSEG_SZ;
2630 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2631 } else {
2632 txsegsz = MCLBYTES;
2633 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2634 }
2635 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2636 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2637 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2638 &sc->bge_cdata.bge_tx_mtag);
2639
2640 if (error) {
2641 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2642 return (ENOMEM);
2643 }
2644
2645 /* Create tag for Rx mbufs. */
2646 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2647 rxmaxsegsz = MJUM9BYTES;
2648 else
2649 rxmaxsegsz = MCLBYTES;
2650 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2651 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2652 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2653
2654 if (error) {
2655 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2656 return (ENOMEM);
2657 }
2658
2659 /* Create DMA maps for RX buffers. */
2660 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2661 &sc->bge_cdata.bge_rx_std_sparemap);
2662 if (error) {
2663 device_printf(sc->bge_dev,
2664 "can't create spare DMA map for RX\n");
2665 return (ENOMEM);
2666 }
2667 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2668 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2669 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2670 if (error) {
2671 device_printf(sc->bge_dev,
2672 "can't create DMA map for RX\n");
2673 return (ENOMEM);
2674 }
2675 }
2676
2677 /* Create DMA maps for TX buffers. */
2678 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2679 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2680 &sc->bge_cdata.bge_tx_dmamap[i]);
2681 if (error) {
2682 device_printf(sc->bge_dev,
2683 "can't create DMA map for TX\n");
2684 return (ENOMEM);
2685 }
2686 }
2687
2688 /* Create tags for jumbo RX buffers. */
2689 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2690 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2691 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2692 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2693 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2694 if (error) {
2695 device_printf(sc->bge_dev,
2696 "could not allocate jumbo dma tag\n");
2697 return (ENOMEM);
2698 }
2699 /* Create DMA maps for jumbo RX buffers. */
2700 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2701 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2702 if (error) {
2703 device_printf(sc->bge_dev,
2704 "can't create spare DMA map for jumbo RX\n");
2705 return (ENOMEM);
2706 }
2707 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2708 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2709 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2710 if (error) {
2711 device_printf(sc->bge_dev,
2712 "can't create DMA map for jumbo RX\n");
2713 return (ENOMEM);
2714 }
2715 }
2716 }
2717
2718 return (0);
2719}
2720
2721/*
2722 * Return true if this device has more than one port.
2723 */
2724static int
2725bge_has_multiple_ports(struct bge_softc *sc)
2726{
2727 device_t dev = sc->bge_dev;
2728 u_int b, d, f, fscan, s;
2729
2730 d = pci_get_domain(dev);
2731 b = pci_get_bus(dev);
2732 s = pci_get_slot(dev);
2733 f = pci_get_function(dev);
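	/* Any other populated function in the same slot means a second port. */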
2734 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2735 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2736 return (1);
2737 return (0);
2738}
2739
2740/*
2741 * Return true if MSI can be used with this device.
2742 */
2743static int
2744bge_can_use_msi(struct bge_softc *sc)
2745{
2746 int can_use_msi = 0;
2747
36
37/*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69#ifdef HAVE_KERNEL_OPTION_HEADERS
70#include "opt_device_polling.h"
71#endif
72
73#include <sys/param.h>
74#include <sys/endian.h>
75#include <sys/systm.h>
76#include <sys/sockio.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/module.h>
81#include <sys/socket.h>
82#include <sys/sysctl.h>
83#include <sys/taskqueue.h>
84
85#include <net/if.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99#include <netinet/tcp.h>
100
101#include <machine/bus.h>
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/mii/mii.h>
107#include <dev/mii/miivar.h>
108#include "miidevs.h"
109#include <dev/mii/brgphyreg.h>
110
111#ifdef __sparc64__
112#include <dev/ofw/ofw_bus.h>
113#include <dev/ofw/openfirm.h>
114#include <machine/ofw_machdep.h>
115#include <machine/ver.h>
116#endif
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#include <dev/bge/if_bgereg.h>
122
123#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126MODULE_DEPEND(bge, pci, 1, 1, 1);
127MODULE_DEPEND(bge, ether, 1, 1, 1);
128MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130/* "device miibus" required. See GENERIC if you get errors here. */
131#include "miibus_if.h"
132
133/*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142} const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237};
238
239static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242} const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252};
253
254static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257} const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
323
324 { 0, NULL }
325};
326
327/*
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
330 */
331static const struct bge_revision const bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
354
355 { 0, NULL }
356};
357
358#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
365
366const struct bge_revision * bge_lookup_rev(uint32_t);
367const struct bge_vendor * bge_lookup_vendor(uint16_t);
368
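/* Shared signature of the MAC address lookup helpers (FW, memory, NVRAM, EEPROM). */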
369typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
370
371static int bge_probe(device_t);
372static int bge_attach(device_t);
373static int bge_detach(device_t);
374static int bge_suspend(device_t);
375static int bge_resume(device_t);
376static void bge_release_resources(struct bge_softc *);
377static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378static int bge_dma_alloc(struct bge_softc *);
379static void bge_dma_free(struct bge_softc *);
380static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
382
383static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
384static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
385static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
386static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
387static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
388
389static void bge_txeof(struct bge_softc *, uint16_t);
390static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
391static int bge_rxeof(struct bge_softc *, uint16_t, int);
392
393static void bge_asf_driver_up (struct bge_softc *);
394static void bge_tick(void *);
395static void bge_stats_clear_regs(struct bge_softc *);
396static void bge_stats_update(struct bge_softc *);
397static void bge_stats_update_regs(struct bge_softc *);
398static struct mbuf *bge_check_short_dma(struct mbuf *);
399static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
400 uint16_t *, uint16_t *);
401static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
402
403static void bge_intr(void *);
404static int bge_msi_intr(void *);
405static void bge_intr_task(void *, int);
406static void bge_start_locked(struct ifnet *);
407static void bge_start(struct ifnet *);
408static int bge_ioctl(struct ifnet *, u_long, caddr_t);
409static void bge_init_locked(struct bge_softc *);
410static void bge_init(void *);
411static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
412static void bge_stop(struct bge_softc *);
413static void bge_watchdog(struct bge_softc *);
414static int bge_shutdown(device_t);
415static int bge_ifmedia_upd_locked(struct ifnet *);
416static int bge_ifmedia_upd(struct ifnet *);
417static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
418
419static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
420static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
421
422static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
423static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
424
425static void bge_setpromisc(struct bge_softc *);
426static void bge_setmulti(struct bge_softc *);
427static void bge_setvlan(struct bge_softc *);
428
429static __inline void bge_rxreuse_std(struct bge_softc *, int);
430static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
431static int bge_newbuf_std(struct bge_softc *, int);
432static int bge_newbuf_jumbo(struct bge_softc *, int);
433static int bge_init_rx_ring_std(struct bge_softc *);
434static void bge_free_rx_ring_std(struct bge_softc *);
435static int bge_init_rx_ring_jumbo(struct bge_softc *);
436static void bge_free_rx_ring_jumbo(struct bge_softc *);
437static void bge_free_tx_ring(struct bge_softc *);
438static int bge_init_tx_ring(struct bge_softc *);
439
440static int bge_chipinit(struct bge_softc *);
441static int bge_blockinit(struct bge_softc *);
442static uint32_t bge_dma_swap_options(struct bge_softc *);
443
444static int bge_has_eaddr(struct bge_softc *);
445static uint32_t bge_readmem_ind(struct bge_softc *, int);
446static void bge_writemem_ind(struct bge_softc *, int, int);
447static void bge_writembx(struct bge_softc *, int, int);
448#ifdef notdef
449static uint32_t bge_readreg_ind(struct bge_softc *, int);
450#endif
451static void bge_writemem_direct(struct bge_softc *, int, int);
452static void bge_writereg_ind(struct bge_softc *, int, int);
453
454static int bge_miibus_readreg(device_t, int, int);
455static int bge_miibus_writereg(device_t, int, int, int);
456static void bge_miibus_statchg(device_t);
457#ifdef DEVICE_POLLING
458static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
459#endif
460
461#define BGE_RESET_START 1
462#define BGE_RESET_STOP 2
463static void bge_sig_post_reset(struct bge_softc *, int);
464static void bge_sig_legacy(struct bge_softc *, int);
465static void bge_sig_pre_reset(struct bge_softc *, int);
466static void bge_stop_fw(struct bge_softc *);
467static int bge_reset(struct bge_softc *);
468static void bge_link_upd(struct bge_softc *);
469
470/*
471 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
472 * leak information to untrusted users. It is also known to cause alignment
473 * traps on certain architectures.
474 */
475#ifdef BGE_REGISTER_DEBUG
476static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
477static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
478static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
479#endif
480static void bge_add_sysctls(struct bge_softc *);
481static void bge_add_sysctl_stats_regs(struct bge_softc *,
482 struct sysctl_ctx_list *, struct sysctl_oid_list *);
483static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
484 struct sysctl_oid_list *);
485static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
486
487static device_method_t bge_methods[] = {
488 /* Device interface */
489 DEVMETHOD(device_probe, bge_probe),
490 DEVMETHOD(device_attach, bge_attach),
491 DEVMETHOD(device_detach, bge_detach),
492 DEVMETHOD(device_shutdown, bge_shutdown),
493 DEVMETHOD(device_suspend, bge_suspend),
494 DEVMETHOD(device_resume, bge_resume),
495
496 /* MII interface */
497 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
498 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
499 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
500
501 DEVMETHOD_END
502};
503
504static driver_t bge_driver = {
505 "bge",
506 bge_methods,
507 sizeof(struct bge_softc)
508};
509
510static devclass_t bge_devclass;
511
512DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
513DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
514
515static int bge_allow_asf = 1;
516
517TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
518
519static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
520SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
521 "Allow ASF mode if available");
522
523#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
524#define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
525#define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
526#define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
527#define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
528
529static int
530bge_has_eaddr(struct bge_softc *sc)
531{
532#ifdef __sparc64__
533 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
534 device_t dev;
535 uint32_t subvendor;
536
537 dev = sc->bge_dev;
538
539 /*
540 * The on-board BGEs found in sun4u machines aren't fitted with
541 * an EEPROM, which means that we have to obtain the MAC address
542 * via OFW and that some tests will always fail. We distinguish
543 * such BGEs by the subvendor ID, which also has to be obtained
544 * from OFW instead of the PCI configuration space as the latter
545 * indicates Broadcom as the subvendor of the netboot interface.
546 * For early Blade 1500 and 2500 we even have to check the OFW
547 * device path as the subvendor ID always defaults to Broadcom
548 * there.
549 */
550 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
551 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
552 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
553 return (0);
554 memset(buf, 0, sizeof(buf));
555 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
556 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
557 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
558 return (0);
559 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
560 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
561 return (0);
562 }
563#endif
564 return (1);
565}
566
567static uint32_t
568bge_readmem_ind(struct bge_softc *sc, int off)
569{
570 device_t dev;
571 uint32_t val;
572
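	/*
	 * The BCM5906 apparently does not implement this range of internal
	 * memory, so indirect reads of it simply return 0 (and writes are
	 * dropped in bge_writemem_ind()).
	 */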
573 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
574 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
575 return (0);
576
577 dev = sc->bge_dev;
578
579 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
580 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
581 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
582 return (val);
583}
584
585static void
586bge_writemem_ind(struct bge_softc *sc, int off, int val)
587{
588 device_t dev;
589
590 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
591 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
592 return;
593
594 dev = sc->bge_dev;
595
596 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
597 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
598 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
599}
600
601#ifdef notdef
602static uint32_t
603bge_readreg_ind(struct bge_softc *sc, int off)
604{
605 device_t dev;
606
607 dev = sc->bge_dev;
608
609 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
611}
612#endif
613
614static void
615bge_writereg_ind(struct bge_softc *sc, int off, int val)
616{
617 device_t dev;
618
619 dev = sc->bge_dev;
620
621 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
622 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
623}
624
625static void
626bge_writemem_direct(struct bge_softc *sc, int off, int val)
627{
628 CSR_WRITE_4(sc, off, val);
629}
630
631static void
632bge_writembx(struct bge_softc *sc, int off, int val)
633{
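	/*
	 * The BCM5906 uses the low-priority mailbox region, so relocate
	 * the offset from the standard mailbox block.
	 */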
634 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
635 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
636
637 CSR_WRITE_4(sc, off, val);
638}
639
640/*
641 * Map a single buffer address.
642 */
643
644static void
645bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
646{
647 struct bge_dmamap_arg *ctx;
648
649 if (error)
650 return;
651
652 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
653
654 ctx = arg;
655 ctx->bge_busaddr = segs->ds_addr;
656}
657
658static uint8_t
659bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
660{
661 uint32_t access, byte = 0;
662 int i;
663
664 /* Lock. */
665 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
666 for (i = 0; i < 8000; i++) {
667 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
668 break;
669 DELAY(20);
670 }
671 if (i == 8000)
672 return (1);
673
674 /* Enable access. */
675 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
676 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
677
678 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
679 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
680 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
681 DELAY(10);
682 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
683 DELAY(10);
684 break;
685 }
686 }
687
688 if (i == BGE_TIMEOUT * 10) {
689 if_printf(sc->bge_ifp, "nvram read timed out\n");
690 return (1);
691 }
692
693 /* Get result. */
694 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
695
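	/*
	 * The data register holds a big-endian 32-bit word; byte-swap it
	 * and shift out the byte selected by the low two address bits.
	 */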
696 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
697
698 /* Disable access. */
699 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
700
701 /* Unlock. */
702 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
703 CSR_READ_4(sc, BGE_NVRAM_SWARB);
704
705 return (0);
706}
707
708/*
709 * Read a sequence of bytes from NVRAM.
710 */
711static int
712bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
713{
714 int err = 0, i;
715 uint8_t byte = 0;
716
717 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
718 return (1);
719
720 for (i = 0; i < cnt; i++) {
721 err = bge_nvram_getbyte(sc, off + i, &byte);
722 if (err)
723 break;
724 *(dest + i) = byte;
725 }
726
727 return (err ? 1 : 0);
728}
729
730/*
731 * Read a byte of data stored in the EEPROM at address 'addr.' The
732 * BCM570x supports both the traditional bitbang interface and an
733 * auto access interface for reading the EEPROM. We use the auto
734 * access method.
735 */
736static uint8_t
737bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
738{
739 int i;
740 uint32_t byte = 0;
741
742 /*
743 * Enable use of auto EEPROM access so we can avoid
744 * having to use the bitbang method.
745 */
746 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
747
748 /* Reset the EEPROM, load the clock period. */
749 CSR_WRITE_4(sc, BGE_EE_ADDR,
750 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
751 DELAY(20);
752
753 /* Issue the read EEPROM command. */
754 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
755
756 /* Wait for completion */
757 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
758 DELAY(10);
759 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
760 break;
761 }
762
763 if (i == BGE_TIMEOUT * 10) {
764 device_printf(sc->bge_dev, "EEPROM read timed out\n");
765 return (1);
766 }
767
768 /* Get result. */
769 byte = CSR_READ_4(sc, BGE_EE_DATA);
770
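	/*
	 * The data register holds four bytes; shift out the one selected
	 * by the low two address bits.
	 */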
771 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
772
773 return (0);
774}
775
776/*
777 * Read a sequence of bytes from the EEPROM.
778 */
779static int
780bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
781{
782 int i, error = 0;
783 uint8_t byte = 0;
784
785 for (i = 0; i < cnt; i++) {
786 error = bge_eeprom_getbyte(sc, off + i, &byte);
787 if (error)
788 break;
789 *(dest + i) = byte;
790 }
791
792 return (error ? 1 : 0);
793}
794
795static int
796bge_miibus_readreg(device_t dev, int phy, int reg)
797{
798 struct bge_softc *sc;
799 uint32_t val;
800 int i;
801
802 sc = device_get_softc(dev);
803
804 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
805 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
806 CSR_WRITE_4(sc, BGE_MI_MODE,
807 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
808 DELAY(80);
809 }
810
811 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
812 BGE_MIPHY(phy) | BGE_MIREG(reg));
813
814 /* Poll for the PHY register access to complete. */
815 for (i = 0; i < BGE_TIMEOUT; i++) {
816 DELAY(10);
817 val = CSR_READ_4(sc, BGE_MI_COMM);
818 if ((val & BGE_MICOMM_BUSY) == 0) {
819 DELAY(5);
820 val = CSR_READ_4(sc, BGE_MI_COMM);
821 break;
822 }
823 }
824
825 if (i == BGE_TIMEOUT) {
826 device_printf(sc->bge_dev,
827 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
828 phy, reg, val);
829 val = 0;
830 }
831
832 /* Restore the autopoll bit if necessary. */
833 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
834 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
835 DELAY(80);
836 }
837
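	/*
	 * A failed PHY read is reported as 0; the miibus read interface
	 * has no separate error return.
	 */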
838 if (val & BGE_MICOMM_READFAIL)
839 return (0);
840
841 return (val & 0xFFFF);
842}
843
844static int
845bge_miibus_writereg(device_t dev, int phy, int reg, int val)
846{
847 struct bge_softc *sc;
848 int i;
849
850 sc = device_get_softc(dev);
851
852 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
853 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
854 return (0);
855
856 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
857 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
858 CSR_WRITE_4(sc, BGE_MI_MODE,
859 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
860 DELAY(80);
861 }
862
863 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
864 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
865
866 for (i = 0; i < BGE_TIMEOUT; i++) {
867 DELAY(10);
868 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
869 DELAY(5);
870 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
871 break;
872 }
873 }
874
875 /* Restore the autopoll bit if necessary. */
876 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
877 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
878 DELAY(80);
879 }
880
881 if (i == BGE_TIMEOUT)
882 device_printf(sc->bge_dev,
883 "PHY write timed out (phy %d, reg %d, val %d)\n",
884 phy, reg, val);
885
886 return (0);
887}
888
889static void
890bge_miibus_statchg(device_t dev)
891{
892 struct bge_softc *sc;
893 struct mii_data *mii;
894 sc = device_get_softc(dev);
895 mii = device_get_softc(sc->bge_miibus);
896
897 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
898 (IFM_ACTIVE | IFM_AVALID)) {
899 switch (IFM_SUBTYPE(mii->mii_media_active)) {
900 case IFM_10_T:
901 case IFM_100_TX:
902 sc->bge_link = 1;
903 break;
904 case IFM_1000_T:
905 case IFM_1000_SX:
906 case IFM_2500_SX:
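		/*
		 * The BCM5906 is a 10/100-only (Fast Ethernet) part, so
		 * gigabit media cannot indicate a valid link on it.
		 */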
907 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
908 sc->bge_link = 1;
909 else
910 sc->bge_link = 0;
911 break;
912 default:
913 sc->bge_link = 0;
914 break;
915 }
916 } else
917 sc->bge_link = 0;
918 if (sc->bge_link == 0)
919 return;
920 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
921 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
922 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
923 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
924 else
925 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
926
927 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
928 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
929 if ((IFM_OPTIONS(mii->mii_media_active) &
930 IFM_ETH_TXPAUSE) != 0)
931 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
932 else
933 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
934 if ((IFM_OPTIONS(mii->mii_media_active) &
935 IFM_ETH_RXPAUSE) != 0)
936 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
937 else
938 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
939 } else {
940 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
941 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
942 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
943 }
944}
945
946/*
947 * Initialize a standard receive ring descriptor.
948 */
949static int
950bge_newbuf_std(struct bge_softc *sc, int i)
951{
952 struct mbuf *m;
953 struct bge_rx_bd *r;
954 bus_dma_segment_t segs[1];
955 bus_dmamap_t map;
956 int error, nsegs;
957
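	/*
	 * If jumbo frames must be received on the standard ring and the
	 * current MTU no longer fits in a regular cluster, use a 9k jumbo
	 * cluster instead.
	 */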
958 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
959 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
960 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
961 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
962 if (m == NULL)
963 return (ENOBUFS);
964 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
965 } else {
966 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
967 if (m == NULL)
968 return (ENOBUFS);
969 m->m_len = m->m_pkthdr.len = MCLBYTES;
970 }
971 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
972 m_adj(m, ETHER_ALIGN);
973
974 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
975 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
976 if (error != 0) {
977 m_freem(m);
978 return (error);
979 }
980 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
981 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
982 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
983 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
984 sc->bge_cdata.bge_rx_std_dmamap[i]);
985 }
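	/*
	 * Swap the spare map (which now holds the new mbuf's mapping) with
	 * the slot's map, so the slot always owns a loaded map and a later
	 * load failure cannot orphan this buffer.
	 */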
986 map = sc->bge_cdata.bge_rx_std_dmamap[i];
987 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
988 sc->bge_cdata.bge_rx_std_sparemap = map;
989 sc->bge_cdata.bge_rx_std_chain[i] = m;
990 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
991 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
992 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
993 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
994 r->bge_flags = BGE_RXBDFLAG_END;
995 r->bge_len = segs[0].ds_len;
996 r->bge_idx = i;
997
998 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
999 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1000
1001 return (0);
1002}
1003
1004/*
1005 * Initialize a jumbo receive ring descriptor. This allocates
1006 * a jumbo buffer from the pool managed internally by the driver.
1007 */
1008static int
1009bge_newbuf_jumbo(struct bge_softc *sc, int i)
1010{
1011 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1012 bus_dmamap_t map;
1013 struct bge_extrx_bd *r;
1014 struct mbuf *m;
1015 int error, nsegs;
1016
1017 MGETHDR(m, M_DONTWAIT, MT_DATA);
1018 if (m == NULL)
1019 return (ENOBUFS);
1020
1021 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1022 if (!(m->m_flags & M_EXT)) {
1023 m_freem(m);
1024 return (ENOBUFS);
1025 }
1026 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1027 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1028 m_adj(m, ETHER_ALIGN);
1029
1030 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1031 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1032 if (error != 0) {
1033 m_freem(m);
1034 return (error);
1035 }
1036
1037 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1038 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1039 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1040 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1041 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1042 }
1043 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1044 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1045 sc->bge_cdata.bge_rx_jumbo_sparemap;
1046 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1047 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1048 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1049 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1050 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1051 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1052
1053 /*
1054 * Fill in the extended RX buffer descriptor.
1055 */
1056 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1057 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1058 r->bge_idx = i;
1059 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
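	/*
	 * The cases below intentionally fall through: fill the highest
	 * segment returned first, then work down to segment 0.
	 */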
1060 switch (nsegs) {
1061 case 4:
1062 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1063 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1064 r->bge_len3 = segs[3].ds_len;
1065 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1066 case 3:
1067 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1068 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1069 r->bge_len2 = segs[2].ds_len;
1070 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1071 case 2:
1072 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1073 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1074 r->bge_len1 = segs[1].ds_len;
1075 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1076 case 1:
1077 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1078 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1079 r->bge_len0 = segs[0].ds_len;
1080 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1081 break;
1082 default:
1083 panic("%s: %d segments\n", __func__, nsegs);
1084 }
1085
1086 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1087 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1088
1089 return (0);
1090}
1091
1092static int
1093bge_init_rx_ring_std(struct bge_softc *sc)
1094{
1095 int error, i;
1096
1097 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1098 sc->bge_std = 0;
1099 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1100 if ((error = bge_newbuf_std(sc, i)) != 0)
1101 return (error);
1102 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1103 }
1104
1105 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1106 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1107
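	/*
	 * The ring is fully populated; advance the producer index to the
	 * last descriptor to hand all buffers to the chip.
	 */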
1108 sc->bge_std = 0;
1109 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1110
1111 return (0);
1112}
1113
1114static void
1115bge_free_rx_ring_std(struct bge_softc *sc)
1116{
1117 int i;
1118
1119 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1120 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1121 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1122 sc->bge_cdata.bge_rx_std_dmamap[i],
1123 BUS_DMASYNC_POSTREAD);
1124 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1125 sc->bge_cdata.bge_rx_std_dmamap[i]);
1126 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1127 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1128 }
1129 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1130 sizeof(struct bge_rx_bd));
1131 }
1132}
1133
1134static int
1135bge_init_rx_ring_jumbo(struct bge_softc *sc)
1136{
1137 struct bge_rcb *rcb;
1138 int error, i;
1139
1140 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1141 sc->bge_jumbo = 0;
1142 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1143 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1144 return (error);
1145 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1146 }
1147
1148 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1149 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1150
1151 sc->bge_jumbo = 0;
1152
1153 /* Enable the jumbo receive producer ring. */
1154 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1155 rcb->bge_maxlen_flags =
1156 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1157 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1158
1159 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1160
1161 return (0);
1162}
1163
1164static void
1165bge_free_rx_ring_jumbo(struct bge_softc *sc)
1166{
1167 int i;
1168
1169 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1170 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1171 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1172 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1173 BUS_DMASYNC_POSTREAD);
1174 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1175 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1176 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1177 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1178 }
1179 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1180 sizeof(struct bge_extrx_bd));
1181 }
1182}
1183
1184static void
1185bge_free_tx_ring(struct bge_softc *sc)
1186{
1187 int i;
1188
1189 if (sc->bge_ldata.bge_tx_ring == NULL)
1190 return;
1191
1192 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1193 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1194 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1195 sc->bge_cdata.bge_tx_dmamap[i],
1196 BUS_DMASYNC_POSTWRITE);
1197 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1198 sc->bge_cdata.bge_tx_dmamap[i]);
1199 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1200 sc->bge_cdata.bge_tx_chain[i] = NULL;
1201 }
1202 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1203 sizeof(struct bge_tx_bd));
1204 }
1205}
1206
1207static int
1208bge_init_tx_ring(struct bge_softc *sc)
1209{
1210 sc->bge_txcnt = 0;
1211 sc->bge_tx_saved_considx = 0;
1212
1213 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1214 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1215 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1216
1217 /* Initialize transmit producer index for host-memory send ring. */
1218 sc->bge_tx_prodidx = 0;
1219 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1220
1221 /* 5700 b2 errata */
1222 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1223 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1224
1225 /* NIC-memory send ring not used; initialize to zero. */
1226 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1227 /* 5700 b2 errata */
1228 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1229 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1230
1231 return (0);
1232}
1233
1234static void
1235bge_setpromisc(struct bge_softc *sc)
1236{
1237 struct ifnet *ifp;
1238
1239 BGE_LOCK_ASSERT(sc);
1240
1241 ifp = sc->bge_ifp;
1242
1243 /* Enable or disable promiscuous mode as needed. */
1244 if (ifp->if_flags & IFF_PROMISC)
1245 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1246 else
1247 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1248}
1249
1250static void
1251bge_setmulti(struct bge_softc *sc)
1252{
1253 struct ifnet *ifp;
1254 struct ifmultiaddr *ifma;
1255 uint32_t hashes[4] = { 0, 0, 0, 0 };
1256 int h, i;
1257
1258 BGE_LOCK_ASSERT(sc);
1259
1260 ifp = sc->bge_ifp;
1261
1262 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
1263 for (i = 0; i < 4; i++)
1264 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1265 return;
1266 }
1267
1268 /* First, zot all the existing filters. */
1269 for (i = 0; i < 4; i++)
1270 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1271
1272 /* Now program new ones. */
1273 if_maddr_rlock(ifp);
1274 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1275 if (ifma->ifma_addr->sa_family != AF_LINK)
1276 continue;
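		/*
		 * Hash on the low 7 bits of the little-endian CRC32 of the
		 * address: bits 6:5 select one of the four MAR registers,
		 * bits 4:0 the bit within it.
		 */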
1277 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1278 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1279 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1280 }
1281 if_maddr_runlock(ifp);
1282
1283 for (i = 0; i < 4; i++)
1284 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1285}
1286
1287static void
1288bge_setvlan(struct bge_softc *sc)
1289{
1290 struct ifnet *ifp;
1291
1292 BGE_LOCK_ASSERT(sc);
1293
1294 ifp = sc->bge_ifp;
1295
1296 /* Enable or disable VLAN tag stripping as needed. */
1297 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1298 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1299 else
1300 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1301}
1302
1303static void
1304bge_sig_pre_reset(struct bge_softc *sc, int type)
1305{
1306
1307 /*
1308 * Some chips don't like this so only do this if ASF is enabled
1309 */
1310 if (sc->bge_asf_mode)
1311 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1312
1313 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1314 switch (type) {
1315 case BGE_RESET_START:
1316 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1317 BGE_FW_DRV_STATE_START);
1318 break;
1319 case BGE_RESET_STOP:
1320 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1321 BGE_FW_DRV_STATE_UNLOAD);
1322 break;
1323 }
1324 }
1325}
1326
1327static void
1328bge_sig_post_reset(struct bge_softc *sc, int type)
1329{
1330
1331 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1332 switch (type) {
1333 case BGE_RESET_START:
1334 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1335 BGE_FW_DRV_STATE_START_DONE);
1336 /* START DONE */
1337 break;
1338 case BGE_RESET_STOP:
1339 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1340 BGE_FW_DRV_STATE_UNLOAD_DONE);
1341 break;
1342 }
1343 }
1344}
1345
1346static void
1347bge_sig_legacy(struct bge_softc *sc, int type)
1348{
1349
1350 if (sc->bge_asf_mode) {
1351 switch (type) {
1352 case BGE_RESET_START:
1353 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1354 BGE_FW_DRV_STATE_START);
1355 break;
1356 case BGE_RESET_STOP:
1357 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1358 BGE_FW_DRV_STATE_UNLOAD);
1359 break;
1360 }
1361 }
1362}
1363
1364static void
1365bge_stop_fw(struct bge_softc *sc)
1366{
1367 int i;
1368
1369 if (sc->bge_asf_mode) {
1370 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1371 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1372 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1373
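		/*
		 * Wait for the firmware to acknowledge the pause command;
		 * it clears the driver event bit when done.
		 */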
1374 for (i = 0; i < 100; i++) {
1375 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1376 BGE_RX_CPU_DRV_EVENT))
1377 break;
1378 DELAY(10);
1379 }
1380 }
1381}
1382
1383static uint32_t
1384bge_dma_swap_options(struct bge_softc *sc)
1385{
1386 uint32_t dma_options;
1387
1388 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1389 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1390#if BYTE_ORDER == BIG_ENDIAN
1391 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1392#endif
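	/*
	 * The BCM5720 additionally needs byte/word swapping enabled for its
	 * buffer-to-host RX data path, plus the B2HRX and HTX2B engines
	 * turned on.
	 */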
1393 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1394 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1395 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1396 BGE_MODECTL_HTX2B_ENABLE;
1397
1398 return (dma_options);
1399}
1400
1401/*
1402 * Do endian, PCI and DMA initialization.
1403 */
1404static int
1405bge_chipinit(struct bge_softc *sc)
1406{
1407 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1408 uint16_t val;
1409 int i;
1410
1411 /* Set endianness before we access any non-PCI registers. */
1412 misc_ctl = BGE_INIT;
1413 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1414 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1415 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1416
1417 /* Clear the MAC control register */
1418 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1419
1420 /*
1421 * Clear the MAC statistics block in the NIC's
1422 * internal memory.
1423 */
1424 for (i = BGE_STATS_BLOCK;
1425 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1426 BGE_MEMWIN_WRITE(sc, i, 0);
1427
1428 for (i = BGE_STATUS_BLOCK;
1429 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1430 BGE_MEMWIN_WRITE(sc, i, 0);
1431
1432 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1433 /*
1434 * Fix data corruption caused by non-qword write with WB.
1435 * Fix master abort in PCI mode.
1436 * Fix PCI latency timer.
1437 */
1438 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1439 val |= (1 << 10) | (1 << 12) | (1 << 13);
1440 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1441 }
1442
1443 /*
1444 * Set up the PCI DMA control register.
1445 */
1446 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1447 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1448 if (sc->bge_flags & BGE_FLAG_PCIE) {
1449 /* Read watermark not used, 128 bytes for write. */
1450 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1451 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1452 if (BGE_IS_5714_FAMILY(sc)) {
1453 /* 256 bytes for read and write. */
1454 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1455 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1456 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1457 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1458 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1459 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1460 /*
1461 * In the BCM5703, the DMA read watermark should
1462 * be set to less than or equal to the maximum
1463 * memory read byte count of the PCI-X command
1464 * register.
1465 */
1466 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1467 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1468 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1469 /* 1536 bytes for read, 384 bytes for write. */
1470 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1471 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1472 } else {
1473 /* 384 bytes for read and write. */
1474 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1475 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1476 0x0F;
1477 }
1478 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1479 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1480 uint32_t tmp;
1481
1482 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1483 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1484 if (tmp == 6 || tmp == 7)
1485 dma_rw_ctl |=
1486 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1487
1488 /* Set PCI-X DMA write workaround. */
1489 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1490 }
1491 } else {
1492 /* Conventional PCI bus: 256 bytes for read and write. */
1493 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1494 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1495
1496 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1497 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1498 dma_rw_ctl |= 0x0F;
1499 }
1500 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1501 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1502 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1503 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1504 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1505 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1506 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1507 if (BGE_IS_5717_PLUS(sc)) {
1508 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1509 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1510 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1511 /*
1512 * Enable HW workaround for controllers that misinterpret
1513 * a status tag update and leave interrupts permanently
1514 * disabled.
1515 */
1516 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1517 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1518 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1519 }
1520 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1521
1522 /*
1523 * Set up general mode register.
1524 */
1525 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1526 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1527
1528 /*
1529 * The BCM5701 B5 has a bug causing data corruption when using
1530 * 64-bit DMA reads, which can be terminated early and then
1531 * completed later as 32-bit accesses, in combination with
1532 * certain bridges.
1533 */
1534 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1535 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1536 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1537
1538 /*
1539 * Tell the firmware the driver is running
1540 */
1541 if (sc->bge_asf_mode & ASF_STACKUP)
1542 mode_ctl |= BGE_MODECTL_STACKUP;
1543
1544 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1545
1546 /*
1547 * Disable memory write invalidate. Apparently it is not supported
1548 * properly by these devices. Also ensure that INTx isn't disabled,
1549 * as these chips need it even when using MSI.
1550 */
1551 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1552 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1553
1554 /* Set the timer prescaler (always 66MHz). */
1555 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1556
1557 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1558 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1559 DELAY(40); /* XXX */
1560
1561 /* Put PHY into ready state */
1562 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1563 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1564 DELAY(40);
1565 }
1566
1567 return (0);
1568}
1569
1570static int
1571bge_blockinit(struct bge_softc *sc)
1572{
1573 struct bge_rcb *rcb;
1574 bus_size_t vrcb;
1575 bge_hostaddr taddr;
1576 uint32_t dmactl, val;
1577 int i, limit;
1578
1579 /*
1580 * Initialize the memory window pointer register so that
1581 * we can access the first 32K of internal NIC RAM. This will
1582 * allow us to set up the TX send ring RCBs and the RX return
1583 * ring RCBs, plus other things which live in NIC memory.
1584 */
1585 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1586
1587 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1588
1589 if (!(BGE_IS_5705_PLUS(sc))) {
1590 /* Configure mbuf memory pool */
1591 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1592 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1593 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1594 else
1595 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1596
1597 /* Configure DMA resource pool */
1598 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1599 BGE_DMA_DESCRIPTORS);
1600 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1601 }
1602
1603 /* Configure mbuf pool watermarks */
1604 if (BGE_IS_5717_PLUS(sc)) {
1605 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1606 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1607 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1608 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1609 } else {
1610 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1611 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1612 }
1613 } else if (!BGE_IS_5705_PLUS(sc)) {
1614 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1615 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1616 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1617 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1618 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1619 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1620 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1621 } else {
1622 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1623 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1624 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1625 }
1626
1627 /* Configure DMA resource watermarks */
1628 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1629 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1630
1631 /* Enable buffer manager */
1632 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1633 /*
1634 * Change the arbitration algorithm of TXMBUF read request to
1635 * round-robin instead of priority-based for BCM5719. When
1636 * TXFIFO is almost empty, RDMA will hold its request until
1637 * TXFIFO is not almost empty.
1638 */
1639 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1640 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1641 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1642
1643 /* Poll for buffer manager start indication */
1644 for (i = 0; i < BGE_TIMEOUT; i++) {
1645 DELAY(10);
1646 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1647 break;
1648 }
1649
1650 if (i == BGE_TIMEOUT) {
1651 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1652 return (ENXIO);
1653 }
1654
1655 /* Enable flow-through queues */
1656 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1657 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1658
1659 /* Wait until queue initialization is complete */
1660 for (i = 0; i < BGE_TIMEOUT; i++) {
1661 DELAY(10);
1662 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1663 break;
1664 }
1665
1666 if (i == BGE_TIMEOUT) {
1667 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1668 return (ENXIO);
1669 }
1670
1671 /*
1672 * Summary of rings supported by the controller:
1673 *
1674 * Standard Receive Producer Ring
1675 * - This ring is used to feed receive buffers for "standard"
1676 * sized frames (typically 1536 bytes) to the controller.
1677 *
1678 * Jumbo Receive Producer Ring
1679 * - This ring is used to feed receive buffers for jumbo sized
1680 * frames (i.e. anything bigger than the "standard" frames)
1681 * to the controller.
1682 *
1683 * Mini Receive Producer Ring
1684 * - This ring is used to feed receive buffers for "mini"
1685 * sized frames to the controller.
1686 * - This feature required external memory for the controller
1687 * but was never used in a production system. Should always
1688 * be disabled.
1689 *
1690 * Receive Return Ring
1691 * - After the controller has placed an incoming frame into a
1692 * receive buffer that buffer is moved into a receive return
1693 * ring. The driver is then responsible for passing the
1694 * buffer up to the stack. Many versions of the controller
1695 * support multiple RR rings.
1696 *
1697 * Send Ring
1698 * - This ring is used for outgoing frames. Many versions of
1699 * the controller support multiple send rings.
1700 */
1701
1702 /* Initialize the standard receive producer ring control block. */
1703 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1704 rcb->bge_hostaddr.bge_addr_lo =
1705 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1706 rcb->bge_hostaddr.bge_addr_hi =
1707 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1708 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1709 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1710 if (BGE_IS_5717_PLUS(sc)) {
1711 /*
1712 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1713 * Bits 15-2 : Maximum RX frame size
1714 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1715 * Bit 0 : Reserved
1716 */
1717 rcb->bge_maxlen_flags =
1718 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1719 } else if (BGE_IS_5705_PLUS(sc)) {
1720 /*
1721 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1722 * Bits 15-2 : Reserved (should be 0)
1723 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1724 * Bit 0 : Reserved
1725 */
1726 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1727 } else {
1728 /*
1729 * Ring size is always XXX entries
1730 * Bits 31-16: Maximum RX frame size
1731 * Bits 15-2 : Reserved (should be 0)
1732 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1733 * Bit 0 : Reserved
1734 */
1735 rcb->bge_maxlen_flags =
1736 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1737 }
1738 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1739 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1740 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1741 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1742 else
1743 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1744 /* Write the standard receive producer ring control block. */
1745 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1746 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1747 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1748 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1749
1750 /* Reset the standard receive producer ring producer index. */
1751 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1752
1753 /*
1754 * Initialize the jumbo RX producer ring control
1755 * block. We set the 'ring disabled' bit in the
1756 * flags field until we're actually ready to start
1757 * using this ring (i.e. once we set the MTU
1758 * high enough to require it).
1759 */
1760 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1761 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1762 /* Get the jumbo receive producer ring RCB parameters. */
1763 rcb->bge_hostaddr.bge_addr_lo =
1764 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1765 rcb->bge_hostaddr.bge_addr_hi =
1766 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1767 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1768 sc->bge_cdata.bge_rx_jumbo_ring_map,
1769 BUS_DMASYNC_PREREAD);
1770 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1771 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1772 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1773 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1774 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1775 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1776 else
1777 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1778 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1779 rcb->bge_hostaddr.bge_addr_hi);
1780 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1781 rcb->bge_hostaddr.bge_addr_lo);
1782 /* Program the jumbo receive producer ring RCB parameters. */
1783 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1784 rcb->bge_maxlen_flags);
1785 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1786 /* Reset the jumbo receive producer ring producer index. */
1787 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1788 }
1789
1790 /* Disable the mini receive producer ring RCB. */
1791 if (BGE_IS_5700_FAMILY(sc)) {
1792 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1793 rcb->bge_maxlen_flags =
1794 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1795 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1796 rcb->bge_maxlen_flags);
1797 /* Reset the mini receive producer ring producer index. */
1798 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1799 }
1800
1801 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1802 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1803 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1804 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1805 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1806 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1807 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1808 }
1809 /*
1810 * The BD ring replenish thresholds control how often the
1811 * hardware fetches new BDs from the producer rings in host
1812 * memory. Setting the value too low on a busy system can
1813 * starve the hardware and reduce the throughput.
1814 *
1815 * Set the BD ring replenish thresholds. The recommended
1816 * values are 1/8th the number of descriptors allocated to
1817 * each ring.
1818 * XXX The 5754 requires a lower threshold, so it might be a
1819 * requirement of all 575x family chips. The Linux driver sets
1820 * the lower threshold for all 5705 family chips as well, but there
1821 * are reports that it might not need to be so strict.
1822 *
1823 * XXX Linux does some extra fiddling here for the 5906 parts as
1824 * well.
1825 */
1826 if (BGE_IS_5705_PLUS(sc))
1827 val = 8;
1828 else
1829 val = BGE_STD_RX_RING_CNT / 8;
1830 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1831 if (BGE_IS_JUMBO_CAPABLE(sc))
1832 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1833 BGE_JUMBO_RX_RING_CNT/8);
1834 if (BGE_IS_5717_PLUS(sc)) {
1835 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1836 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1837 }
1838
1839 /*
1840 * Disable all send rings by setting the 'ring disabled' bit
1841 * in the flags field of all the TX send ring control blocks,
1842 * located in NIC memory.
1843 */
1844 if (!BGE_IS_5705_PLUS(sc))
1845 /* 5700 to 5704 had 16 send rings. */
1846 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1847 else
1848 limit = 1;
1849 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1850 for (i = 0; i < limit; i++) {
1851 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1852 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1853 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1854 vrcb += sizeof(struct bge_rcb);
1855 }
1856
1857 /* Configure send ring RCB 0 (we use only the first ring) */
1858 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1859 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1860 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1861 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1862 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1863 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1864 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1865 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1866 else
1867 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1868 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1869 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1870 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1871
1872 /*
1873 * Disable all receive return rings by setting the
1874 * 'ring disabled' bit in the flags field of all the receive
1875 * return ring control blocks, located in NIC memory.
1876 */
1877 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1878 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1879 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1880 /* Should be 17, use 16 until we get an SRAM map. */
1881 limit = 16;
1882 } else if (!BGE_IS_5705_PLUS(sc))
1883 limit = BGE_RX_RINGS_MAX;
1884 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1885 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1886 limit = 4;
1887 else
1888 limit = 1;
1889 /* Disable all receive return rings. */
1890 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1891 for (i = 0; i < limit; i++) {
1892 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1893 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1894 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1895 BGE_RCB_FLAG_RING_DISABLED);
1896 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1897 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1898 (i * (sizeof(uint64_t))), 0);
1899 vrcb += sizeof(struct bge_rcb);
1900 }
1901
1902 /*
1903 * Set up receive return ring 0. Note that the NIC address
1904 * for RX return rings is 0x0. The return rings live entirely
1905 * within the host, so the nicaddr field in the RCB isn't used.
1906 */
1907 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1908 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1909 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1910 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1911 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1912 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1913 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1914
1915 /* Set random backoff seed for TX */
1916 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1917 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1918 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1919 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1920 BGE_TX_BACKOFF_SEED_MASK);
1921
1922 /* Set inter-packet gap */
1923 val = 0x2620;
1924 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1925 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1926 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1927 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1928
1929 /*
1930 * Specify which ring to use for packets that don't match
1931 * any RX rules.
1932 */
1933 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1934
1935 /*
1936 * Configure number of RX lists. One interrupt distribution
1937 * list, sixteen active lists, one bad frames class.
1938 */
1939 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1940
1941 /* Initialize RX list placement stats mask. */
1942 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1943 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1944
1945 /* Disable host coalescing until we get it set up */
1946 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1947
1948 /* Poll to make sure it's shut down. */
1949 for (i = 0; i < BGE_TIMEOUT; i++) {
1950 DELAY(10);
1951 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1952 break;
1953 }
1954
1955 if (i == BGE_TIMEOUT) {
1956 device_printf(sc->bge_dev,
1957 "host coalescing engine failed to idle\n");
1958 return (ENXIO);
1959 }
1960
1961 /* Set up host coalescing defaults */
1962 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1963 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1964 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1965 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1966 if (!(BGE_IS_5705_PLUS(sc))) {
1967 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1968 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1969 }
1970 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1971 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1972
1973 /* Set up address of statistics block */
1974 if (!(BGE_IS_5705_PLUS(sc))) {
1975 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1976 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1977 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1978 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1979 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1980 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1981 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1982 }
1983
1984 /* Set up address of status block */
1985 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1986 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1987 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1988 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1989
1990 /* Set up status block size. */
1991 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1992 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1993 val = BGE_STATBLKSZ_FULL;
1994 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1995 } else {
1996 val = BGE_STATBLKSZ_32BYTE;
1997 bzero(sc->bge_ldata.bge_status_block, 32);
1998 }
1999 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2000 sc->bge_cdata.bge_status_map,
2001 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2002
2003 /* Turn on host coalescing state machine */
2004 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2005
2006 /* Turn on RX BD completion state machine and enable attentions */
2007 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2008 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2009
2010 /* Turn on RX list placement state machine */
2011 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2012
2013 /* Turn on RX list selector state machine. */
2014 if (!(BGE_IS_5705_PLUS(sc)))
2015 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2016
2017 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2018 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2019 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2020 BGE_MACMODE_FRMHDR_DMA_ENB;
2021
2022 if (sc->bge_flags & BGE_FLAG_TBI)
2023 val |= BGE_PORTMODE_TBI;
2024 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2025 val |= BGE_PORTMODE_GMII;
2026 else
2027 val |= BGE_PORTMODE_MII;
2028
2029 /* Turn on DMA, clear stats */
2030 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2031
2032 /* Set misc. local control, enable interrupts on attentions */
2033 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2034
2035#ifdef notdef
2036 /* Assert GPIO pins for PHY reset */
2037 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2038 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2039 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2040 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2041#endif
2042
2043 /* Turn on DMA completion state machine */
2044 if (!(BGE_IS_5705_PLUS(sc)))
2045 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2046
2047 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2048
2049 /* Enable host coalescing bug fix. */
2050 if (BGE_IS_5755_PLUS(sc))
2051 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2052
2053 /* Request larger DMA burst size to get better performance. */
2054 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2055 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2056
2057 /* Turn on write DMA state machine */
2058 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2059 DELAY(40);
2060
2061 /* Turn on read DMA state machine */
2062 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2063
2064 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2065 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2066
2067 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2068 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2069 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2070 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2071 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2072 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2073 if (sc->bge_flags & BGE_FLAG_PCIE)
2074 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2075 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2076 val |= BGE_RDMAMODE_TSO4_ENABLE;
2077 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2078 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2079 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2080 val |= BGE_RDMAMODE_TSO6_ENABLE;
2081 }
2082
2083 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2084 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2085 BGE_RDMAMODE_H2BNC_VLAN_DET;
2086 /*
2087 * Allow multiple outstanding read requests from
2088 * the non-LSO read DMA engine.
2089 */
2090 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2091 }
2092
2093 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2094 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2095 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2096 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2097 BGE_IS_5717_PLUS(sc)) {
2098 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2099 /*
2100 * Adjust the TX margin to prevent TX data corruption and
2101 * fix internal FIFO overflow.
2102 */
2103 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2104 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2105 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2106 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2107 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2108 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2109 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2110 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2111 }
2112 /*
2113 * Enable fix for read DMA FIFO overruns.
2114 * The fix is to limit the number of RX BDs
2115 * the hardware would fetch at a time.
2116 */
2117 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2118 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2119 }
2120
2121 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2122 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2123 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2124 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2125 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2126 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2127 /*
2128 * Allow 4KB burst length reads for non-LSO frames.
2129 * Enable 512B burst length reads for buffer descriptors.
2130 */
2131 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2132 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2133 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2134 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2135 }
2136
2137 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2138 DELAY(40);
2139
2140 /* Turn on RX data completion state machine */
2141 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2142
2143 /* Turn on RX BD initiator state machine */
2144 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2145
2146 /* Turn on RX data and RX BD initiator state machine */
2147 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2148
2149 /* Turn on Mbuf cluster free state machine */
2150 if (!(BGE_IS_5705_PLUS(sc)))
2151 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2152
2153 /* Turn on send BD completion state machine */
2154 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2155
2156 /* Turn on send data completion state machine */
2157 val = BGE_SDCMODE_ENABLE;
2158 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2159 val |= BGE_SDCMODE_CDELAY;
2160 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2161
2162 /* Turn on send data initiator state machine */
2163 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2164 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2165 BGE_SDIMODE_HW_LSO_PRE_DMA);
2166 else
2167 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2168
2169 /* Turn on send BD initiator state machine */
2170 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2171
2172 /* Turn on send BD selector state machine */
2173 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2174
2175 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2176 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2177 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2178
2179 /* ack/clear link change events */
2180 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2181 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2182 BGE_MACSTAT_LINK_CHANGED);
2183 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2184
2185 /*
2186 * Enable attention when the link has changed state for
2187 * devices that use auto polling.
2188 */
2189 if (sc->bge_flags & BGE_FLAG_TBI) {
2190 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2191 } else {
2192 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2193 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2194 DELAY(80);
2195 }
2196 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2197 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2198 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2199 BGE_EVTENB_MI_INTERRUPT);
2200 }
2201
2202 /*
2203 * Clear any pending link state attention.
2204 * Otherwise some link state change events may be lost until attention
2205 * is cleared by bge_intr() -> bge_link_upd() sequence.
2206 * It's not necessary on newer BCM chips - perhaps enabling link
2207 * state change attentions implies clearing pending attention.
2208 */
2209 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2210 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2211 BGE_MACSTAT_LINK_CHANGED);
2212
2213 /* Enable link state change attentions. */
2214 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2215
2216 return (0);
2217}
2218
2219const struct bge_revision *
2220bge_lookup_rev(uint32_t chipid)
2221{
2222 const struct bge_revision *br;
2223
2224 for (br = bge_revisions; br->br_name != NULL; br++) {
2225 if (br->br_chipid == chipid)
2226 return (br);
2227 }
2228
2229 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2230 if (br->br_chipid == BGE_ASICREV(chipid))
2231 return (br);
2232 }
2233
2234 return (NULL);
2235}
2236
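/*
 * Map a PCI vendor ID to a vendor name. The ID has already been matched
 * against the bge_devs table in bge_probe(), so an unknown vendor here
 * causes a panic.
 */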
2237const struct bge_vendor *
2238bge_lookup_vendor(uint16_t vid)
2239{
2240 const struct bge_vendor *v;
2241
2242 for (v = bge_vendors; v->v_name != NULL; v++)
2243 if (v->v_id == vid)
2244 return (v);
2245
2246 panic("%s: unknown vendor %d", __func__, vid);
2247 return (NULL);
2248}
2249
2250/*
2251 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2252 * against our list and return its name if we find a match.
2253 *
2254 * Note that since the Broadcom controller contains VPD support, we
2255 * try to get the device name string from the controller itself instead
2256 * of the compiled-in string. It guarantees we'll always announce the
2257 * right product name. We fall back to the compiled-in string when
2258 * VPD is unavailable or corrupt.
2259 */
2260static int
2261bge_probe(device_t dev)
2262{
2263 char buf[96];
2264 char model[64];
2265 const struct bge_revision *br;
2266 const char *pname;
2267 struct bge_softc *sc = device_get_softc(dev);
2268 const struct bge_type *t = bge_devs;
2269 const struct bge_vendor *v;
2270 uint32_t id;
2271 uint16_t did, vid;
2272
2273 sc->bge_dev = dev;
2274 vid = pci_get_vendor(dev);
2275 did = pci_get_device(dev);
2276 while(t->bge_vid != 0) {
2277 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2278 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2279 BGE_PCIMISCCTL_ASICREV_SHIFT;
2280 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2281 /*
2282	 * Find the ASIC revision. Different chips
2283 * use different registers.
2284 */
2285 switch (pci_get_device(dev)) {
2286 case BCOM_DEVICEID_BCM5717:
2287 case BCOM_DEVICEID_BCM5718:
2288 case BCOM_DEVICEID_BCM5719:
2289 case BCOM_DEVICEID_BCM5720:
2290 id = pci_read_config(dev,
2291 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2292 break;
2293 case BCOM_DEVICEID_BCM57761:
2294 case BCOM_DEVICEID_BCM57765:
2295 case BCOM_DEVICEID_BCM57781:
2296 case BCOM_DEVICEID_BCM57785:
2297 case BCOM_DEVICEID_BCM57791:
2298 case BCOM_DEVICEID_BCM57795:
2299 id = pci_read_config(dev,
2300 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2301 break;
2302 default:
2303 id = pci_read_config(dev,
2304 BGE_PCI_PRODID_ASICREV, 4);
2305 }
2306 }
2307 br = bge_lookup_rev(id);
2308 v = bge_lookup_vendor(vid);
2309 if (bge_has_eaddr(sc) &&
2310 pci_get_vpd_ident(dev, &pname) == 0)
2311 snprintf(model, 64, "%s", pname);
2312 else
2313 snprintf(model, 64, "%s %s", v->v_name,
2314 br != NULL ? br->br_name :
2315 "NetXtreme Ethernet Controller");
2316 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2317 br != NULL ? "" : "unknown ", id);
2318 device_set_desc_copy(dev, buf);
2319 return (0);
2320 }
2321 t++;
2322 }
2323
2324 return (ENXIO);
2325}
2326
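/*
 * Release all DMA resources allocated by bge_dma_alloc(): per-buffer
 * DMA maps, the mbuf DMA tags, the ring memory and tags, and finally
 * the buffer and parent tags.
 */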
2327static void
2328bge_dma_free(struct bge_softc *sc)
2329{
2330 int i;
2331
2332 /* Destroy DMA maps for RX buffers. */
2333 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2334 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2335 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2336 sc->bge_cdata.bge_rx_std_dmamap[i]);
2337 }
2338 if (sc->bge_cdata.bge_rx_std_sparemap)
2339 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2340 sc->bge_cdata.bge_rx_std_sparemap);
2341
2342 /* Destroy DMA maps for jumbo RX buffers. */
2343 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2344 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2345 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2346 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2347 }
2348 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2349 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2350 sc->bge_cdata.bge_rx_jumbo_sparemap);
2351
2352 /* Destroy DMA maps for TX buffers. */
2353 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2354 if (sc->bge_cdata.bge_tx_dmamap[i])
2355 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2356 sc->bge_cdata.bge_tx_dmamap[i]);
2357 }
2358
2359 if (sc->bge_cdata.bge_rx_mtag)
2360 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2361 if (sc->bge_cdata.bge_mtag_jumbo)
2362 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2363 if (sc->bge_cdata.bge_tx_mtag)
2364 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2365
2366
2367 /* Destroy standard RX ring. */
2368 if (sc->bge_cdata.bge_rx_std_ring_map)
2369 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2370 sc->bge_cdata.bge_rx_std_ring_map);
2371 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2372 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2373 sc->bge_ldata.bge_rx_std_ring,
2374 sc->bge_cdata.bge_rx_std_ring_map);
2375
2376 if (sc->bge_cdata.bge_rx_std_ring_tag)
2377 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2378
2379 /* Destroy jumbo RX ring. */
2380 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2381 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2382 sc->bge_cdata.bge_rx_jumbo_ring_map);
2383
2384 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2385 sc->bge_ldata.bge_rx_jumbo_ring)
2386 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2387 sc->bge_ldata.bge_rx_jumbo_ring,
2388 sc->bge_cdata.bge_rx_jumbo_ring_map);
2389
2390 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2391 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2392
2393 /* Destroy RX return ring. */
2394 if (sc->bge_cdata.bge_rx_return_ring_map)
2395 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2396 sc->bge_cdata.bge_rx_return_ring_map);
2397
2398 if (sc->bge_cdata.bge_rx_return_ring_map &&
2399 sc->bge_ldata.bge_rx_return_ring)
2400 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2401 sc->bge_ldata.bge_rx_return_ring,
2402 sc->bge_cdata.bge_rx_return_ring_map);
2403
2404 if (sc->bge_cdata.bge_rx_return_ring_tag)
2405 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2406
2407 /* Destroy TX ring. */
2408 if (sc->bge_cdata.bge_tx_ring_map)
2409 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2410 sc->bge_cdata.bge_tx_ring_map);
2411
2412 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2413 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2414 sc->bge_ldata.bge_tx_ring,
2415 sc->bge_cdata.bge_tx_ring_map);
2416
2417 if (sc->bge_cdata.bge_tx_ring_tag)
2418 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2419
2420 /* Destroy status block. */
2421 if (sc->bge_cdata.bge_status_map)
2422 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2423 sc->bge_cdata.bge_status_map);
2424
2425 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2426 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2427 sc->bge_ldata.bge_status_block,
2428 sc->bge_cdata.bge_status_map);
2429
2430 if (sc->bge_cdata.bge_status_tag)
2431 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2432
2433 /* Destroy statistics block. */
2434 if (sc->bge_cdata.bge_stats_map)
2435 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2436 sc->bge_cdata.bge_stats_map);
2437
2438 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2439 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2440 sc->bge_ldata.bge_stats,
2441 sc->bge_cdata.bge_stats_map);
2442
2443 if (sc->bge_cdata.bge_stats_tag)
2444 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2445
2446 if (sc->bge_cdata.bge_buffer_tag)
2447 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2448
2449 /* Destroy the parent tag. */
2450 if (sc->bge_cdata.bge_parent_tag)
2451 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2452}
2453
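/*
 * Helper to create a DMA tag, allocate DMA'able memory and load it for a
 * single ring. If the ring would cross a 4GB boundary on controllers with
 * the 4GB boundary bug, the allocation is retried with the DMA address
 * space limited to 32 bits.
 */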
2454static int
2455bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2456 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2457 bus_addr_t *paddr, const char *msg)
2458{
2459 struct bge_dmamap_arg ctx;
2460 bus_addr_t lowaddr;
2461 bus_size_t ring_end;
2462 int error;
2463
2464 lowaddr = BUS_SPACE_MAXADDR;
2465again:
2466 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2467 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2468 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2469 if (error != 0) {
2470 device_printf(sc->bge_dev,
2471 "could not create %s dma tag\n", msg);
2472 return (ENOMEM);
2473 }
2474 /* Allocate DMA'able memory for ring. */
2475 error = bus_dmamem_alloc(*tag, (void **)ring,
2476 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2477 if (error != 0) {
2478 device_printf(sc->bge_dev,
2479 "could not allocate DMA'able memory for %s\n", msg);
2480 return (ENOMEM);
2481 }
2482 /* Load the address of the ring. */
2483 ctx.bge_busaddr = 0;
2484 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2485 &ctx, BUS_DMA_NOWAIT);
2486 if (error != 0) {
2487 device_printf(sc->bge_dev,
2488 "could not load DMA'able memory for %s\n", msg);
2489 return (ENOMEM);
2490 }
2491 *paddr = ctx.bge_busaddr;
2492 ring_end = *paddr + maxsize;
2493 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2494 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2495 /*
2496 * 4GB boundary crossed. Limit maximum allowable DMA
2497 * address space to 32bit and try again.
2498 */
2499 bus_dmamap_unload(*tag, *map);
2500 bus_dmamem_free(*tag, *ring, *map);
2501 bus_dma_tag_destroy(*tag);
2502 if (bootverbose)
2503 device_printf(sc->bge_dev, "4GB boundary crossed, "
2504 "limit DMA address space to 32bit for %s\n", msg);
2505 *ring = NULL;
2506 *tag = NULL;
2507 *map = NULL;
2508 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2509 goto again;
2510 }
2511 return (0);
2512}
2513
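/*
 * Allocate the parent DMA tag, the memory for all rings, the status and
 * statistics blocks, and the DMA tags and maps used for RX/TX mbufs.
 */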
2514static int
2515bge_dma_alloc(struct bge_softc *sc)
2516{
2517 bus_addr_t lowaddr;
2518 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2519 int i, error;
2520
2521 lowaddr = BUS_SPACE_MAXADDR;
2522 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2523 lowaddr = BGE_DMA_MAXADDR;
2524 /*
2525 * Allocate the parent bus DMA tag appropriate for PCI.
2526 */
2527 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2528 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2529 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2530 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2531 if (error != 0) {
2532 device_printf(sc->bge_dev,
2533 "could not allocate parent dma tag\n");
2534 return (ENOMEM);
2535 }
2536
2537 /* Create tag for standard RX ring. */
2538 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2539 &sc->bge_cdata.bge_rx_std_ring_tag,
2540 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2541 &sc->bge_cdata.bge_rx_std_ring_map,
2542 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2543 if (error)
2544 return (error);
2545
2546 /* Create tag for RX return ring. */
2547 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2548 &sc->bge_cdata.bge_rx_return_ring_tag,
2549 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2550 &sc->bge_cdata.bge_rx_return_ring_map,
2551 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2552 if (error)
2553 return (error);
2554
2555 /* Create tag for TX ring. */
2556 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2557 &sc->bge_cdata.bge_tx_ring_tag,
2558 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2559 &sc->bge_cdata.bge_tx_ring_map,
2560 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2561 if (error)
2562 return (error);
2563
2564 /*
2565 * Create tag for status block.
2566	 * Because we only use a single Tx/Rx/Rx return ring, use the
2567	 * minimum status block size, except for BCM5700 AX/BX, which
2568	 * seems to want to see the full status block size regardless
2569	 * of the configured number of rings.
2570 */
2571 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2572 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2573 sbsz = BGE_STATUS_BLK_SZ;
2574 else
2575 sbsz = 32;
2576 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2577 &sc->bge_cdata.bge_status_tag,
2578 (uint8_t **)&sc->bge_ldata.bge_status_block,
2579 &sc->bge_cdata.bge_status_map,
2580 &sc->bge_ldata.bge_status_block_paddr, "status block");
2581 if (error)
2582 return (error);
2583
2584 /* Create tag for statistics block. */
2585 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2586 &sc->bge_cdata.bge_stats_tag,
2587 (uint8_t **)&sc->bge_ldata.bge_stats,
2588 &sc->bge_cdata.bge_stats_map,
2589 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2590 if (error)
2591 return (error);
2592
2593 /* Create tag for jumbo RX ring. */
2594 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2595 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2596 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2597 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2598 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2599 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2600 if (error)
2601 return (error);
2602 }
2603
2604 /* Create parent tag for buffers. */
2605 boundary = 0;
2606 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2607 boundary = BGE_DMA_BNDRY;
2608 /*
2609 * XXX
2610	 * A watchdog timeout issue was observed on BCM5704 controllers
2611	 * that live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
2612	 * bridge). Limiting the DMA address space to 32 bits seems to
2613	 * address it.
2614 */
2615 if (sc->bge_flags & BGE_FLAG_PCIX)
2616 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2617 }
2618 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2619 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2620 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2621 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2622 if (error != 0) {
2623 device_printf(sc->bge_dev,
2624 "could not allocate buffer dma tag\n");
2625 return (ENOMEM);
2626 }
2627 /* Create tag for Tx mbufs. */
2628 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2629 txsegsz = BGE_TSOSEG_SZ;
2630 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2631 } else {
2632 txsegsz = MCLBYTES;
2633 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2634 }
2635 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2636 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2637 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2638 &sc->bge_cdata.bge_tx_mtag);
2639
2640 if (error) {
2641 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2642 return (ENOMEM);
2643 }
2644
2645 /* Create tag for Rx mbufs. */
2646 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2647 rxmaxsegsz = MJUM9BYTES;
2648 else
2649 rxmaxsegsz = MCLBYTES;
2650 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2651 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2652 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2653
2654 if (error) {
2655 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2656 return (ENOMEM);
2657 }
2658
2659 /* Create DMA maps for RX buffers. */
2660 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2661 &sc->bge_cdata.bge_rx_std_sparemap);
2662 if (error) {
2663 device_printf(sc->bge_dev,
2664 "can't create spare DMA map for RX\n");
2665 return (ENOMEM);
2666 }
2667 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2668 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2669 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2670 if (error) {
2671 device_printf(sc->bge_dev,
2672 "can't create DMA map for RX\n");
2673 return (ENOMEM);
2674 }
2675 }
2676
2677 /* Create DMA maps for TX buffers. */
2678 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2679 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2680 &sc->bge_cdata.bge_tx_dmamap[i]);
2681 if (error) {
2682 device_printf(sc->bge_dev,
2683 "can't create DMA map for TX\n");
2684 return (ENOMEM);
2685 }
2686 }
2687
2688 /* Create tags for jumbo RX buffers. */
2689 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2690 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2691 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2692 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2693 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2694 if (error) {
2695 device_printf(sc->bge_dev,
2696 "could not allocate jumbo dma tag\n");
2697 return (ENOMEM);
2698 }
2699 /* Create DMA maps for jumbo RX buffers. */
2700 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2701 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2702 if (error) {
2703 device_printf(sc->bge_dev,
2704 "can't create spare DMA map for jumbo RX\n");
2705 return (ENOMEM);
2706 }
2707 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2708 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2709 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2710 if (error) {
2711 device_printf(sc->bge_dev,
2712 "can't create DMA map for jumbo RX\n");
2713 return (ENOMEM);
2714 }
2715 }
2716 }
2717
2718 return (0);
2719}
2720
2721/*
2722 * Return true if this device has more than one port.
2723 */
2724static int
2725bge_has_multiple_ports(struct bge_softc *sc)
2726{
2727 device_t dev = sc->bge_dev;
2728 u_int b, d, f, fscan, s;
2729
2730 d = pci_get_domain(dev);
2731 b = pci_get_bus(dev);
2732 s = pci_get_slot(dev);
2733 f = pci_get_function(dev);
2734 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2735 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2736 return (1);
2737 return (0);
2738}
2739
2740/*
2741 * Return true if MSI can be used with this device.
2742 */
2743static int
2744bge_can_use_msi(struct bge_softc *sc)
2745{
2746 int can_use_msi = 0;
2747
2748 if (sc->bge_msi != 0)
2749 return (0);
2750
2751 /* Disable MSI for polling(4). */
2752#ifdef DEVICE_POLLING
2753 return (0);
2754#endif
2755 switch (sc->bge_asicrev) {
2756 case BGE_ASICREV_BCM5714_A0:
2757 case BGE_ASICREV_BCM5714:
2758 /*
2759 * Apparently, MSI doesn't work when these chips are
2760 * configured in single-port mode.
2761 */
2762 if (bge_has_multiple_ports(sc))
2763 can_use_msi = 1;
2764 break;
2765 case BGE_ASICREV_BCM5750:
2766 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2767 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2768 can_use_msi = 1;
2769 break;
2770 default:
2771 if (BGE_IS_575X_PLUS(sc))
2772 can_use_msi = 1;
2773 }
2774 return (can_use_msi);
2775}
2776
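/*
 * Attach routine: map the registers, identify the chip, reset and
 * initialize it, allocate DMA resources, attach the PHY or TBI media,
 * and finally hook up the interrupt handler.
 */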
2777static int
2778bge_attach(device_t dev)
2779{
2780 struct ifnet *ifp;
2781 struct bge_softc *sc;
2782 uint32_t hwcfg = 0, misccfg;
2783 u_char eaddr[ETHER_ADDR_LEN];
2784 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2785
2786 sc = device_get_softc(dev);
2787 sc->bge_dev = dev;
2788
2789 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2790
2791 /*
2792 * Map control/status registers.
2793 */
2794 pci_enable_busmaster(dev);
2795
2796 rid = PCIR_BAR(0);
2797 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2798 RF_ACTIVE);
2799
2800 if (sc->bge_res == NULL) {
2801 device_printf (sc->bge_dev, "couldn't map memory\n");
2802 error = ENXIO;
2803 goto fail;
2804 }
2805
2806 /* Save various chip information. */
2807 sc->bge_chipid =
2808 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2809 BGE_PCIMISCCTL_ASICREV_SHIFT;
2810 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2811 /*
2812	 * Find the ASIC revision. Different chips use different
2813 * registers.
2814 */
2815 switch (pci_get_device(dev)) {
2816 case BCOM_DEVICEID_BCM5717:
2817 case BCOM_DEVICEID_BCM5718:
2818 case BCOM_DEVICEID_BCM5719:
2819 case BCOM_DEVICEID_BCM5720:
2820 sc->bge_chipid = pci_read_config(dev,
2821 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2822 break;
2823 case BCOM_DEVICEID_BCM57761:
2824 case BCOM_DEVICEID_BCM57765:
2825 case BCOM_DEVICEID_BCM57781:
2826 case BCOM_DEVICEID_BCM57785:
2827 case BCOM_DEVICEID_BCM57791:
2828 case BCOM_DEVICEID_BCM57795:
2829 sc->bge_chipid = pci_read_config(dev,
2830 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2831 break;
2832 default:
2833 sc->bge_chipid = pci_read_config(dev,
2834 BGE_PCI_PRODID_ASICREV, 4);
2835 }
2836 }
2837 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2838 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2839
2840 /* Set default PHY address. */
2841 phy_addr = 1;
2842 /*
2843 * PHY address mapping for various devices.
2844 *
2845 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2846 * ---------+-------+-------+-------+-------+
2847 * BCM57XX | 1 | X | X | X |
2848 * BCM5704 | 1 | X | 1 | X |
2849 * BCM5717 | 1 | 8 | 2 | 9 |
2850 * BCM5719 | 1 | 8 | 2 | 9 |
2851 * BCM5720 | 1 | 8 | 2 | 9 |
2852 *
2853 * Other addresses may respond but they are not
2854 * IEEE compliant PHYs and should be ignored.
2855 */
2856 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2857 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2858 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2859 f = pci_get_function(dev);
2860 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2861 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2862 BGE_SGDIGSTS_IS_SERDES)
2863 phy_addr = f + 8;
2864 else
2865 phy_addr = f + 1;
2866 } else {
2867 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2868 BGE_CPMU_PHY_STRAP_IS_SERDES)
2869 phy_addr = f + 8;
2870 else
2871 phy_addr = f + 1;
2872 }
2873 }
2874
2875 /*
2876 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2877 * 5705 A0 and A1 chips.
2878 */
2879 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2880 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2881 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2882 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2883 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2884 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2885
2886 if (bge_has_eaddr(sc))
2887 sc->bge_flags |= BGE_FLAG_EADDR;
2888
2889 /* Save chipset family. */
2890 switch (sc->bge_asicrev) {
2891 case BGE_ASICREV_BCM5717:
2892 case BGE_ASICREV_BCM5719:
2893 case BGE_ASICREV_BCM5720:
2894 case BGE_ASICREV_BCM57765:
2895 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2896 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2897 BGE_FLAG_JUMBO_FRAME;
2898 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2899 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2900 /* Jumbo frame on BCM5719 A0 does not work. */
2901 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2902 }
2903 break;
2904 case BGE_ASICREV_BCM5755:
2905 case BGE_ASICREV_BCM5761:
2906 case BGE_ASICREV_BCM5784:
2907 case BGE_ASICREV_BCM5785:
2908 case BGE_ASICREV_BCM5787:
2909 case BGE_ASICREV_BCM57780:
2910 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2911 BGE_FLAG_5705_PLUS;
2912 break;
2913 case BGE_ASICREV_BCM5700:
2914 case BGE_ASICREV_BCM5701:
2915 case BGE_ASICREV_BCM5703:
2916 case BGE_ASICREV_BCM5704:
2917 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2918 break;
2919 case BGE_ASICREV_BCM5714_A0:
2920 case BGE_ASICREV_BCM5780:
2921 case BGE_ASICREV_BCM5714:
2922 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2923 /* FALLTHROUGH */
2924 case BGE_ASICREV_BCM5750:
2925 case BGE_ASICREV_BCM5752:
2926 case BGE_ASICREV_BCM5906:
2927 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2928 /* FALLTHROUGH */
2929 case BGE_ASICREV_BCM5705:
2930 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2931 break;
2932 }
2933
2934 /* Set various PHY bug flags. */
2935 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2936 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2937 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2938 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2939 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2940 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2941 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2942 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2943 if (pci_get_subvendor(dev) == DELL_VENDORID)
2944 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2945 if ((BGE_IS_5705_PLUS(sc)) &&
2946 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2947 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2948 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2949 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
2950 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2951 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2952 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2953 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2954 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2955 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2956 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2957 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2958 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2959 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2960 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2961 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2962 } else
2963 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2964 }
2965
2966	/* Identify the chips that use a CPMU. */
2967 if (BGE_IS_5717_PLUS(sc) ||
2968 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2969 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2970 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2971 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2972 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2973 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2974 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2975 else
2976 sc->bge_mi_mode = BGE_MIMODE_BASE;
2977 /* Enable auto polling for BCM570[0-5]. */
2978 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2979 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2980
2981 /*
2982	 * All Broadcom controllers have the 4GB boundary DMA bug.
2983	 * Whenever an address crosses a multiple of the 4GB boundary
2984	 * (4GB, 8GB, 12GB, etc.) and makes the transition from
2985	 * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2986	 * state machine will lock up and cause the device to hang.
2987 */
2988 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2989
2990	/* BCM5755 or higher and BCM5906 have the short DMA bug. */
2991 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2992 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2993
2994 /*
2995	 * The BCM5719 cannot handle DMA requests for DMA segments that
2996	 * are larger than 4KB. However, the maximum DMA segment size
2997	 * created in the DMA tag is 4KB for TSO, so we don't encounter
2998	 * the issue here.
2999 */
3000 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3001 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3002
3003 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
3004 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3005 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3006 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3007 sc->bge_flags |= BGE_FLAG_5788;
3008 }
3009
3010 capmask = BMSR_DEFCAPMASK;
3011 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3012 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3013 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3014 pci_get_vendor(dev) == BCOM_VENDORID &&
3015 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3016 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3017 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3018 (pci_get_vendor(dev) == BCOM_VENDORID &&
3019 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3020 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3021 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3022 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3023 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3024 /* These chips are 10/100 only. */
3025 capmask &= ~BMSR_EXTSTAT;
3026 }
3027
3028 /*
3029	 * Some controllers seem to require special firmware to use
3030	 * TSO. But that firmware is not available to FreeBSD, and Linux
3031	 * claims that the TSO performed by the firmware is slower than
3032	 * hardware-based TSO. Moreover, the firmware-based TSO has a
3033	 * known bug: it cannot handle TSO when the Ethernet header plus
3034	 * the IP/TCP header is greater than 80 bytes. A workaround for
3035	 * the bug exists, but it seems more expensive than not using
3036	 * TSO at all. Some hardware also has the TSO bug, so limit
3037	 * TSO to the controllers that are not affected by TSO issues
3038	 * (e.g. 5755 or higher).
3039 */
3040 if (BGE_IS_5717_PLUS(sc)) {
3041 /* BCM5717 requires different TSO configuration. */
3042 sc->bge_flags |= BGE_FLAG_TSO3;
3043 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3044 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3045 /* TSO on BCM5719 A0 does not work. */
3046 sc->bge_flags &= ~BGE_FLAG_TSO3;
3047 }
3048 } else if (BGE_IS_5755_PLUS(sc)) {
3049 /*
3050	 * BCM5754 and BCM5787 share the same ASIC ID, so an
3051	 * explicit device ID check is required.
3052	 * For unknown reasons, TSO does not work on the BCM5755M.
3053 */
3054 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3055 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3056 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3057 sc->bge_flags |= BGE_FLAG_TSO;
3058 }
3059
3060 /*
3061 * Check if this is a PCI-X or PCI Express device.
3062 */
3063 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3064 /*
3065 * Found a PCI Express capabilities register, this
3066 * must be a PCI Express device.
3067 */
3068 sc->bge_flags |= BGE_FLAG_PCIE;
3069 sc->bge_expcap = reg;
3070 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3071 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3072 pci_set_max_read_req(dev, 2048);
3073 else if (pci_get_max_read_req(dev) != 4096)
3074 pci_set_max_read_req(dev, 4096);
3075 } else {
3076 /*
3077 * Check if the device is in PCI-X Mode.
3078 * (This bit is not valid on PCI Express controllers.)
3079 */
3080 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3081 sc->bge_pcixcap = reg;
3082 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3083 BGE_PCISTATE_PCI_BUSMODE) == 0)
3084 sc->bge_flags |= BGE_FLAG_PCIX;
3085 }
3086
3087 /*
3088 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3089 * not actually a MAC controller bug but an issue with the embedded
3090 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3091 */
3092 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3093 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3094 /*
3095 * Allocate the interrupt, using MSI if possible. These devices
3096 * support 8 MSI messages, but only the first one is used in
3097 * normal operation.
3098 */
3099 rid = 0;
3100 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3101 sc->bge_msicap = reg;
3102 if (bge_can_use_msi(sc)) {
3103 msicount = pci_msi_count(dev);
3104 if (msicount > 1)
3105 msicount = 1;
3106 } else
3107 msicount = 0;
3108 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3109 rid = 1;
3110 sc->bge_flags |= BGE_FLAG_MSI;
3111 }
3112 }
3113
3114 /*
3115	 * All controllers except the BCM5700 support tagged status, but
3116	 * we use tagged status only for the MSI case on BCM5717.
3117	 * Otherwise MSI on BCM5717 does not work.
3118 */
3119#ifndef DEVICE_POLLING
3120 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3121 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3122#endif
3123
3124 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3125 RF_SHAREABLE | RF_ACTIVE);
3126
3127 if (sc->bge_irq == NULL) {
3128 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3129 error = ENXIO;
3130 goto fail;
3131 }
3132
3133 device_printf(dev,
3134 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3135 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3136 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3137 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3138
3139 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3140
3141 /* Try to reset the chip. */
3142 if (bge_reset(sc)) {
3143 device_printf(sc->bge_dev, "chip reset failed\n");
3144 error = ENXIO;
3145 goto fail;
3146 }
3147
3148 sc->bge_asf_mode = 0;
3149 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3150 BGE_SRAM_DATA_SIG_MAGIC)) {
3151 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3152 & BGE_HWCFG_ASF) {
3153 sc->bge_asf_mode |= ASF_ENABLE;
3154 sc->bge_asf_mode |= ASF_STACKUP;
3155 if (BGE_IS_575X_PLUS(sc))
3156 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3157 }
3158 }
3159
3160 /* Try to reset the chip again the nice way. */
3161 bge_stop_fw(sc);
3162 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3163 if (bge_reset(sc)) {
3164 device_printf(sc->bge_dev, "chip reset failed\n");
3165 error = ENXIO;
3166 goto fail;
3167 }
3168
3169 bge_sig_legacy(sc, BGE_RESET_STOP);
3170 bge_sig_post_reset(sc, BGE_RESET_STOP);
3171
3172 if (bge_chipinit(sc)) {
3173 device_printf(sc->bge_dev, "chip initialization failed\n");
3174 error = ENXIO;
3175 goto fail;
3176 }
3177
3178 error = bge_get_eaddr(sc, eaddr);
3179 if (error) {
3180 device_printf(sc->bge_dev,
3181 "failed to read station address\n");
3182 error = ENXIO;
3183 goto fail;
3184 }
3185
3186 /* 5705 limits RX return ring to 512 entries. */
3187 if (BGE_IS_5717_PLUS(sc))
3188 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3189 else if (BGE_IS_5705_PLUS(sc))
3190 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3191 else
3192 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3193
3194 if (bge_dma_alloc(sc)) {
3195 device_printf(sc->bge_dev,
3196 "failed to allocate DMA resources\n");
3197 error = ENXIO;
3198 goto fail;
3199 }
3200
3201 bge_add_sysctls(sc);
3202
3203 /* Set default tuneable values. */
3204 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3205 sc->bge_rx_coal_ticks = 150;
3206 sc->bge_tx_coal_ticks = 150;
3207 sc->bge_rx_max_coal_bds = 10;
3208 sc->bge_tx_max_coal_bds = 10;
3209
3210 /* Initialize checksum features to use. */
3211 sc->bge_csum_features = BGE_CSUM_FEATURES;
3212 if (sc->bge_forced_udpcsum != 0)
3213 sc->bge_csum_features |= CSUM_UDP;
3214
3215 /* Set up ifnet structure */
3216 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3217 if (ifp == NULL) {
3218 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3219 error = ENXIO;
3220 goto fail;
3221 }
3222 ifp->if_softc = sc;
3223 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3224 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3225 ifp->if_ioctl = bge_ioctl;
3226 ifp->if_start = bge_start;
3227 ifp->if_init = bge_init;
3228 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3229 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3230 IFQ_SET_READY(&ifp->if_snd);
3231 ifp->if_hwassist = sc->bge_csum_features;
3232 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3233 IFCAP_VLAN_MTU;
3234 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3235 ifp->if_hwassist |= CSUM_TSO;
3236 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3237 }
3238#ifdef IFCAP_VLAN_HWCSUM
3239 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3240#endif
3241 ifp->if_capenable = ifp->if_capabilities;
3242#ifdef DEVICE_POLLING
3243 ifp->if_capabilities |= IFCAP_POLLING;
3244#endif
3245
3246 /*
3247 * 5700 B0 chips do not support checksumming correctly due
3248 * to hardware bugs.
3249 */
3250 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3251 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3252 ifp->if_capenable &= ~IFCAP_HWCSUM;
3253 ifp->if_hwassist = 0;
3254 }
3255
3256 /*
3257 * Figure out what sort of media we have by checking the
3258 * hardware config word in the first 32k of NIC internal memory,
3259 * or fall back to examining the EEPROM if necessary.
3260 * Note: on some BCM5700 cards, this value appears to be unset.
3261 * If that's the case, we have to rely on identifying the NIC
3262 * by its PCI subsystem ID, as we do below for the SysKonnect
3263 * SK-9D41.
3264 */
3265 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3266 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3267 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3268 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3269 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3270 sizeof(hwcfg))) {
3271 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3272 error = ENXIO;
3273 goto fail;
3274 }
3275 hwcfg = ntohl(hwcfg);
3276 }
3277
3278 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3279 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3280 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3281 if (BGE_IS_5714_FAMILY(sc))
3282 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3283 else
3284 sc->bge_flags |= BGE_FLAG_TBI;
3285 }
3286
3287 if (sc->bge_flags & BGE_FLAG_TBI) {
3288 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3289 bge_ifmedia_sts);
3290 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3291 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3292 0, NULL);
3293 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3294 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3295 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3296 } else {
3297 /*
3298	 * Do transceiver setup and tell the firmware the
3299	 * driver is down so we can try to get access for the
3300	 * probe if ASF is running. Retry a couple of times
3301	 * if we get a conflict with the ASF firmware accessing
3302	 * the PHY.
3303 */
3304 trys = 0;
3305 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3306again:
3307 bge_asf_driver_up(sc);
3308
3309 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3310 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3311 MIIF_DOPAUSE);
3312 if (error != 0) {
3313 if (trys++ < 4) {
3314 device_printf(sc->bge_dev, "Try again\n");
3315 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3316 BMCR_RESET);
3317 goto again;
3318 }
3319 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3320 goto fail;
3321 }
3322
3323 /*
3324 * Now tell the firmware we are going up after probing the PHY
3325 */
3326 if (sc->bge_asf_mode & ASF_STACKUP)
3327 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3328 }
3329
3330 /*
3331 * When using the BCM5701 in PCI-X mode, data corruption has
3332 * been observed in the first few bytes of some received packets.
3333 * Aligning the packet buffer in memory eliminates the corruption.
3334 * Unfortunately, this misaligns the packet payloads. On platforms
3335 * which do not support unaligned accesses, we will realign the
3336 * payloads by copying the received packets.
3337 */
3338 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3339 sc->bge_flags & BGE_FLAG_PCIX)
3340 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3341
3342 /*
3343 * Call MI attach routine.
3344 */
3345 ether_ifattach(ifp, eaddr);
3346 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3347
3348 /* Tell upper layer we support long frames. */
3349 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3350
3351 /*
3352 * Hookup IRQ last.
3353 */
3354 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3355 /* Take advantage of single-shot MSI. */
3356 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3357 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3358 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3359 taskqueue_thread_enqueue, &sc->bge_tq);
3360 if (sc->bge_tq == NULL) {
3361 device_printf(dev, "could not create taskqueue.\n");
3362 ether_ifdetach(ifp);
3363 error = ENXIO;
3364 goto fail;
3365 }
3366 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3367 device_get_nameunit(sc->bge_dev));
3368 error = bus_setup_intr(dev, sc->bge_irq,
3369 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3370 &sc->bge_intrhand);
3371 if (error)
3372 ether_ifdetach(ifp);
3373 } else
3374 error = bus_setup_intr(dev, sc->bge_irq,
3375 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3376 &sc->bge_intrhand);
3377
3378 if (error) {
3379 bge_detach(dev);
3380 device_printf(sc->bge_dev, "couldn't set up irq\n");
3381 }
3382
3383 return (0);
3384
3385fail:
3386 bge_release_resources(sc);
3387
3388 return (error);
3389}
3390
3391static int
3392bge_detach(device_t dev)
3393{
3394 struct bge_softc *sc;
3395 struct ifnet *ifp;
3396
3397 sc = device_get_softc(dev);
3398 ifp = sc->bge_ifp;
3399
3400#ifdef DEVICE_POLLING
3401 if (ifp->if_capenable & IFCAP_POLLING)
3402 ether_poll_deregister(ifp);
3403#endif
3404
3405 BGE_LOCK(sc);
3406 bge_stop(sc);
3407 bge_reset(sc);
3408 BGE_UNLOCK(sc);
3409
3410 callout_drain(&sc->bge_stat_ch);
3411
3412 if (sc->bge_tq)
3413 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3414 ether_ifdetach(ifp);
3415
3416 if (sc->bge_flags & BGE_FLAG_TBI) {
3417 ifmedia_removeall(&sc->bge_ifmedia);
3418 } else {
3419 bus_generic_detach(dev);
3420 device_delete_child(dev, sc->bge_miibus);
3421 }
3422
3423 bge_release_resources(sc);
3424
3425 return (0);
3426}
3427
3428static void
3429bge_release_resources(struct bge_softc *sc)
3430{
3431 device_t dev;
3432
3433 dev = sc->bge_dev;
3434
3435 if (sc->bge_tq != NULL)
3436 taskqueue_free(sc->bge_tq);
3437
3438 if (sc->bge_intrhand != NULL)
3439 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3440
3441 if (sc->bge_irq != NULL)
3442 bus_release_resource(dev, SYS_RES_IRQ,
3443 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3444
3445 if (sc->bge_flags & BGE_FLAG_MSI)
3446 pci_release_msi(dev);
3447
3448 if (sc->bge_res != NULL)
3449 bus_release_resource(dev, SYS_RES_MEMORY,
3450 PCIR_BAR(0), sc->bge_res);
3451
3452 if (sc->bge_ifp != NULL)
3453 if_free(sc->bge_ifp);
3454
3455 bge_dma_free(sc);
3456
3457 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3458 BGE_LOCK_DESTROY(sc);
3459}
3460
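/*
 * Perform a global chip reset: save critical PCI config state, reset the
 * core, restore the saved state, wait for the bootcode/firmware handshake
 * to complete and re-enable the memory arbiter.
 */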
3461static int
3462bge_reset(struct bge_softc *sc)
3463{
3464 device_t dev;
3465 uint32_t cachesize, command, pcistate, reset, val;
3466 void (*write_op)(struct bge_softc *, int, int);
3467 uint16_t devctl;
3468 int i;
3469
3470 dev = sc->bge_dev;
3471
3472 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3473 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3474 if (sc->bge_flags & BGE_FLAG_PCIE)
3475 write_op = bge_writemem_direct;
3476 else
3477 write_op = bge_writemem_ind;
3478 } else
3479 write_op = bge_writereg_ind;
3480
3481 /* Save some important PCI state. */
3482 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3483 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3484 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3485
3486 pci_write_config(dev, BGE_PCI_MISC_CTL,
3487 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3488 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3489
3490 /* Disable fastboot on controllers that support it. */
3491 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3492 BGE_IS_5755_PLUS(sc)) {
3493 if (bootverbose)
3494 device_printf(dev, "Disabling fastboot\n");
3495 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3496 }
3497
3498 /*
3499 * Write the magic number to SRAM at offset 0xB50.
3500 * When firmware finishes its initialization it will
3501 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3502 */
3503 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3504
3505 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3506
3507 /* XXX: Broadcom Linux driver. */
3508 if (sc->bge_flags & BGE_FLAG_PCIE) {
3509 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3510 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3511 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3512 /* Prevent PCIE link training during global reset */
3513 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3514 reset |= 1 << 29;
3515 }
3516 }
3517
3518 /*
3519 * Set GPHY Power Down Override to leave GPHY
3520 * powered up in D0 uninitialized.
3521 */
3522 if (BGE_IS_5705_PLUS(sc) &&
3523 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3524 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3525
3526 /* Issue global reset */
3527 write_op(sc, BGE_MISC_CFG, reset);
3528
3529 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3530 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3531 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3532 val | BGE_VCPU_STATUS_DRV_RESET);
3533 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3534 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3535 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3536 }
3537
3538 DELAY(1000);
3539
3540 /* XXX: Broadcom Linux driver. */
3541 if (sc->bge_flags & BGE_FLAG_PCIE) {
3542 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3543 DELAY(500000); /* wait for link training to complete */
3544 val = pci_read_config(dev, 0xC4, 4);
3545 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3546 }
3547 devctl = pci_read_config(dev,
3548 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3549 /* Clear enable no snoop and disable relaxed ordering. */
3550 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3551 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3552 /* Set PCIE max payload size to 128. */
3553 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3554 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3555 devctl, 2);
3556 /* Clear error status. */
3557 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3558 PCIM_EXP_STA_CORRECTABLE_ERROR |
3559 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3560 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3561 }
3562
3563 /* Reset some of the PCI state that got zapped by reset. */
3564 pci_write_config(dev, BGE_PCI_MISC_CTL,
3565 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3566 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3567 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3568 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3569 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3570 /*
3571	 * Disable PCI-X relaxed ordering to ensure that the status block
3572	 * update arrives before the packet buffer DMA. Otherwise the
3573	 * driver may read a stale status block.
3574 */
3575 if (sc->bge_flags & BGE_FLAG_PCIX) {
3576 devctl = pci_read_config(dev,
3577 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3578 devctl &= ~PCIXM_COMMAND_ERO;
3579 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3580 devctl &= ~PCIXM_COMMAND_MAX_READ;
3581 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3582 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3583 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3584 PCIXM_COMMAND_MAX_READ);
3585 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3586 }
3587 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3588 devctl, 2);
3589 }
3590 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3591 if (BGE_IS_5714_FAMILY(sc)) {
3592 /* This chip disables MSI on reset. */
3593 if (sc->bge_flags & BGE_FLAG_MSI) {
3594 val = pci_read_config(dev,
3595 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3596 pci_write_config(dev,
3597 sc->bge_msicap + PCIR_MSI_CTRL,
3598 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3599 val = CSR_READ_4(sc, BGE_MSI_MODE);
3600 CSR_WRITE_4(sc, BGE_MSI_MODE,
3601 val | BGE_MSIMODE_ENABLE);
3602 }
3603 val = CSR_READ_4(sc, BGE_MARB_MODE);
3604 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3605 } else
3606 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3607
3608 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3609 for (i = 0; i < BGE_TIMEOUT; i++) {
3610 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3611 if (val & BGE_VCPU_STATUS_INIT_DONE)
3612 break;
3613 DELAY(100);
3614 }
3615 if (i == BGE_TIMEOUT) {
3616 device_printf(dev, "reset timed out\n");
3617 return (1);
3618 }
3619 } else {
3620 /*
3621 * Poll until we see the 1's complement of the magic number.
3622 * This indicates that the firmware initialization is complete.
3623 * We expect this to fail if no chip containing the Ethernet
3624 * address is fitted though.
3625 */
3626 for (i = 0; i < BGE_TIMEOUT; i++) {
3627 DELAY(10);
3628 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3629 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3630 break;
3631 }
3632
3633 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3634 device_printf(dev,
3635 "firmware handshake timed out, found 0x%08x\n",
3636 val);
3637 /* BCM57765 A0 needs additional time before accessing. */
3638 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3639 DELAY(10 * 1000); /* XXX */
3640 }
3641
3642 /*
3643 * XXX Wait for the value of the PCISTATE register to
3644 * return to its original pre-reset state. This is a
3645 * fairly good indicator of reset completion. If we don't
3646 * wait for the reset to fully complete, trying to read
3647 * from the device's non-PCI registers may yield garbage
3648 * results.
3649 */
3650 for (i = 0; i < BGE_TIMEOUT; i++) {
3651 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3652 break;
3653 DELAY(10);
3654 }
3655
3656 /* Fix up byte swapping. */
3657 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3658
3659 /* Tell the ASF firmware we are up */
3660 if (sc->bge_asf_mode & ASF_STACKUP)
3661 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3662
3663 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3664
3665 /*
3666 * The 5704 in TBI mode apparently needs some special
3667	 * adjustment to ensure the SERDES drive level is set
3668 * to 1.2V.
3669 */
3670 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3671 sc->bge_flags & BGE_FLAG_TBI) {
3672 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3673 val = (val & ~0xFFF) | 0x880;
3674 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3675 }
3676
3677 /* XXX: Broadcom Linux driver. */
3678 if (sc->bge_flags & BGE_FLAG_PCIE &&
3679 !BGE_IS_5717_PLUS(sc) &&
3680 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3681 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3682 /* Enable Data FIFO protection. */
3683 val = CSR_READ_4(sc, 0x7C00);
3684 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3685 }
3686 DELAY(10000);
3687
3688 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3689 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3690 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3691
3692 return (0);
3693}
3694
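/*
 * Re-initialize an RX buffer descriptor so that the existing mbuf is
 * reused; called from bge_rxeof() when a frame had an error or a
 * replacement mbuf could not be allocated.
 */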
3695static __inline void
3696bge_rxreuse_std(struct bge_softc *sc, int i)
3697{
3698 struct bge_rx_bd *r;
3699
3700 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3701 r->bge_flags = BGE_RXBDFLAG_END;
3702 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3703 r->bge_idx = i;
3704 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3705}
3706
3707static __inline void
3708bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3709{
3710 struct bge_extrx_bd *r;
3711
3712 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3713 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3714 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3715 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3716 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3717 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3718 r->bge_idx = i;
3719 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3720}
3721
3722/*
3723 * Frame reception handling. This is called if there's a frame
3724 * on the receive return list.
3725 *
3726 * Note: we have to be able to handle two possibilities here:
3727 * 1) the frame is from the jumbo receive ring
3728 * 2) the frame is from the standard receive ring
3729 */
3730
3731static int
3732bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3733{
3734 struct ifnet *ifp;
3735 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3736 uint16_t rx_cons;
3737
3738 rx_cons = sc->bge_rx_saved_considx;
3739
3740 /* Nothing to do. */
3741 if (rx_cons == rx_prod)
3742 return (rx_npkts);
3743
3744 ifp = sc->bge_ifp;
3745
3746 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3747 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3748 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3749 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3750 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3751 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3752 (MCLBYTES - ETHER_ALIGN))
3753 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3754 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3755
3756 while (rx_cons != rx_prod) {
3757 struct bge_rx_bd *cur_rx;
3758 uint32_t rxidx;
3759 struct mbuf *m = NULL;
3760 uint16_t vlan_tag = 0;
3761 int have_tag = 0;
3762
3763#ifdef DEVICE_POLLING
3764 if (ifp->if_capenable & IFCAP_POLLING) {
3765 if (sc->rxcycles <= 0)
3766 break;
3767 sc->rxcycles--;
3768 }
3769#endif
3770
3771 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3772
3773 rxidx = cur_rx->bge_idx;
3774 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3775
3776 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3777 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3778 have_tag = 1;
3779 vlan_tag = cur_rx->bge_vlan_tag;
3780 }
3781
3782 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3783 jumbocnt++;
3784 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3785 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3786 bge_rxreuse_jumbo(sc, rxidx);
3787 continue;
3788 }
3789 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3790 bge_rxreuse_jumbo(sc, rxidx);
3791 ifp->if_iqdrops++;
3792 continue;
3793 }
3794 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3795 } else {
3796 stdcnt++;
3797 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3798 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3799 bge_rxreuse_std(sc, rxidx);
3800 continue;
3801 }
3802 if (bge_newbuf_std(sc, rxidx) != 0) {
3803 bge_rxreuse_std(sc, rxidx);
3804 ifp->if_iqdrops++;
3805 continue;
3806 }
3807 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3808 }
3809
3810 ifp->if_ipackets++;
3811#ifndef __NO_STRICT_ALIGNMENT
3812 /*
3813 * For architectures with strict alignment we must make sure
3814 * the payload is aligned.
3815 */
3816 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3817 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3818 cur_rx->bge_len);
3819 m->m_data += ETHER_ALIGN;
3820 }
3821#endif
3822 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3823 m->m_pkthdr.rcvif = ifp;
3824
3825 if (ifp->if_capenable & IFCAP_RXCSUM)
3826 bge_rxcsum(sc, cur_rx, m);
3827
3828 /*
3829 * If we received a packet with a vlan tag,
3830 * attach that information to the packet.
3831 */
3832 if (have_tag) {
3833 m->m_pkthdr.ether_vtag = vlan_tag;
3834 m->m_flags |= M_VLANTAG;
3835 }
3836
3837 if (holdlck != 0) {
3838 BGE_UNLOCK(sc);
3839 (*ifp->if_input)(ifp, m);
3840 BGE_LOCK(sc);
3841 } else
3842 (*ifp->if_input)(ifp, m);
3843 rx_npkts++;
3844
3845 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3846 return (rx_npkts);
3847 }
3848
3849 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3850 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3851 if (stdcnt > 0)
3852 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3853 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3854
3855 if (jumbocnt > 0)
3856 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3857 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3858
3859 sc->bge_rx_saved_considx = rx_cons;
3860 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3861 if (stdcnt)
3862 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3863 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3864 if (jumbocnt)
3865 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3866 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3867#ifdef notyet
3868 /*
3869 * This register wraps very quickly under heavy packet drops.
3870 * If you need correct statistics, you can enable this check.
3871 */
3872 if (BGE_IS_5705_PLUS(sc))
3873 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3874#endif
3875 return (rx_npkts);
3876}
3877
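/*
 * Translate the hardware checksum status of a received frame into mbuf
 * checksum flags. The 5717-class controllers report checksum status via
 * different descriptor flags than older controllers.
 */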
3878static void
3879bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3880{
3881
3882 if (BGE_IS_5717_PLUS(sc)) {
3883 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3884 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3885 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3886 if ((cur_rx->bge_error_flag &
3887 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3888 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3889 }
3890 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3891 m->m_pkthdr.csum_data =
3892 cur_rx->bge_tcp_udp_csum;
3893 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3894 CSUM_PSEUDO_HDR;
3895 }
3896 }
3897 } else {
3898 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3899 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3900 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3901 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3902 }
3903 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3904 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3905 m->m_pkthdr.csum_data =
3906 cur_rx->bge_tcp_udp_csum;
3907 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3908 CSUM_PSEUDO_HDR;
3909 }
3910 }
3911}
3912
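/*
 * Reclaim completed TX descriptors: unload and free the mbufs for frames
 * the hardware has finished sending, and reset the watchdog timer once
 * the ring is empty.
 */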
3913static void
3914bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3915{
3916 struct bge_tx_bd *cur_tx;
3917 struct ifnet *ifp;
3918
3919 BGE_LOCK_ASSERT(sc);
3920
3921 /* Nothing to do. */
3922 if (sc->bge_tx_saved_considx == tx_cons)
3923 return;
3924
3925 ifp = sc->bge_ifp;
3926
3927 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3928 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3929 /*
3930 * Go through our tx ring and free mbufs for those
3931 * frames that have been sent.
3932 */
3933 while (sc->bge_tx_saved_considx != tx_cons) {
3934 uint32_t idx;
3935
3936 idx = sc->bge_tx_saved_considx;
3937 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3938 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3939 ifp->if_opackets++;
3940 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3941 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3942 sc->bge_cdata.bge_tx_dmamap[idx],
3943 BUS_DMASYNC_POSTWRITE);
3944 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3945 sc->bge_cdata.bge_tx_dmamap[idx]);
3946 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3947 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3948 }
3949 sc->bge_txcnt--;
3950 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3951 }
3952
3953 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3954 if (sc->bge_txcnt == 0)
3955 sc->bge_timer = 0;
3956}
3957
3958#ifdef DEVICE_POLLING
3959static int
3960bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3961{
3962 struct bge_softc *sc = ifp->if_softc;
3963 uint16_t rx_prod, tx_cons;
3964 uint32_t statusword;
3965 int rx_npkts = 0;
3966
3967 BGE_LOCK(sc);
3968 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3969 BGE_UNLOCK(sc);
3970 return (rx_npkts);
3971 }
3972
3973 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3974 sc->bge_cdata.bge_status_map,
3975 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3976 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3977 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3978
3979 statusword = sc->bge_ldata.bge_status_block->bge_status;
3980 sc->bge_ldata.bge_status_block->bge_status = 0;
3981
3982 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3983 sc->bge_cdata.bge_status_map,
3984 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3985
3986 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3987 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3988 sc->bge_link_evt++;
3989
3990 if (cmd == POLL_AND_CHECK_STATUS)
3991 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3992 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3993 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3994 bge_link_upd(sc);
3995
3996 sc->rxcycles = count;
3997 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3998 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3999 BGE_UNLOCK(sc);
4000 return (rx_npkts);
4001 }
4002 bge_txeof(sc, tx_cons);
4003 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4004 bge_start_locked(ifp);
4005
4006 BGE_UNLOCK(sc);
4007 return (rx_npkts);
4008}
4009#endif /* DEVICE_POLLING */
4010
4011static int
4012bge_msi_intr(void *arg)
4013{
4014 struct bge_softc *sc;
4015
4016 sc = (struct bge_softc *)arg;
4017 /*
4018	 * This interrupt is not shared, and the controller has already
4019	 * disabled further interrupts.
4020 */
4021 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4022 return (FILTER_HANDLED);
4023}
4024
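/*
 * Deferred interrupt handler, run from the taskqueue that bge_msi_intr()
 * enqueues; the status-tag mailbox write below lets the controller raise
 * further interrupts once the status block has been captured.
 */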
4025static void
4026bge_intr_task(void *arg, int pending)
4027{
4028 struct bge_softc *sc;
4029 struct ifnet *ifp;
4030 uint32_t status, status_tag;
4031 uint16_t rx_prod, tx_cons;
4032
4033 sc = (struct bge_softc *)arg;
4034 ifp = sc->bge_ifp;
4035
4036 BGE_LOCK(sc);
4037 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4038 BGE_UNLOCK(sc);
4039 return;
4040 }
4041
4042 /* Get updated status block. */
4043 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4044 sc->bge_cdata.bge_status_map,
4045 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4046
4047	/* Save producer/consumer indexes. */
4048 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4049 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4050 status = sc->bge_ldata.bge_status_block->bge_status;
4051 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4052 sc->bge_ldata.bge_status_block->bge_status = 0;
4053 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4054 sc->bge_cdata.bge_status_map,
4055 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4056 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4057 status_tag = 0;
4058
4059 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4060 bge_link_upd(sc);
4061
4062 /* Let controller work. */
4063 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4064
4065 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4066 sc->bge_rx_saved_considx != rx_prod) {
4067 /* Check RX return ring producer/consumer. */
4068 BGE_UNLOCK(sc);
4069 bge_rxeof(sc, rx_prod, 0);
4070 BGE_LOCK(sc);
4071 }
4072 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4073 /* Check TX ring producer/consumer. */
4074 bge_txeof(sc, tx_cons);
4075 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4076 bge_start_locked(ifp);
4077 }
4078 BGE_UNLOCK(sc);
4079}
4080
4081static void
4082bge_intr(void *xsc)
4083{
4084 struct bge_softc *sc;
4085 struct ifnet *ifp;
4086 uint32_t statusword;
4087 uint16_t rx_prod, tx_cons;
4088
4089 sc = xsc;
4090
4091 BGE_LOCK(sc);
4092
4093 ifp = sc->bge_ifp;
4094
4095#ifdef DEVICE_POLLING
4096 if (ifp->if_capenable & IFCAP_POLLING) {
4097 BGE_UNLOCK(sc);
4098 return;
4099 }
4100#endif
4101
4102 /*
4103 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4104 * disable interrupts by writing nonzero like we used to, since with
4105 * our current organization this just gives complications and
4106 * pessimizations for re-enabling interrupts. We used to have races
4107 * instead of the necessary complications. Disabling interrupts
4108 * would just reduce the chance of a status update while we are
4109 * running (by switching to the interrupt-mode coalescence
4110 * parameters), but this chance is already very low so it is more
4111 * efficient to get another interrupt than prevent it.
4112 *
4113 * We do the ack first to ensure another interrupt if there is a
4114 * status update after the ack. We don't check for the status
4115 * changing later because it is more efficient to get another
4116 * interrupt than prevent it, not quite as above (not checking is
4117 * a smaller optimization than not toggling the interrupt enable,
4118	 * since checking doesn't involve PCI accesses and toggling requires
4119 * the status check). So toggling would probably be a pessimization
4120 * even with MSI. It would only be needed for using a task queue.
4121 */
4122 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4123
4124 /*
4125 * Do the mandatory PCI flush as well as get the link status.
4126 */
4127 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4128
4129 /* Make sure the descriptor ring indexes are coherent. */
4130 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4131 sc->bge_cdata.bge_status_map,
4132 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4133 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4134 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4135 sc->bge_ldata.bge_status_block->bge_status = 0;
4136 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4137 sc->bge_cdata.bge_status_map,
4138 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4139
4140 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4141 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4142 statusword || sc->bge_link_evt)
4143 bge_link_upd(sc);
4144
4145 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4146 /* Check RX return ring producer/consumer. */
4147 bge_rxeof(sc, rx_prod, 1);
4148 }
4149
4150 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4151 /* Check TX ring producer/consumer. */
4152 bge_txeof(sc, tx_cons);
4153 }
4154
4155 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4156 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4157 bge_start_locked(ifp);
4158
4159 BGE_UNLOCK(sc);
4160}
4161
4162static void
4163bge_asf_driver_up(struct bge_softc *sc)
4164{
4165 if (sc->bge_asf_mode & ASF_STACKUP) {
4166		/* Send ASF heartbeat approx. every 2s */
4167 if (sc->bge_asf_count)
4168 sc->bge_asf_count --;
4169 else {
4170 sc->bge_asf_count = 2;
4171 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4172 BGE_FW_CMD_DRV_ALIVE);
4173 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4174 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4175 BGE_FW_HB_TIMEOUT_SEC);
4176 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4177 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4178 BGE_RX_CPU_DRV_EVENT);
4179 }
4180 }
4181}
4182
4183static void
4184bge_tick(void *xsc)
4185{
4186 struct bge_softc *sc = xsc;
4187 struct mii_data *mii = NULL;
4188
4189 BGE_LOCK_ASSERT(sc);
4190
4191 /* Synchronize with possible callout reset/stop. */
4192 if (callout_pending(&sc->bge_stat_ch) ||
4193 !callout_active(&sc->bge_stat_ch))
4194 return;
4195
4196 if (BGE_IS_5705_PLUS(sc))
4197 bge_stats_update_regs(sc);
4198 else
4199 bge_stats_update(sc);
4200
4201 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4202 mii = device_get_softc(sc->bge_miibus);
4203 /*
4204		 * Do not touch the PHY if we have link up. This could break
4205		 * IPMI/ASF mode or produce extra input errors
4206		 * (extra errors were reported for bcm5701 & bcm5704).
4207 */
4208 if (!sc->bge_link)
4209 mii_tick(mii);
4210 } else {
4211 /*
4212		 * Since auto-polling can't be used in TBI mode, we have to poll
4213		 * the link status manually. Here we register a pending link
4214		 * event and trigger an interrupt.
4215 */
4216#ifdef DEVICE_POLLING
4217 /* In polling mode we poll link state in bge_poll(). */
4218 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4219#endif
4220 {
4221 sc->bge_link_evt++;
4222 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4223 sc->bge_flags & BGE_FLAG_5788)
4224 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4225 else
4226 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4227 }
4228 }
4229
4230 bge_asf_driver_up(sc);
4231 bge_watchdog(sc);
4232
4233 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4234}
4235
4236static void
4237bge_stats_update_regs(struct bge_softc *sc)
4238{
4239 struct ifnet *ifp;
4240 struct bge_mac_stats *stats;
4241
4242 ifp = sc->bge_ifp;
4243 stats = &sc->bge_mac_stats;
4244
4245 stats->ifHCOutOctets +=
4246 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4247 stats->etherStatsCollisions +=
4248 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4249 stats->outXonSent +=
4250 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4251 stats->outXoffSent +=
4252 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4253 stats->dot3StatsInternalMacTransmitErrors +=
4254 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4255 stats->dot3StatsSingleCollisionFrames +=
4256 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4257 stats->dot3StatsMultipleCollisionFrames +=
4258 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4259 stats->dot3StatsDeferredTransmissions +=
4260 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4261 stats->dot3StatsExcessiveCollisions +=
4262 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4263 stats->dot3StatsLateCollisions +=
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4265 stats->ifHCOutUcastPkts +=
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4267 stats->ifHCOutMulticastPkts +=
4268 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4269 stats->ifHCOutBroadcastPkts +=
4270 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4271
4272 stats->ifHCInOctets +=
4273 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4274 stats->etherStatsFragments +=
4275 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4276 stats->ifHCInUcastPkts +=
4277 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4278 stats->ifHCInMulticastPkts +=
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4280 stats->ifHCInBroadcastPkts +=
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4282 stats->dot3StatsFCSErrors +=
4283 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4284 stats->dot3StatsAlignmentErrors +=
4285 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4286 stats->xonPauseFramesReceived +=
4287 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4288 stats->xoffPauseFramesReceived +=
4289 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4290 stats->macControlFramesReceived +=
4291 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4292 stats->xoffStateEntered +=
4293 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4294 stats->dot3StatsFramesTooLong +=
4295 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4296 stats->etherStatsJabbers +=
4297 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4298 stats->etherStatsUndersizePkts +=
4299 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4300
4301 stats->FramesDroppedDueToFilters +=
4302 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4303 stats->DmaWriteQueueFull +=
4304 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4305 stats->DmaWriteHighPriQueueFull +=
4306 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4307 stats->NoMoreRxBDs +=
4308 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4309 /*
4310 * XXX
4311	 * Unlike on other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4312	 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4313	 * includes the number of unwanted multicast frames. This comes
4314	 * from a silicon bug, and the known workaround for obtaining a
4315	 * rough (not exact) counter is to enable an interrupt on MBUF
4316	 * low water attention. This can be accomplished by setting the
4317	 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4318	 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4319	 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4320	 * However, that change would generate more interrupts, and
4321	 * multiple frames could still be lost during
4322	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4323	 * Given that the workaround still would not yield a correct
4324	 * counter, it does not seem worth implementing. So skip
4325	 * reading the counter on controllers that have the
4326	 * silicon bug.
4327 */
4328 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4329 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4330 sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4331 stats->InputDiscards +=
4332 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4333 stats->InputErrors +=
4334 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4335 stats->RecvThresholdHit +=
4336 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4337
4338 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4339 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4340 stats->InputErrors);
4341}
4342
4343static void
4344bge_stats_clear_regs(struct bge_softc *sc)
4345{
4346
4347 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4348 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4349 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4350 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4351 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4352 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4353 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4354 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4355 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4356 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4357 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4358 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4359 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4360
4361 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4362 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4363 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4364 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4365 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4366 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4367 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4368 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4369 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4370 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4371 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4372 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4373 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4374 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4375
4376 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4377 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4378 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4379 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4380 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4381 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4382 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4383}
4384
4385static void
4386bge_stats_update(struct bge_softc *sc)
4387{
4388 struct ifnet *ifp;
4389 bus_size_t stats;
4390 uint32_t cnt; /* current register value */
4391
4392 ifp = sc->bge_ifp;
4393
4394 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4395
4396#define READ_STAT(sc, stats, stat) \
4397 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
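/*
 * For example, READ_STAT(sc, stats, ifInDiscards.bge_addr_lo) reads the
 * low 32 bits of the ifInDiscards counter out of the NIC statistics block
 * through the memory window.
 */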
4398
4399 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4400 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4401 sc->bge_tx_collisions = cnt;
4402
4403 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4404 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4405 sc->bge_rx_discards = cnt;
4406
4407 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4408 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4409 sc->bge_tx_discards = cnt;
4410
4411#undef READ_STAT
4412}
4413
4414/*
4415 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4416 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4417 * but when such padded frames employ the bge IP/TCP checksum offload,
4418 * the hardware checksum assist gives incorrect results (possibly
4419 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4420 * If we pad such runts with zeros, the onboard checksum comes out correct.
4421 */
4422static __inline int
4423bge_cksum_pad(struct mbuf *m)
4424{
4425 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4426 struct mbuf *last;
4427
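	/*
	 * ETHER_MIN_NOPAD is the 64-byte minimum frame length less the
	 * 4-byte CRC (60 bytes), so e.g. a 54-byte frame needs padlen = 6.
	 */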
4428	/* If there's only the packet header and we can pad there, use it. */
4429 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4430 M_TRAILINGSPACE(m) >= padlen) {
4431 last = m;
4432 } else {
4433 /*
4434 * Walk packet chain to find last mbuf. We will either
4435 * pad there, or append a new mbuf and pad it.
4436 */
4437 for (last = m; last->m_next != NULL; last = last->m_next);
4438 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4439 /* Allocate new empty mbuf, pad it. Compact later. */
4440 struct mbuf *n;
4441
4442 MGET(n, M_DONTWAIT, MT_DATA);
4443 if (n == NULL)
4444 return (ENOBUFS);
4445 n->m_len = 0;
4446 last->m_next = n;
4447 last = n;
4448 }
4449 }
4450
4451 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4452 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4453 last->m_len += padlen;
4454 m->m_pkthdr.len += padlen;
4455
4456 return (0);
4457}
4458
4459static struct mbuf *
4460bge_check_short_dma(struct mbuf *m)
4461{
4462 struct mbuf *n;
4463 int found;
4464
4465 /*
4466	 * If the device receives two back-to-back send BDs with less
4467	 * than or equal to 8 total bytes then the device may hang. The
4468	 * two back-to-back send BDs must be in the same frame for this
4469	 * failure to occur. Scan the mbuf chain and see whether two
4470	 * such back-to-back send BDs are present. If so, allocate a new
4471	 * mbuf and copy the frame to work around the silicon bug.
4472 */
4473 for (n = m, found = 0; n != NULL; n = n->m_next) {
4474 if (n->m_len < 8) {
4475 found++;
4476 if (found > 1)
4477 break;
4478 continue;
4479 }
4480 found = 0;
4481 }
4482
4483 if (found > 1) {
4484 n = m_defrag(m, M_DONTWAIT);
4485 if (n == NULL)
4486 m_freem(m);
4487 } else
4488 n = m;
4489 return (n);
4490}
4491
4492static struct mbuf *
4493bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4494 uint16_t *flags)
4495{
4496 struct ip *ip;
4497 struct tcphdr *tcp;
4498 struct mbuf *n;
4499 uint16_t hlen;
4500 uint32_t poff;
4501
4502 if (M_WRITABLE(m) == 0) {
4503 /* Get a writable copy. */
4504 n = m_dup(m, M_DONTWAIT);
4505 m_freem(m);
4506 if (n == NULL)
4507 return (NULL);
4508 m = n;
4509 }
4510 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4511 if (m == NULL)
4512 return (NULL);
4513 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4514 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4515 m = m_pullup(m, poff + sizeof(struct tcphdr));
4516 if (m == NULL)
4517 return (NULL);
4518 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4519 m = m_pullup(m, poff + (tcp->th_off << 2));
4520 if (m == NULL)
4521 return (NULL);
4522 /*
4523	 * It seems the controller doesn't modify the IP length and TCP pseudo
4524	 * checksum. These checksums, computed by the upper stack, should be 0.
4525 */
4526 *mss = m->m_pkthdr.tso_segsz;
4527 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4528 ip->ip_sum = 0;
4529 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4530 /* Clear pseudo checksum computed by TCP stack. */
4531 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4532 tcp->th_sum = 0;
4533 /*
4534	 * Broadcom controllers use a different descriptor format for
4535	 * TSO depending on the ASIC revision. Due to TSO-capable firmware
4536	 * licensing issues and the lower performance of firmware-based TSO,
4537	 * we only support hardware-based TSO.
4538 */
4539 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4540 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
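	/*
	 * For example, a 20-byte IP header plus a 32-byte TCP header
	 * (12 bytes of options) gives hlen = (20 + 32) >> 2 = 13 words.
	 */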
4541 if (sc->bge_flags & BGE_FLAG_TSO3) {
4542 /*
4543 * For BCM5717 and newer controllers, hardware based TSO
4544 * uses the 14 lower bits of the bge_mss field to store the
4545 * MSS and the upper 2 bits to store the lowest 2 bits of
4546 * the IP/TCP header length. The upper 6 bits of the header
4547 * length are stored in the bge_flags[14:10,4] field. Jumbo
4548 * frames are supported.
4549 */
4550 *mss |= ((hlen & 0x3) << 14);
4551 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4552 } else {
4553 /*
4554 * For BCM5755 and newer controllers, hardware based TSO uses
4555 * the lower 11 bits to store the MSS and the upper 5 bits to
4556 * store the IP/TCP header length. Jumbo frames are not
4557 * supported.
4558 */
4559 *mss |= (hlen << 11);
4560 }
4561 return (m);
4562}
4563
4564/*
4565 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4566 * pointers to descriptors.
4567 */
4568static int
4569bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4570{
4571 bus_dma_segment_t segs[BGE_NSEG_NEW];
4572 bus_dmamap_t map;
4573 struct bge_tx_bd *d;
4574 struct mbuf *m = *m_head;
4575 uint32_t idx = *txidx;
4576 uint16_t csum_flags, mss, vlan_tag;
4577 int nsegs, i, error;
4578
4579 csum_flags = 0;
4580 mss = 0;
4581 vlan_tag = 0;
4582 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4583 m->m_next != NULL) {
4584 *m_head = bge_check_short_dma(m);
4585 if (*m_head == NULL)
4586 return (ENOBUFS);
4587 m = *m_head;
4588 }
4589 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4590 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4591 if (*m_head == NULL)
4592 return (ENOBUFS);
4593 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4594 BGE_TXBDFLAG_CPU_POST_DMA;
4595 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4596 if (m->m_pkthdr.csum_flags & CSUM_IP)
4597 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4598 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4599 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4600 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4601 (error = bge_cksum_pad(m)) != 0) {
4602 m_freem(m);
4603 *m_head = NULL;
4604 return (error);
4605 }
4606 }
4607 if (m->m_flags & M_LASTFRAG)
4608 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4609 else if (m->m_flags & M_FRAG)
4610 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4611 }
4612
4613 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4614 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4615 m->m_pkthdr.len > ETHER_MAX_LEN)
4616 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4617 if (sc->bge_forced_collapse > 0 &&
4618 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4619 /*
4620			 * Forcibly collapse mbuf chains to overcome a hardware
4621			 * limitation that only supports a single outstanding
4622			 * DMA read operation.
4623 */
4624 if (sc->bge_forced_collapse == 1)
4625 m = m_defrag(m, M_DONTWAIT);
4626 else
4627 m = m_collapse(m, M_DONTWAIT,
4628 sc->bge_forced_collapse);
4629 if (m == NULL)
4630 m = *m_head;
4631 *m_head = m;
4632 }
4633 }
4634
4635 map = sc->bge_cdata.bge_tx_dmamap[idx];
4636 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4637 &nsegs, BUS_DMA_NOWAIT);
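	/*
	 * EFBIG means the chain maps to too many DMA segments (more than
	 * BGE_NSEG_NEW); collapse it into fewer mbufs and retry once.
	 */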
4638 if (error == EFBIG) {
4639 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4640 if (m == NULL) {
4641 m_freem(*m_head);
4642 *m_head = NULL;
4643 return (ENOBUFS);
4644 }
4645 *m_head = m;
4646 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4647 m, segs, &nsegs, BUS_DMA_NOWAIT);
4648 if (error) {
4649 m_freem(m);
4650 *m_head = NULL;
4651 return (error);
4652 }
4653 } else if (error != 0)
4654 return (error);
4655
4656 /* Check if we have enough free send BDs. */
4657 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4658 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4659 return (ENOBUFS);
4660 }
4661
4662 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4663
4664 if (m->m_flags & M_VLANTAG) {
4665 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4666 vlan_tag = m->m_pkthdr.ether_vtag;
4667 }
4668 for (i = 0; ; i++) {
4669 d = &sc->bge_ldata.bge_tx_ring[idx];
4670 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4671 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4672 d->bge_len = segs[i].ds_len;
4673 d->bge_flags = csum_flags;
4674 d->bge_vlan_tag = vlan_tag;
4675 d->bge_mss = mss;
4676 if (i == nsegs - 1)
4677 break;
4678 BGE_INC(idx, BGE_TX_RING_CNT);
4679 }
4680
4681 /* Mark the last segment as end of packet... */
4682 d->bge_flags |= BGE_TXBDFLAG_END;
4683
4684 /*
4685	 * Ensure that the map for this transmission
4686 * is placed at the array index of the last descriptor
4687 * in this chain.
4688 */
4689 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4690 sc->bge_cdata.bge_tx_dmamap[idx] = map;
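	/*
	 * bge_txeof() unloads the dmamap found at the index of the
	 * descriptor carrying BGE_TXBDFLAG_END, so the map used for this
	 * load is swapped into that slot here.
	 */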
4691 sc->bge_cdata.bge_tx_chain[idx] = m;
4692 sc->bge_txcnt += nsegs;
4693
4694 BGE_INC(idx, BGE_TX_RING_CNT);
4695 *txidx = idx;
4696
4697 return (0);
4698}
4699
4700/*
4701 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4702 * to the mbuf data regions directly in the transmit descriptors.
4703 */
4704static void
4705bge_start_locked(struct ifnet *ifp)
4706{
4707 struct bge_softc *sc;
4708 struct mbuf *m_head;
4709 uint32_t prodidx;
4710 int count;
4711
4712 sc = ifp->if_softc;
4713 BGE_LOCK_ASSERT(sc);
4714
4715 if (!sc->bge_link ||
4716 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4717 IFF_DRV_RUNNING)
4718 return;
4719
4720 prodidx = sc->bge_tx_prodidx;
4721
4722 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4723 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4724 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4725 break;
4726 }
4727 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4728 if (m_head == NULL)
4729 break;
4730
4731 /*
4732 * XXX
4733 * The code inside the if() block is never reached since we
4734 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4735 * requests to checksum TCP/UDP in a fragmented packet.
4736 *
4737 * XXX
4738 * safety overkill. If this is a fragmented packet chain
4739 * with delayed TCP/UDP checksums, then only encapsulate
4740 * it if we have enough descriptors to handle the entire
4741 * chain at once.
4742 * (paranoia -- may not actually be needed)
4743 */
4744 if (m_head->m_flags & M_FIRSTFRAG &&
4745 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4746 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4747 m_head->m_pkthdr.csum_data + 16) {
4748 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4749 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4750 break;
4751 }
4752 }
4753
4754 /*
4755 * Pack the data into the transmit ring. If we
4756 * don't have room, set the OACTIVE flag and wait
4757 * for the NIC to drain the ring.
4758 */
4759 if (bge_encap(sc, &m_head, &prodidx)) {
4760 if (m_head == NULL)
4761 break;
4762 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4763 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4764 break;
4765 }
4766 ++count;
4767
4768 /*
4769 * If there's a BPF listener, bounce a copy of this frame
4770 * to him.
4771 */
4772#ifdef ETHER_BPF_MTAP
4773 ETHER_BPF_MTAP(ifp, m_head);
4774#else
4775 BPF_MTAP(ifp, m_head);
4776#endif
4777 }
4778
4779 if (count > 0) {
4780 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4781 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4782 /* Transmit. */
4783 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4784 /* 5700 b2 errata */
4785 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4786 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4787
4788 sc->bge_tx_prodidx = prodidx;
4789
4790 /*
4791 * Set a timeout in case the chip goes out to lunch.
4792 */
4793 sc->bge_timer = 5;
4794 }
4795}
4796
4797/*
4798 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4799 * to the mbuf data regions directly in the transmit descriptors.
4800 */
4801static void
4802bge_start(struct ifnet *ifp)
4803{
4804 struct bge_softc *sc;
4805
4806 sc = ifp->if_softc;
4807 BGE_LOCK(sc);
4808 bge_start_locked(ifp);
4809 BGE_UNLOCK(sc);
4810}
4811
4812static void
4813bge_init_locked(struct bge_softc *sc)
4814{
4815 struct ifnet *ifp;
4816 uint16_t *m;
4817 uint32_t mode;
4818
4819 BGE_LOCK_ASSERT(sc);
4820
4821 ifp = sc->bge_ifp;
4822
4823 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4824 return;
4825
4826 /* Cancel pending I/O and flush buffers. */
4827 bge_stop(sc);
4828
4829 bge_stop_fw(sc);
4830 bge_sig_pre_reset(sc, BGE_RESET_START);
4831 bge_reset(sc);
4832 bge_sig_legacy(sc, BGE_RESET_START);
4833 bge_sig_post_reset(sc, BGE_RESET_START);
4834
4835 bge_chipinit(sc);
4836
4837 /*
4838 * Init the various state machines, ring
4839 * control blocks and firmware.
4840 */
4841 if (bge_blockinit(sc)) {
4842 device_printf(sc->bge_dev, "initialization failure\n");
4843 return;
4844 }
4845
4846 ifp = sc->bge_ifp;
4847
4848 /* Specify MTU. */
4849 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4850 ETHER_HDR_LEN + ETHER_CRC_LEN +
4851 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
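	/*
	 * E.g. with a 1500-byte MTU and IFCAP_VLAN_MTU enabled this
	 * programs 1500 + 14 + 4 + 4 = 1522 bytes as the largest
	 * acceptable frame.
	 */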
4852
4853 /* Load our MAC address. */
4854 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4855 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4856 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
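	/*
	 * The station address is split across the two registers; for
	 * example 00:10:18:aa:bb:cc is written as 0x0010 followed by
	 * 0x18aabbcc.
	 */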
4857
4858 /* Program promiscuous mode. */
4859 bge_setpromisc(sc);
4860
4861 /* Program multicast filter. */
4862 bge_setmulti(sc);
4863
4864 /* Program VLAN tag stripping. */
4865 bge_setvlan(sc);
4866
4867 /* Override UDP checksum offloading. */
4868 if (sc->bge_forced_udpcsum == 0)
4869 sc->bge_csum_features &= ~CSUM_UDP;
4870 else
4871 sc->bge_csum_features |= CSUM_UDP;
4872 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4873 ifp->if_capenable & IFCAP_TXCSUM) {
4874 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4875 ifp->if_hwassist |= sc->bge_csum_features;
4876 }
4877
4878 /* Init RX ring. */
4879 if (bge_init_rx_ring_std(sc) != 0) {
4880 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4881 bge_stop(sc);
4882 return;
4883 }
4884
4885 /*
4886 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4887	 * memory to ensure that the chip has in fact read the first
4888 * entry of the ring.
4889 */
4890 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4891 uint32_t v, i;
4892 for (i = 0; i < 10; i++) {
4893 DELAY(20);
4894 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4895 if (v == (MCLBYTES - ETHER_ALIGN))
4896 break;
4897 }
4898 if (i == 10)
4899 device_printf (sc->bge_dev,
4900 "5705 A0 chip failed to load RX ring\n");
4901 }
4902
4903 /* Init jumbo RX ring. */
4904 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4905 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4906 (MCLBYTES - ETHER_ALIGN)) {
4907 if (bge_init_rx_ring_jumbo(sc) != 0) {
4908 device_printf(sc->bge_dev,
4909 "no memory for jumbo Rx buffers.\n");
4910 bge_stop(sc);
4911 return;
4912 }
4913 }
4914
4915 /* Init our RX return ring index. */
4916 sc->bge_rx_saved_considx = 0;
4917
4918 /* Init our RX/TX stat counters. */
4919 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4920
4921 /* Init TX ring. */
4922 bge_init_tx_ring(sc);
4923
4924 /* Enable TX MAC state machine lockup fix. */
4925 mode = CSR_READ_4(sc, BGE_TX_MODE);
4926 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4927 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4928 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
4929 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4930 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4931 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4932 }
4933 /* Turn on transmitter. */
4934 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4935
4936 /* Turn on receiver. */
4937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4938
4939 /*
4940 * Set the number of good frames to receive after RX MBUF
4941 * Low Watermark has been reached. After the RX MAC receives
4942 * this number of frames, it will drop subsequent incoming
4943 * frames until the MBUF High Watermark is reached.
4944 */
4945 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4946 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4947 else
4948 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4949
4950 /* Clear MAC statistics. */
4951 if (BGE_IS_5705_PLUS(sc))
4952 bge_stats_clear_regs(sc);
4953
4954 /* Tell firmware we're alive. */
4955 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4956
4957#ifdef DEVICE_POLLING
4958 /* Disable interrupts if we are polling. */
4959 if (ifp->if_capenable & IFCAP_POLLING) {
4960 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4961 BGE_PCIMISCCTL_MASK_PCI_INTR);
4962 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4963 } else
4964#endif
4965
4966	/* Enable host interrupts; with DEVICE_POLLING compiled in this block is the else-branch of the polling check above. */
4967 {
4968 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4969 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4970 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4971 }
4972
4973 bge_ifmedia_upd_locked(ifp);
4974
4975 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4976 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4977
4978 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4979}
4980
4981static void
4982bge_init(void *xsc)
4983{
4984 struct bge_softc *sc = xsc;
4985
4986 BGE_LOCK(sc);
4987 bge_init_locked(sc);
4988 BGE_UNLOCK(sc);
4989}
4990
4991/*
4992 * Set media options.
4993 */
4994static int
4995bge_ifmedia_upd(struct ifnet *ifp)
4996{
4997 struct bge_softc *sc = ifp->if_softc;
4998 int res;
4999
5000 BGE_LOCK(sc);
5001 res = bge_ifmedia_upd_locked(ifp);
5002 BGE_UNLOCK(sc);
5003
5004 return (res);
5005}
5006
5007static int
5008bge_ifmedia_upd_locked(struct ifnet *ifp)
5009{
5010 struct bge_softc *sc = ifp->if_softc;
5011 struct mii_data *mii;
5012 struct mii_softc *miisc;
5013 struct ifmedia *ifm;
5014
5015 BGE_LOCK_ASSERT(sc);
5016
5017 ifm = &sc->bge_ifmedia;
5018
5019 /* If this is a 1000baseX NIC, enable the TBI port. */
5020 if (sc->bge_flags & BGE_FLAG_TBI) {
5021 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5022 return (EINVAL);
5023 switch(IFM_SUBTYPE(ifm->ifm_media)) {
5024 case IFM_AUTO:
5025 /*
5026 * The BCM5704 ASIC appears to have a special
5027 * mechanism for programming the autoneg
5028 * advertisement registers in TBI mode.
5029 */
5030 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5031 uint32_t sgdig;
5032 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5033 if (sgdig & BGE_SGDIGSTS_DONE) {
5034 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5035 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5036 sgdig |= BGE_SGDIGCFG_AUTO |
5037 BGE_SGDIGCFG_PAUSE_CAP |
5038 BGE_SGDIGCFG_ASYM_PAUSE;
5039 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5040 sgdig | BGE_SGDIGCFG_SEND);
5041 DELAY(5);
5042 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5043 }
5044 }
5045 break;
5046 case IFM_1000_SX:
5047 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5048 BGE_CLRBIT(sc, BGE_MAC_MODE,
5049 BGE_MACMODE_HALF_DUPLEX);
5050 } else {
5051 BGE_SETBIT(sc, BGE_MAC_MODE,
5052 BGE_MACMODE_HALF_DUPLEX);
5053 }
5054 break;
5055 default:
5056 return (EINVAL);
5057 }
5058 return (0);
5059 }
5060
5061 sc->bge_link_evt++;
5062 mii = device_get_softc(sc->bge_miibus);
5063 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5064 PHY_RESET(miisc);
5065 mii_mediachg(mii);
5066
5067 /*
5068	 * Force an interrupt so that we will call bge_link_upd
5069	 * if needed and clear any pending link state attention.
5070	 * Without this we do not get any further interrupts for
5071	 * link state changes and thus will not bring the link UP and
5072	 * will not be able to send in bge_start_locked. The only
5073	 * way to get things working was to receive a packet and
5074	 * get an RX intr.
5075	 * bge_tick should help for fiber cards, and we might not
5076	 * need to do this here if BGE_FLAG_TBI is set, but as
5077	 * we poll for fiber anyway it should not harm.
5078 */
5079 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5080 sc->bge_flags & BGE_FLAG_5788)
5081 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5082 else
5083 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5084
5085 return (0);
5086}
5087
5088/*
5089 * Report current media status.
5090 */
5091static void
5092bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5093{
5094 struct bge_softc *sc = ifp->if_softc;
5095 struct mii_data *mii;
5096
5097 BGE_LOCK(sc);
5098
5099 if (sc->bge_flags & BGE_FLAG_TBI) {
5100 ifmr->ifm_status = IFM_AVALID;
5101 ifmr->ifm_active = IFM_ETHER;
5102 if (CSR_READ_4(sc, BGE_MAC_STS) &
5103 BGE_MACSTAT_TBI_PCS_SYNCHED)
5104 ifmr->ifm_status |= IFM_ACTIVE;
5105 else {
5106 ifmr->ifm_active |= IFM_NONE;
5107 BGE_UNLOCK(sc);
5108 return;
5109 }
5110 ifmr->ifm_active |= IFM_1000_SX;
5111 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5112 ifmr->ifm_active |= IFM_HDX;
5113 else
5114 ifmr->ifm_active |= IFM_FDX;
5115 BGE_UNLOCK(sc);
5116 return;
5117 }
5118
5119 mii = device_get_softc(sc->bge_miibus);
5120 mii_pollstat(mii);
5121 ifmr->ifm_active = mii->mii_media_active;
5122 ifmr->ifm_status = mii->mii_media_status;
5123
5124 BGE_UNLOCK(sc);
5125}
5126
5127static int
5128bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5129{
5130 struct bge_softc *sc = ifp->if_softc;
5131 struct ifreq *ifr = (struct ifreq *) data;
5132 struct mii_data *mii;
5133 int flags, mask, error = 0;
5134
5135 switch (command) {
5136 case SIOCSIFMTU:
5137 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5138 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5139 if (ifr->ifr_mtu < ETHERMIN ||
5140 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5141 error = EINVAL;
5142 break;
5143 }
5144 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5145 error = EINVAL;
5146 break;
5147 }
5148 BGE_LOCK(sc);
5149 if (ifp->if_mtu != ifr->ifr_mtu) {
5150 ifp->if_mtu = ifr->ifr_mtu;
5151 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5152 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5153 bge_init_locked(sc);
5154 }
5155 }
5156 BGE_UNLOCK(sc);
5157 break;
5158 case SIOCSIFFLAGS:
5159 BGE_LOCK(sc);
5160 if (ifp->if_flags & IFF_UP) {
5161 /*
5162 * If only the state of the PROMISC flag changed,
5163 * then just use the 'set promisc mode' command
5164 * instead of reinitializing the entire NIC. Doing
5165 * a full re-init means reloading the firmware and
5166 * waiting for it to start up, which may take a
5167 * second or two. Similarly for ALLMULTI.
5168 */
5169 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5170 flags = ifp->if_flags ^ sc->bge_if_flags;
5171 if (flags & IFF_PROMISC)
5172 bge_setpromisc(sc);
5173 if (flags & IFF_ALLMULTI)
5174 bge_setmulti(sc);
5175 } else
5176 bge_init_locked(sc);
5177 } else {
5178 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5179 bge_stop(sc);
5180 }
5181 }
5182 sc->bge_if_flags = ifp->if_flags;
5183 BGE_UNLOCK(sc);
5184 error = 0;
5185 break;
5186 case SIOCADDMULTI:
5187 case SIOCDELMULTI:
5188 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5189 BGE_LOCK(sc);
5190 bge_setmulti(sc);
5191 BGE_UNLOCK(sc);
5192 error = 0;
5193 }
5194 break;
5195 case SIOCSIFMEDIA:
5196 case SIOCGIFMEDIA:
5197 if (sc->bge_flags & BGE_FLAG_TBI) {
5198 error = ifmedia_ioctl(ifp, ifr,
5199 &sc->bge_ifmedia, command);
5200 } else {
5201 mii = device_get_softc(sc->bge_miibus);
5202 error = ifmedia_ioctl(ifp, ifr,
5203 &mii->mii_media, command);
5204 }
5205 break;
5206 case SIOCSIFCAP:
5207 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5208#ifdef DEVICE_POLLING
5209 if (mask & IFCAP_POLLING) {
5210 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5211 error = ether_poll_register(bge_poll, ifp);
5212 if (error)
5213 return (error);
5214 BGE_LOCK(sc);
5215 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5216 BGE_PCIMISCCTL_MASK_PCI_INTR);
5217 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5218 ifp->if_capenable |= IFCAP_POLLING;
5219 BGE_UNLOCK(sc);
5220 } else {
5221 error = ether_poll_deregister(ifp);
5222 /* Enable interrupt even in error case */
5223 BGE_LOCK(sc);
5224 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5225 BGE_PCIMISCCTL_MASK_PCI_INTR);
5226 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5227 ifp->if_capenable &= ~IFCAP_POLLING;
5228 BGE_UNLOCK(sc);
5229 }
5230 }
5231#endif
5232 if ((mask & IFCAP_TXCSUM) != 0 &&
5233 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5234 ifp->if_capenable ^= IFCAP_TXCSUM;
5235 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5236 ifp->if_hwassist |= sc->bge_csum_features;
5237 else
5238 ifp->if_hwassist &= ~sc->bge_csum_features;
5239 }
5240
5241 if ((mask & IFCAP_RXCSUM) != 0 &&
5242 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5243 ifp->if_capenable ^= IFCAP_RXCSUM;
5244
5245 if ((mask & IFCAP_TSO4) != 0 &&
5246 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5247 ifp->if_capenable ^= IFCAP_TSO4;
5248 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5249 ifp->if_hwassist |= CSUM_TSO;
5250 else
5251 ifp->if_hwassist &= ~CSUM_TSO;
5252 }
5253
5254 if (mask & IFCAP_VLAN_MTU) {
5255 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5256 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5257 bge_init(sc);
5258 }
5259
5260 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5261 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5262 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5263 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5264 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5265 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5266 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5267 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5268 BGE_LOCK(sc);
5269 bge_setvlan(sc);
5270 BGE_UNLOCK(sc);
5271 }
5272#ifdef VLAN_CAPABILITIES
5273 VLAN_CAPABILITIES(ifp);
5274#endif
5275 break;
5276 default:
5277 error = ether_ioctl(ifp, command, data);
5278 break;
5279 }
5280
5281 return (error);
5282}
5283
5284static void
5285bge_watchdog(struct bge_softc *sc)
5286{
5287 struct ifnet *ifp;
5288
5289 BGE_LOCK_ASSERT(sc);
5290
5291 if (sc->bge_timer == 0 || --sc->bge_timer)
5292 return;
5293
5294 ifp = sc->bge_ifp;
5295
5296 if_printf(ifp, "watchdog timeout -- resetting\n");
5297
5298 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5299 bge_init_locked(sc);
5300
5301 ifp->if_oerrors++;
5302}
5303
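/*
 * Clear a block's enable bit and spin, up to BGE_TIMEOUT iterations of
 * 100us each, until the hardware confirms the block has stopped.
 */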
5304static void
5305bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5306{
5307 int i;
5308
5309 BGE_CLRBIT(sc, reg, bit);
5310
5311 for (i = 0; i < BGE_TIMEOUT; i++) {
5312 if ((CSR_READ_4(sc, reg) & bit) == 0)
5313 return;
5314 DELAY(100);
5315 }
5316}
5317
5318/*
5319 * Stop the adapter and free any mbufs allocated to the
5320 * RX and TX lists.
5321 */
5322static void
5323bge_stop(struct bge_softc *sc)
5324{
5325 struct ifnet *ifp;
5326
5327 BGE_LOCK_ASSERT(sc);
5328
5329 ifp = sc->bge_ifp;
5330
5331 callout_stop(&sc->bge_stat_ch);
5332
5333 /* Disable host interrupts. */
5334 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5335 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5336
5337 /*
5338 * Tell firmware we're shutting down.
5339 */
5340 bge_stop_fw(sc);
5341 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5342
5343 /*
5344 * Disable all of the receiver blocks.
5345 */
5346 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5347 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5348 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5349 if (BGE_IS_5700_FAMILY(sc))
5350 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5351 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5352 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5353 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5354
5355 /*
5356 * Disable all of the transmit blocks.
5357 */
5358 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5359 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5360 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5361 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5362 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5363 if (BGE_IS_5700_FAMILY(sc))
5364 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5365 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5366
5367 /*
5368 * Shut down all of the memory managers and related
5369 * state machines.
5370 */
5371 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5372 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5373 if (BGE_IS_5700_FAMILY(sc))
5374 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5375
5376 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5377 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5378 if (!(BGE_IS_5705_PLUS(sc))) {
5379 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5380 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5381 }
5382 /* Update MAC statistics. */
5383 if (BGE_IS_5705_PLUS(sc))
5384 bge_stats_update_regs(sc);
5385
5386 bge_reset(sc);
5387 bge_sig_legacy(sc, BGE_RESET_STOP);
5388 bge_sig_post_reset(sc, BGE_RESET_STOP);
5389
5390 /*
5391 * Keep the ASF firmware running if up.
5392 */
5393 if (sc->bge_asf_mode & ASF_STACKUP)
5394 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5395 else
5396 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5397
5398 /* Free the RX lists. */
5399 bge_free_rx_ring_std(sc);
5400
5401 /* Free jumbo RX list. */
5402 if (BGE_IS_JUMBO_CAPABLE(sc))
5403 bge_free_rx_ring_jumbo(sc);
5404
5405 /* Free TX buffers. */
5406 bge_free_tx_ring(sc);
5407
5408 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5409
5410 /* Clear MAC's link state (PHY may still have link UP). */
5411 if (bootverbose && sc->bge_link)
5412 if_printf(sc->bge_ifp, "link DOWN\n");
5413 sc->bge_link = 0;
5414
5415 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5416}
5417
5418/*
5419 * Stop all chip I/O so that the kernel's probe routines don't
5420 * get confused by errant DMAs when rebooting.
5421 */
5422static int
5423bge_shutdown(device_t dev)
5424{
5425 struct bge_softc *sc;
5426
5427 sc = device_get_softc(dev);
5428 BGE_LOCK(sc);
5429 bge_stop(sc);
5430 bge_reset(sc);
5431 BGE_UNLOCK(sc);
5432
5433 return (0);
5434}
5435
5436static int
5437bge_suspend(device_t dev)
5438{
5439 struct bge_softc *sc;
5440
5441 sc = device_get_softc(dev);
5442 BGE_LOCK(sc);
5443 bge_stop(sc);
5444 BGE_UNLOCK(sc);
5445
5446 return (0);
5447}
5448
5449static int
5450bge_resume(device_t dev)
5451{
5452 struct bge_softc *sc;
5453 struct ifnet *ifp;
5454
5455 sc = device_get_softc(dev);
5456 BGE_LOCK(sc);
5457 ifp = sc->bge_ifp;
5458 if (ifp->if_flags & IFF_UP) {
5459 bge_init_locked(sc);
5460 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5461 bge_start_locked(ifp);
5462 }
5463 BGE_UNLOCK(sc);
5464
5465 return (0);
5466}
5467
5468static void
5469bge_link_upd(struct bge_softc *sc)
5470{
5471 struct mii_data *mii;
5472 uint32_t link, status;
5473
5474 BGE_LOCK_ASSERT(sc);
5475
5476 /* Clear 'pending link event' flag. */
5477 sc->bge_link_evt = 0;
5478
5479 /*
5480 * Process link state changes.
5481 * Grrr. The link status word in the status block does
5482 * not work correctly on the BCM5700 rev AX and BX chips,
5483 * according to all available information. Hence, we have
5484 * to enable MII interrupts in order to properly obtain
5485 * async link changes. Unfortunately, this also means that
5486 * we have to read the MAC status register to detect link
5487 * changes, thereby adding an additional register access to
5488 * the interrupt handler.
5489 *
5490 * XXX: perhaps link state detection procedure used for
5491	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5492 */
5493
5494 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5495 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5496 status = CSR_READ_4(sc, BGE_MAC_STS);
5497 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5498 mii = device_get_softc(sc->bge_miibus);
5499 mii_pollstat(mii);
5500 if (!sc->bge_link &&
5501 mii->mii_media_status & IFM_ACTIVE &&
5502 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5503 sc->bge_link++;
5504 if (bootverbose)
5505 if_printf(sc->bge_ifp, "link UP\n");
5506 } else if (sc->bge_link &&
5507 (!(mii->mii_media_status & IFM_ACTIVE) ||
5508 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5509 sc->bge_link = 0;
5510 if (bootverbose)
5511 if_printf(sc->bge_ifp, "link DOWN\n");
5512 }
5513
5514 /* Clear the interrupt. */
5515 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5516 BGE_EVTENB_MI_INTERRUPT);
5517 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5518 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5519 BRGPHY_INTRS);
5520 }
5521 return;
5522 }
5523
5524 if (sc->bge_flags & BGE_FLAG_TBI) {
5525 status = CSR_READ_4(sc, BGE_MAC_STS);
5526 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5527 if (!sc->bge_link) {
5528 sc->bge_link++;
5529 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5530 BGE_CLRBIT(sc, BGE_MAC_MODE,
5531 BGE_MACMODE_TBI_SEND_CFGS);
5532 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5533 if (bootverbose)
5534 if_printf(sc->bge_ifp, "link UP\n");
5535 if_link_state_change(sc->bge_ifp,
5536 LINK_STATE_UP);
5537 }
5538 } else if (sc->bge_link) {
5539 sc->bge_link = 0;
5540 if (bootverbose)
5541 if_printf(sc->bge_ifp, "link DOWN\n");
5542 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5543 }
5544 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5545 /*
5546		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5547		 * bit in the status word always set. Work around this bug by
5548		 * reading the PHY link status directly.
5549 */
5550 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5551
5552 if (link != sc->bge_link ||
5553 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5554 mii = device_get_softc(sc->bge_miibus);
5555 mii_pollstat(mii);
5556 if (!sc->bge_link &&
5557 mii->mii_media_status & IFM_ACTIVE &&
5558 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5559 sc->bge_link++;
5560 if (bootverbose)
5561 if_printf(sc->bge_ifp, "link UP\n");
5562 } else if (sc->bge_link &&
5563 (!(mii->mii_media_status & IFM_ACTIVE) ||
5564 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5565 sc->bge_link = 0;
5566 if (bootverbose)
5567 if_printf(sc->bge_ifp, "link DOWN\n");
5568 }
5569 }
5570 } else {
5571 /*
5572 * For controllers that call mii_tick, we have to poll
5573 * link status.
5574 */
5575 mii = device_get_softc(sc->bge_miibus);
5576 mii_pollstat(mii);
5577 bge_miibus_statchg(sc->bge_dev);
5578 }
5579
5580 /* Clear the attention. */
5581 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5582 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5583 BGE_MACSTAT_LINK_CHANGED);
5584}
5585
5586static void
5587bge_add_sysctls(struct bge_softc *sc)
5588{
5589 struct sysctl_ctx_list *ctx;
5590 struct sysctl_oid_list *children;
5591 char tn[32];
5592 int unit;
5593
5594 ctx = device_get_sysctl_ctx(sc->bge_dev);
5595 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5596
5597#ifdef BGE_REGISTER_DEBUG
5598 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5599 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5600 "Debug Information");
5601
5602 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5603 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5604 "Register Read");
5605
5606 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5607 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5608 "Memory Read");
5609
5610#endif
5611
5612 unit = device_get_unit(sc->bge_dev);
5613 /*
5614 * A common design characteristic for many Broadcom client controllers
5615 * is that they only support a single outstanding DMA read operation
5616 * on the PCIe bus. This means that it will take twice as long to fetch
5617 * a TX frame that is split into header and payload buffers as it does
5618 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5619 * these controllers, coalescing buffers to reduce the number of memory
5620	 * reads is an effective way to get maximum performance (about 940Mbps).
5621	 * Without collapsing TX buffers the maximum TCP bulk transfer
5622	 * performance is about 850Mbps. However, forcing the coalescing of
5623	 * mbufs consumes a lot of CPU cycles, so leave it off by default.
5624 */
5625 sc->bge_forced_collapse = 0;
5626 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5627 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5628 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5629 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5630 "Number of fragmented TX buffers of a frame allowed before "
5631 "forced collapsing");
5632
2749 return (0);
2750
2751 /* Disable MSI for polling(4). */
2752#ifdef DEVICE_POLLING
2753 return (0);
2754#endif
2755 switch (sc->bge_asicrev) {
2756 case BGE_ASICREV_BCM5714_A0:
2757 case BGE_ASICREV_BCM5714:
2758 /*
2759 * Apparently, MSI doesn't work when these chips are
2760 * configured in single-port mode.
2761 */
2762 if (bge_has_multiple_ports(sc))
2763 can_use_msi = 1;
2764 break;
2765 case BGE_ASICREV_BCM5750:
2766 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2767 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2768 can_use_msi = 1;
2769 break;
2770 default:
2771 if (BGE_IS_575X_PLUS(sc))
2772 can_use_msi = 1;
2773 }
2774 return (can_use_msi);
2775}
2776
2777static int
2778bge_attach(device_t dev)
2779{
2780 struct ifnet *ifp;
2781 struct bge_softc *sc;
2782 uint32_t hwcfg = 0, misccfg;
2783 u_char eaddr[ETHER_ADDR_LEN];
2784 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2785
2786 sc = device_get_softc(dev);
2787 sc->bge_dev = dev;
2788
2789 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2790
2791 /*
2792 * Map control/status registers.
2793 */
2794 pci_enable_busmaster(dev);
2795
2796 rid = PCIR_BAR(0);
2797 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2798 RF_ACTIVE);
2799
2800 if (sc->bge_res == NULL) {
2801 device_printf (sc->bge_dev, "couldn't map memory\n");
2802 error = ENXIO;
2803 goto fail;
2804 }
2805
2806 /* Save various chip information. */
2807 sc->bge_chipid =
2808 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2809 BGE_PCIMISCCTL_ASICREV_SHIFT;
2810 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2811 /*
2812		 * Find the ASIC revision. Different chips use different
2813 * registers.
2814 */
2815 switch (pci_get_device(dev)) {
2816 case BCOM_DEVICEID_BCM5717:
2817 case BCOM_DEVICEID_BCM5718:
2818 case BCOM_DEVICEID_BCM5719:
2819 case BCOM_DEVICEID_BCM5720:
2820 sc->bge_chipid = pci_read_config(dev,
2821 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2822 break;
2823 case BCOM_DEVICEID_BCM57761:
2824 case BCOM_DEVICEID_BCM57765:
2825 case BCOM_DEVICEID_BCM57781:
2826 case BCOM_DEVICEID_BCM57785:
2827 case BCOM_DEVICEID_BCM57791:
2828 case BCOM_DEVICEID_BCM57795:
2829 sc->bge_chipid = pci_read_config(dev,
2830 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2831 break;
2832 default:
2833 sc->bge_chipid = pci_read_config(dev,
2834 BGE_PCI_PRODID_ASICREV, 4);
2835 }
2836 }
2837 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2838 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2839
2840 /* Set default PHY address. */
2841 phy_addr = 1;
2842 /*
2843 * PHY address mapping for various devices.
2844 *
2845 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2846 * ---------+-------+-------+-------+-------+
2847 * BCM57XX | 1 | X | X | X |
2848 * BCM5704 | 1 | X | 1 | X |
2849 * BCM5717 | 1 | 8 | 2 | 9 |
2850 * BCM5719 | 1 | 8 | 2 | 9 |
2851 * BCM5720 | 1 | 8 | 2 | 9 |
2852 *
2853 * Other addresses may respond but they are not
2854 * IEEE compliant PHYs and should be ignored.
2855 */
2856 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2857 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2858 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2859 f = pci_get_function(dev);
2860 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2861 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2862 BGE_SGDIGSTS_IS_SERDES)
2863 phy_addr = f + 8;
2864 else
2865 phy_addr = f + 1;
2866 } else {
2867 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2868 BGE_CPMU_PHY_STRAP_IS_SERDES)
2869 phy_addr = f + 8;
2870 else
2871 phy_addr = f + 1;
2872 }
2873 }
2874
2875 /*
2876 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2877 * 5705 A0 and A1 chips.
2878 */
2879 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2880 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2881 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2882 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2883 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2884 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2885
2886 if (bge_has_eaddr(sc))
2887 sc->bge_flags |= BGE_FLAG_EADDR;
2888
2889 /* Save chipset family. */
2890 switch (sc->bge_asicrev) {
2891 case BGE_ASICREV_BCM5717:
2892 case BGE_ASICREV_BCM5719:
2893 case BGE_ASICREV_BCM5720:
2894 case BGE_ASICREV_BCM57765:
2895 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2896 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2897 BGE_FLAG_JUMBO_FRAME;
2898 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2899 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2900 /* Jumbo frame on BCM5719 A0 does not work. */
2901 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2902 }
2903 break;
2904 case BGE_ASICREV_BCM5755:
2905 case BGE_ASICREV_BCM5761:
2906 case BGE_ASICREV_BCM5784:
2907 case BGE_ASICREV_BCM5785:
2908 case BGE_ASICREV_BCM5787:
2909 case BGE_ASICREV_BCM57780:
2910 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2911 BGE_FLAG_5705_PLUS;
2912 break;
2913 case BGE_ASICREV_BCM5700:
2914 case BGE_ASICREV_BCM5701:
2915 case BGE_ASICREV_BCM5703:
2916 case BGE_ASICREV_BCM5704:
2917 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2918 break;
2919 case BGE_ASICREV_BCM5714_A0:
2920 case BGE_ASICREV_BCM5780:
2921 case BGE_ASICREV_BCM5714:
2922 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2923 /* FALLTHROUGH */
2924 case BGE_ASICREV_BCM5750:
2925 case BGE_ASICREV_BCM5752:
2926 case BGE_ASICREV_BCM5906:
2927 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2928 /* FALLTHROUGH */
2929 case BGE_ASICREV_BCM5705:
2930 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2931 break;
2932 }
2933
2934 /* Set various PHY bug flags. */
2935 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2936 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2937 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2938 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2939 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2940 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2941 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2942 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2943 if (pci_get_subvendor(dev) == DELL_VENDORID)
2944 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2945 if ((BGE_IS_5705_PLUS(sc)) &&
2946 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2947 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2948 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2949 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
2950 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2951 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2952 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2953 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2954 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2955 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2956 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2957 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2958 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2959 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2960 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2961 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2962 } else
2963 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2964 }
2965
2966	/* Identify the chips that use a CPMU. */
2967 if (BGE_IS_5717_PLUS(sc) ||
2968 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2969 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2970 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2971 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2972 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2973 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2974 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2975 else
2976 sc->bge_mi_mode = BGE_MIMODE_BASE;
2977 /* Enable auto polling for BCM570[0-5]. */
2978 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2979 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2980
2981 /*
2982	 * All Broadcom controllers have the 4GB boundary DMA bug.
2983	 * Whenever an address crosses a multiple of the 4GB boundary
2984	 * (4GB, 8GB, 12GB, etc.) and makes the transition
2985	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2986	 * state machine will lock up and cause the device to hang.
2987 */
2988 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2989
2990	/* BCM5755 or higher and BCM5906 have the short DMA bug. */
2991 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2992 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2993
2994 /*
2995	 * The BCM5719 cannot handle DMA requests for DMA segments that
2996	 * are larger than 4KB. However, the maximum DMA segment size
2997	 * created in the DMA tag is 4KB for TSO, so we should not
2998	 * encounter the issue here.
2999 */
3000 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3001 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3002
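	/* The BCM5788 and 5788M variants are identified by the board ID bits. */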
3003 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
3004 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3005 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3006 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3007 sc->bge_flags |= BGE_FLAG_5788;
3008 }
3009
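	/*
	 * Build the PHY capability mask used for the MII attach below:
	 * start from the full mask and strip extended (gigabit) status
	 * for the 10/100-only devices matched here.
	 */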
3010 capmask = BMSR_DEFCAPMASK;
3011 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3012 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3013 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3014 pci_get_vendor(dev) == BCOM_VENDORID &&
3015 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3016 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3017 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3018 (pci_get_vendor(dev) == BCOM_VENDORID &&
3019 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3020 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3021 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3022 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3023 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3024 /* These chips are 10/100 only. */
3025 capmask &= ~BMSR_EXTSTAT;
3026 }
3027
3028 /*
3029	 * Some controllers seem to require special firmware to use
3030	 * TSO. That firmware is not available to FreeBSD, and Linux
3031	 * claims that TSO performed by the firmware is slower than
3032	 * hardware based TSO. Moreover, firmware based TSO has a
3033	 * known bug: it cannot handle TSO if the ethernet + IP/TCP
3034	 * header is greater than 80 bytes. A workaround for the TSO
3035	 * bug exists, but it appears more expensive than not using
3036	 * TSO at all. Some hardware also has TSO bugs, so limit
3037	 * TSO to the controllers that are not affected by TSO issues
3038	 * (e.g. 5755 or higher).
3039 */
3040 if (BGE_IS_5717_PLUS(sc)) {
3041		/* BCM5717 requires a different TSO configuration. */
3042 sc->bge_flags |= BGE_FLAG_TSO3;
3043 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3044 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3045 /* TSO on BCM5719 A0 does not work. */
3046 sc->bge_flags &= ~BGE_FLAG_TSO3;
3047 }
3048 } else if (BGE_IS_5755_PLUS(sc)) {
3049 /*
3050		 * BCM5754 and BCM5787 share the same ASIC id, so an
3051		 * explicit device id check is required.
3052		 * For an unknown reason, TSO does not work on the BCM5755M.
3053 */
3054 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3055 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3056 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3057 sc->bge_flags |= BGE_FLAG_TSO;
3058 }
3059
3060 /*
3061 * Check if this is a PCI-X or PCI Express device.
3062 */
3063 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3064 /*
3065		 * Found a PCI Express capabilities register, so this
3066 * must be a PCI Express device.
3067 */
3068 sc->bge_flags |= BGE_FLAG_PCIE;
3069 sc->bge_expcap = reg;
3070 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3071 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3072 pci_set_max_read_req(dev, 2048);
3073 else if (pci_get_max_read_req(dev) != 4096)
3074 pci_set_max_read_req(dev, 4096);
3075 } else {
3076 /*
3077 * Check if the device is in PCI-X Mode.
3078 * (This bit is not valid on PCI Express controllers.)
3079 */
3080 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3081 sc->bge_pcixcap = reg;
3082 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3083 BGE_PCISTATE_PCI_BUSMODE) == 0)
3084 sc->bge_flags |= BGE_FLAG_PCIX;
3085 }
3086
3087 /*
3088 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3089 * not actually a MAC controller bug but an issue with the embedded
3090 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3091 */
3092 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3093 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3094 /*
3095 * Allocate the interrupt, using MSI if possible. These devices
3096 * support 8 MSI messages, but only the first one is used in
3097 * normal operation.
3098 */
3099 rid = 0;
3100 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3101 sc->bge_msicap = reg;
3102 if (bge_can_use_msi(sc)) {
3103 msicount = pci_msi_count(dev);
3104 if (msicount > 1)
3105 msicount = 1;
3106 } else
3107 msicount = 0;
3108 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3109 rid = 1;
3110 sc->bge_flags |= BGE_FLAG_MSI;
3111 }
3112 }
3113
3114 /*
3115	 * All controllers except the BCM5700 support tagged status, but
3116	 * we use tagged status only in the MSI case on the BCM5717.
3117	 * Otherwise MSI on the BCM5717 does not work.
3118 */
3119#ifndef DEVICE_POLLING
3120 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3121 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3122#endif
3123
3124 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3125 RF_SHAREABLE | RF_ACTIVE);
3126
3127 if (sc->bge_irq == NULL) {
3128 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3129 error = ENXIO;
3130 goto fail;
3131 }
3132
3133 device_printf(dev,
3134 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3135 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3136 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3137 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3138
3139 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3140
3141 /* Try to reset the chip. */
3142 if (bge_reset(sc)) {
3143 device_printf(sc->bge_dev, "chip reset failed\n");
3144 error = ENXIO;
3145 goto fail;
3146 }
3147
3148 sc->bge_asf_mode = 0;
3149 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3150 BGE_SRAM_DATA_SIG_MAGIC)) {
3151 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3152 & BGE_HWCFG_ASF) {
3153 sc->bge_asf_mode |= ASF_ENABLE;
3154 sc->bge_asf_mode |= ASF_STACKUP;
3155 if (BGE_IS_575X_PLUS(sc))
3156 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3157 }
3158 }
3159
3160 /* Try to reset the chip again the nice way. */
3161 bge_stop_fw(sc);
3162 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3163 if (bge_reset(sc)) {
3164 device_printf(sc->bge_dev, "chip reset failed\n");
3165 error = ENXIO;
3166 goto fail;
3167 }
3168
3169 bge_sig_legacy(sc, BGE_RESET_STOP);
3170 bge_sig_post_reset(sc, BGE_RESET_STOP);
3171
3172 if (bge_chipinit(sc)) {
3173 device_printf(sc->bge_dev, "chip initialization failed\n");
3174 error = ENXIO;
3175 goto fail;
3176 }
3177
3178 error = bge_get_eaddr(sc, eaddr);
3179 if (error) {
3180 device_printf(sc->bge_dev,
3181 "failed to read station address\n");
3182 error = ENXIO;
3183 goto fail;
3184 }
3185
3186	/* The 5705 family limits the RX return ring to 512 entries; 5717+ uses the full ring. */
3187 if (BGE_IS_5717_PLUS(sc))
3188 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3189 else if (BGE_IS_5705_PLUS(sc))
3190 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3191 else
3192 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3193
3194 if (bge_dma_alloc(sc)) {
3195 device_printf(sc->bge_dev,
3196 "failed to allocate DMA resources\n");
3197 error = ENXIO;
3198 goto fail;
3199 }
3200
3201 bge_add_sysctls(sc);
3202
3203 /* Set default tuneable values. */
3204 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3205 sc->bge_rx_coal_ticks = 150;
3206 sc->bge_tx_coal_ticks = 150;
3207 sc->bge_rx_max_coal_bds = 10;
3208 sc->bge_tx_max_coal_bds = 10;
3209
3210 /* Initialize checksum features to use. */
3211 sc->bge_csum_features = BGE_CSUM_FEATURES;
3212 if (sc->bge_forced_udpcsum != 0)
3213 sc->bge_csum_features |= CSUM_UDP;
3214
3215 /* Set up ifnet structure */
3216 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3217 if (ifp == NULL) {
3218 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3219 error = ENXIO;
3220 goto fail;
3221 }
3222 ifp->if_softc = sc;
3223 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3224 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3225 ifp->if_ioctl = bge_ioctl;
3226 ifp->if_start = bge_start;
3227 ifp->if_init = bge_init;
3228 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3229 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3230 IFQ_SET_READY(&ifp->if_snd);
3231 ifp->if_hwassist = sc->bge_csum_features;
3232 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3233 IFCAP_VLAN_MTU;
3234 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3235 ifp->if_hwassist |= CSUM_TSO;
3236 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3237 }
3238#ifdef IFCAP_VLAN_HWCSUM
3239 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3240#endif
3241 ifp->if_capenable = ifp->if_capabilities;
3242#ifdef DEVICE_POLLING
3243 ifp->if_capabilities |= IFCAP_POLLING;
3244#endif
3245
3246 /*
3247 * 5700 B0 chips do not support checksumming correctly due
3248 * to hardware bugs.
3249 */
3250 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3251 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3252 ifp->if_capenable &= ~IFCAP_HWCSUM;
3253 ifp->if_hwassist = 0;
3254 }
3255
3256 /*
3257 * Figure out what sort of media we have by checking the
3258 * hardware config word in the first 32k of NIC internal memory,
3259 * or fall back to examining the EEPROM if necessary.
3260 * Note: on some BCM5700 cards, this value appears to be unset.
3261 * If that's the case, we have to rely on identifying the NIC
3262 * by its PCI subsystem ID, as we do below for the SysKonnect
3263 * SK-9D41.
3264 */
3265 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3266 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3267 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3268 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3269 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3270 sizeof(hwcfg))) {
3271 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3272 error = ENXIO;
3273 goto fail;
3274 }
3275 hwcfg = ntohl(hwcfg);
3276 }
3277
3278 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3279 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3280 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3281 if (BGE_IS_5714_FAMILY(sc))
3282 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3283 else
3284 sc->bge_flags |= BGE_FLAG_TBI;
3285 }
3286
3287 if (sc->bge_flags & BGE_FLAG_TBI) {
3288 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3289 bge_ifmedia_sts);
3290 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3291 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3292 0, NULL);
3293 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3294 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3295 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3296 } else {
3297 /*
3298 * Do transceiver setup and tell the firmware the
3299		 * driver is down so we can try to get access to the PHY
3300		 * for the probe if ASF is running. Retry a couple of times
3301 * if we get a conflict with the ASF firmware accessing
3302 * the PHY.
3303 */
3304 trys = 0;
3305 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3306again:
3307 bge_asf_driver_up(sc);
3308
3309 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3310 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3311 MIIF_DOPAUSE);
3312 if (error != 0) {
3313 if (trys++ < 4) {
3314 device_printf(sc->bge_dev, "Try again\n");
3315 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3316 BMCR_RESET);
3317 goto again;
3318 }
3319 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3320 goto fail;
3321 }
3322
3323 /*
3324 * Now tell the firmware we are going up after probing the PHY
3325 */
3326 if (sc->bge_asf_mode & ASF_STACKUP)
3327 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3328 }
3329
3330 /*
3331 * When using the BCM5701 in PCI-X mode, data corruption has
3332 * been observed in the first few bytes of some received packets.
3333 * Aligning the packet buffer in memory eliminates the corruption.
3334 * Unfortunately, this misaligns the packet payloads. On platforms
3335 * which do not support unaligned accesses, we will realign the
3336 * payloads by copying the received packets.
3337 */
3338 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3339 sc->bge_flags & BGE_FLAG_PCIX)
3340 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3341
3342 /*
3343 * Call MI attach routine.
3344 */
3345 ether_ifattach(ifp, eaddr);
3346 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3347
3348 /* Tell upper layer we support long frames. */
3349 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3350
3351 /*
3352 * Hookup IRQ last.
3353 */
3354 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3355 /* Take advantage of single-shot MSI. */
3356 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3357 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3358 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3359 taskqueue_thread_enqueue, &sc->bge_tq);
3360 if (sc->bge_tq == NULL) {
3361 device_printf(dev, "could not create taskqueue.\n");
3362 ether_ifdetach(ifp);
3363 error = ENXIO;
3364 goto fail;
3365 }
3366 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3367 device_get_nameunit(sc->bge_dev));
3368 error = bus_setup_intr(dev, sc->bge_irq,
3369 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3370 &sc->bge_intrhand);
3371 if (error)
3372 ether_ifdetach(ifp);
3373 } else
3374 error = bus_setup_intr(dev, sc->bge_irq,
3375 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3376 &sc->bge_intrhand);
3377
3378 if (error) {
3379 bge_detach(dev);
3380 device_printf(sc->bge_dev, "couldn't set up irq\n");
3381 }
3382
3383 return (0);
3384
3385fail:
3386 bge_release_resources(sc);
3387
3388 return (error);
3389}
3390
3391static int
3392bge_detach(device_t dev)
3393{
3394 struct bge_softc *sc;
3395 struct ifnet *ifp;
3396
3397 sc = device_get_softc(dev);
3398 ifp = sc->bge_ifp;
3399
3400#ifdef DEVICE_POLLING
3401 if (ifp->if_capenable & IFCAP_POLLING)
3402 ether_poll_deregister(ifp);
3403#endif
3404
3405 BGE_LOCK(sc);
3406 bge_stop(sc);
3407 bge_reset(sc);
3408 BGE_UNLOCK(sc);
3409
3410 callout_drain(&sc->bge_stat_ch);
3411
3412 if (sc->bge_tq)
3413 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3414 ether_ifdetach(ifp);
3415
3416 if (sc->bge_flags & BGE_FLAG_TBI) {
3417 ifmedia_removeall(&sc->bge_ifmedia);
3418 } else {
3419 bus_generic_detach(dev);
3420 device_delete_child(dev, sc->bge_miibus);
3421 }
3422
3423 bge_release_resources(sc);
3424
3425 return (0);
3426}
3427
3428static void
3429bge_release_resources(struct bge_softc *sc)
3430{
3431 device_t dev;
3432
3433 dev = sc->bge_dev;
3434
3435 if (sc->bge_tq != NULL)
3436 taskqueue_free(sc->bge_tq);
3437
3438 if (sc->bge_intrhand != NULL)
3439 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3440
3441 if (sc->bge_irq != NULL)
3442 bus_release_resource(dev, SYS_RES_IRQ,
3443 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3444
3445 if (sc->bge_flags & BGE_FLAG_MSI)
3446 pci_release_msi(dev);
3447
3448 if (sc->bge_res != NULL)
3449 bus_release_resource(dev, SYS_RES_MEMORY,
3450 PCIR_BAR(0), sc->bge_res);
3451
3452 if (sc->bge_ifp != NULL)
3453 if_free(sc->bge_ifp);
3454
3455 bge_dma_free(sc);
3456
3457 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3458 BGE_LOCK_DESTROY(sc);
3459}
3460
3461static int
3462bge_reset(struct bge_softc *sc)
3463{
3464 device_t dev;
3465 uint32_t cachesize, command, pcistate, reset, val;
3466 void (*write_op)(struct bge_softc *, int, int);
3467 uint16_t devctl;
3468 int i;
3469
3470 dev = sc->bge_dev;
3471
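	/*
	 * Pick the access method used to issue the core-clock reset:
	 * direct memory writes on PCIe 575X+ parts, indirect memory
	 * writes on other 575X+ parts, and indirect register writes
	 * on everything else (including the 5714 family and 5906).
	 */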
3472 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3473 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3474 if (sc->bge_flags & BGE_FLAG_PCIE)
3475 write_op = bge_writemem_direct;
3476 else
3477 write_op = bge_writemem_ind;
3478 } else
3479 write_op = bge_writereg_ind;
3480
3481 /* Save some important PCI state. */
3482 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3483 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3484 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3485
3486 pci_write_config(dev, BGE_PCI_MISC_CTL,
3487 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3488 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3489
3490 /* Disable fastboot on controllers that support it. */
3491 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3492 BGE_IS_5755_PLUS(sc)) {
3493 if (bootverbose)
3494 device_printf(dev, "Disabling fastboot\n");
3495 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3496 }
3497
3498 /*
3499 * Write the magic number to SRAM at offset 0xB50.
3500 * When firmware finishes its initialization it will
3501 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3502 */
3503 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3504
3505 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3506
3507 /* XXX: Broadcom Linux driver. */
3508 if (sc->bge_flags & BGE_FLAG_PCIE) {
3509 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3510 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3511 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3512 /* Prevent PCIE link training during global reset */
3513 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3514 reset |= 1 << 29;
3515 }
3516 }
3517
3518 /*
3519 * Set GPHY Power Down Override to leave GPHY
3520 * powered up in D0 uninitialized.
3521 */
3522 if (BGE_IS_5705_PLUS(sc) &&
3523 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3524 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3525
3526 /* Issue global reset */
3527 write_op(sc, BGE_MISC_CFG, reset);
3528
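	/* The BCM5906 runs an internal VCPU; flag a driver reset and let it run. */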
3529 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3530 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3531 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3532 val | BGE_VCPU_STATUS_DRV_RESET);
3533 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3534 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3535 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3536 }
3537
3538 DELAY(1000);
3539
3540 /* XXX: Broadcom Linux driver. */
3541 if (sc->bge_flags & BGE_FLAG_PCIE) {
3542 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3543 DELAY(500000); /* wait for link training to complete */
3544 val = pci_read_config(dev, 0xC4, 4);
3545 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3546 }
3547 devctl = pci_read_config(dev,
3548 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3549 /* Clear enable no snoop and disable relaxed ordering. */
3550 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3551 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3552 /* Set PCIE max payload size to 128. */
3553 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3554 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3555 devctl, 2);
3556 /* Clear error status. */
3557 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3558 PCIM_EXP_STA_CORRECTABLE_ERROR |
3559 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3560 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3561 }
3562
3563 /* Reset some of the PCI state that got zapped by reset. */
3564 pci_write_config(dev, BGE_PCI_MISC_CTL,
3565 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3566 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3567 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3568 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3569 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3570 /*
3571	 * Disable PCI-X relaxed ordering to ensure the status block update
3572	 * comes before the packet buffer DMA. Otherwise the driver may
3573	 * read a stale status block.
3574 */
3575 if (sc->bge_flags & BGE_FLAG_PCIX) {
3576 devctl = pci_read_config(dev,
3577 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3578 devctl &= ~PCIXM_COMMAND_ERO;
3579 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3580 devctl &= ~PCIXM_COMMAND_MAX_READ;
3581 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3582 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3583 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3584 PCIXM_COMMAND_MAX_READ);
3585 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3586 }
3587 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3588 devctl, 2);
3589 }
3590 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3591 if (BGE_IS_5714_FAMILY(sc)) {
3592 /* This chip disables MSI on reset. */
3593 if (sc->bge_flags & BGE_FLAG_MSI) {
3594 val = pci_read_config(dev,
3595 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3596 pci_write_config(dev,
3597 sc->bge_msicap + PCIR_MSI_CTRL,
3598 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3599 val = CSR_READ_4(sc, BGE_MSI_MODE);
3600 CSR_WRITE_4(sc, BGE_MSI_MODE,
3601 val | BGE_MSIMODE_ENABLE);
3602 }
3603 val = CSR_READ_4(sc, BGE_MARB_MODE);
3604 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3605 } else
3606 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3607
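	/* Wait for the on-chip firmware to signal that initialization is complete. */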
3608 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3609 for (i = 0; i < BGE_TIMEOUT; i++) {
3610 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3611 if (val & BGE_VCPU_STATUS_INIT_DONE)
3612 break;
3613 DELAY(100);
3614 }
3615 if (i == BGE_TIMEOUT) {
3616 device_printf(dev, "reset timed out\n");
3617 return (1);
3618 }
3619 } else {
3620 /*
3621 * Poll until we see the 1's complement of the magic number.
3622 * This indicates that the firmware initialization is complete.
3623 * We expect this to fail if no chip containing the Ethernet
3624 * address is fitted though.
3625 */
3626 for (i = 0; i < BGE_TIMEOUT; i++) {
3627 DELAY(10);
3628 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3629 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3630 break;
3631 }
3632
3633 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3634 device_printf(dev,
3635 "firmware handshake timed out, found 0x%08x\n",
3636 val);
3637 /* BCM57765 A0 needs additional time before accessing. */
3638 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3639 DELAY(10 * 1000); /* XXX */
3640 }
3641
3642 /*
3643 * XXX Wait for the value of the PCISTATE register to
3644 * return to its original pre-reset state. This is a
3645 * fairly good indicator of reset completion. If we don't
3646 * wait for the reset to fully complete, trying to read
3647 * from the device's non-PCI registers may yield garbage
3648 * results.
3649 */
3650 for (i = 0; i < BGE_TIMEOUT; i++) {
3651 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3652 break;
3653 DELAY(10);
3654 }
3655
3656 /* Fix up byte swapping. */
3657 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3658
3659 /* Tell the ASF firmware we are up */
3660 if (sc->bge_asf_mode & ASF_STACKUP)
3661 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3662
3663 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3664
3665 /*
3666 * The 5704 in TBI mode apparently needs some special
3667	 * adjustment to ensure the SERDES drive level is set
3668 * to 1.2V.
3669 */
3670 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3671 sc->bge_flags & BGE_FLAG_TBI) {
3672 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3673 val = (val & ~0xFFF) | 0x880;
3674 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3675 }
3676
3677 /* XXX: Broadcom Linux driver. */
3678 if (sc->bge_flags & BGE_FLAG_PCIE &&
3679 !BGE_IS_5717_PLUS(sc) &&
3680 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3681 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3682 /* Enable Data FIFO protection. */
3683 val = CSR_READ_4(sc, 0x7C00);
3684 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3685 }
3686 DELAY(10000);
3687
3688 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3689 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3690 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3691
3692 return (0);
3693}
3694
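/*
 * Requeue the mbuf already loaded at standard RX ring slot i. Used when
 * the received frame had an error or a replacement mbuf could not be
 * allocated.
 */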
3695static __inline void
3696bge_rxreuse_std(struct bge_softc *sc, int i)
3697{
3698 struct bge_rx_bd *r;
3699
3700 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3701 r->bge_flags = BGE_RXBDFLAG_END;
3702 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3703 r->bge_idx = i;
3704 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3705}
3706
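/*
 * Jumbo ring counterpart of bge_rxreuse_std(): requeue the existing
 * buffer segments for slot i of the jumbo RX ring.
 */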
3707static __inline void
3708bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3709{
3710 struct bge_extrx_bd *r;
3711
3712 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3713 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3714 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3715 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3716 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3717 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3718 r->bge_idx = i;
3719 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3720}
3721
3722/*
3723 * Frame reception handling. This is called if there's a frame
3724 * on the receive return list.
3725 *
3726 * Note: we have to be able to handle two possibilities here:
3727 * 1) the frame is from the jumbo receive ring
3728 * 2) the frame is from the standard receive ring
3729 */
3730
3731static int
3732bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3733{
3734 struct ifnet *ifp;
3735 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3736 uint16_t rx_cons;
3737
3738 rx_cons = sc->bge_rx_saved_considx;
3739
3740 /* Nothing to do. */
3741 if (rx_cons == rx_prod)
3742 return (rx_npkts);
3743
3744 ifp = sc->bge_ifp;
3745
3746 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3747 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3748 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3749 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3750 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3751 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3752 (MCLBYTES - ETHER_ALIGN))
3753 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3754 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3755
3756 while (rx_cons != rx_prod) {
3757 struct bge_rx_bd *cur_rx;
3758 uint32_t rxidx;
3759 struct mbuf *m = NULL;
3760 uint16_t vlan_tag = 0;
3761 int have_tag = 0;
3762
3763#ifdef DEVICE_POLLING
3764 if (ifp->if_capenable & IFCAP_POLLING) {
3765 if (sc->rxcycles <= 0)
3766 break;
3767 sc->rxcycles--;
3768 }
3769#endif
3770
3771 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3772
3773 rxidx = cur_rx->bge_idx;
3774 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3775
3776 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3777 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3778 have_tag = 1;
3779 vlan_tag = cur_rx->bge_vlan_tag;
3780 }
3781
3782 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3783 jumbocnt++;
3784 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3785 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3786 bge_rxreuse_jumbo(sc, rxidx);
3787 continue;
3788 }
3789 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3790 bge_rxreuse_jumbo(sc, rxidx);
3791 ifp->if_iqdrops++;
3792 continue;
3793 }
3794 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3795 } else {
3796 stdcnt++;
3797 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3798 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3799 bge_rxreuse_std(sc, rxidx);
3800 continue;
3801 }
3802 if (bge_newbuf_std(sc, rxidx) != 0) {
3803 bge_rxreuse_std(sc, rxidx);
3804 ifp->if_iqdrops++;
3805 continue;
3806 }
3807 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3808 }
3809
3810 ifp->if_ipackets++;
3811#ifndef __NO_STRICT_ALIGNMENT
3812 /*
3813 * For architectures with strict alignment we must make sure
3814 * the payload is aligned.
3815 */
3816 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3817 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3818 cur_rx->bge_len);
3819 m->m_data += ETHER_ALIGN;
3820 }
3821#endif
3822 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3823 m->m_pkthdr.rcvif = ifp;
3824
3825 if (ifp->if_capenable & IFCAP_RXCSUM)
3826 bge_rxcsum(sc, cur_rx, m);
3827
3828 /*
3829 * If we received a packet with a vlan tag,
3830 * attach that information to the packet.
3831 */
3832 if (have_tag) {
3833 m->m_pkthdr.ether_vtag = vlan_tag;
3834 m->m_flags |= M_VLANTAG;
3835 }
3836
3837 if (holdlck != 0) {
3838 BGE_UNLOCK(sc);
3839 (*ifp->if_input)(ifp, m);
3840 BGE_LOCK(sc);
3841 } else
3842 (*ifp->if_input)(ifp, m);
3843 rx_npkts++;
3844
3845 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3846 return (rx_npkts);
3847 }
3848
3849 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3850 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3851 if (stdcnt > 0)
3852 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3853 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3854
3855 if (jumbocnt > 0)
3856 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3857 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3858
3859 sc->bge_rx_saved_considx = rx_cons;
3860 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3861 if (stdcnt)
3862 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3863 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3864 if (jumbocnt)
3865 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3866 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3867#ifdef notyet
3868 /*
3869 * This register wraps very quickly under heavy packet drops.
3870 * If you need correct statistics, you can enable this check.
3871 */
3872 if (BGE_IS_5705_PLUS(sc))
3873 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3874#endif
3875 return (rx_npkts);
3876}
3877
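/*
 * Translate the hardware RX checksum results carried in the descriptor
 * into mbuf checksum flags.
 */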
3878static void
3879bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3880{
3881
3882 if (BGE_IS_5717_PLUS(sc)) {
3883 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3884 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3885 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3886 if ((cur_rx->bge_error_flag &
3887 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3888 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3889 }
3890 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3891 m->m_pkthdr.csum_data =
3892 cur_rx->bge_tcp_udp_csum;
3893 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3894 CSUM_PSEUDO_HDR;
3895 }
3896 }
3897 } else {
3898 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3899 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3900 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3901 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3902 }
3903 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3904 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3905 m->m_pkthdr.csum_data =
3906 cur_rx->bge_tcp_udp_csum;
3907 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3908 CSUM_PSEUDO_HDR;
3909 }
3910 }
3911}
3912
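/*
 * Reclaim completed transmit descriptors up to the given consumer index:
 * unload the DMA maps and free the transmitted mbufs.
 */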
3913static void
3914bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3915{
3916 struct bge_tx_bd *cur_tx;
3917 struct ifnet *ifp;
3918
3919 BGE_LOCK_ASSERT(sc);
3920
3921 /* Nothing to do. */
3922 if (sc->bge_tx_saved_considx == tx_cons)
3923 return;
3924
3925 ifp = sc->bge_ifp;
3926
3927 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3928 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3929 /*
3930 * Go through our tx ring and free mbufs for those
3931 * frames that have been sent.
3932 */
3933 while (sc->bge_tx_saved_considx != tx_cons) {
3934 uint32_t idx;
3935
3936 idx = sc->bge_tx_saved_considx;
3937 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3938 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3939 ifp->if_opackets++;
3940 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3941 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3942 sc->bge_cdata.bge_tx_dmamap[idx],
3943 BUS_DMASYNC_POSTWRITE);
3944 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3945 sc->bge_cdata.bge_tx_dmamap[idx]);
3946 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3947 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3948 }
3949 sc->bge_txcnt--;
3950 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3951 }
3952
3953 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3954 if (sc->bge_txcnt == 0)
3955 sc->bge_timer = 0;
3956}
3957
3958#ifdef DEVICE_POLLING
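/*
 * DEVICE_POLLING entry point: process the status block and the RX and TX
 * rings without interrupts. Link state is only updated on
 * POLL_AND_CHECK_STATUS commands.
 */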
3959static int
3960bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3961{
3962 struct bge_softc *sc = ifp->if_softc;
3963 uint16_t rx_prod, tx_cons;
3964 uint32_t statusword;
3965 int rx_npkts = 0;
3966
3967 BGE_LOCK(sc);
3968 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3969 BGE_UNLOCK(sc);
3970 return (rx_npkts);
3971 }
3972
3973 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3974 sc->bge_cdata.bge_status_map,
3975 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3976 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3977 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3978
3979 statusword = sc->bge_ldata.bge_status_block->bge_status;
3980 sc->bge_ldata.bge_status_block->bge_status = 0;
3981
3982 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3983 sc->bge_cdata.bge_status_map,
3984 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3985
3986 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3987 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3988 sc->bge_link_evt++;
3989
3990 if (cmd == POLL_AND_CHECK_STATUS)
3991 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3992 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3993 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3994 bge_link_upd(sc);
3995
3996 sc->rxcycles = count;
3997 rx_npkts = bge_rxeof(sc, rx_prod, 1);
3998 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3999 BGE_UNLOCK(sc);
4000 return (rx_npkts);
4001 }
4002 bge_txeof(sc, tx_cons);
4003 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4004 bge_start_locked(ifp);
4005
4006 BGE_UNLOCK(sc);
4007 return (rx_npkts);
4008}
4009#endif /* DEVICE_POLLING */
4010
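/*
 * MSI interrupt filter: defer all processing to the fast taskqueue.
 */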
4011static int
4012bge_msi_intr(void *arg)
4013{
4014 struct bge_softc *sc;
4015
4016 sc = (struct bge_softc *)arg;
4017 /*
4018	 * This interrupt is not shared and the controller has already
4019	 * disabled further interrupts.
4020 */
4021 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4022 return (FILTER_HANDLED);
4023}
4024
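/*
 * Taskqueue handler used for MSI: harvest the status block, handle link
 * events and RX/TX completions, then write the status tag back to the
 * IRQ mailbox to let the controller raise further interrupts.
 */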
4025static void
4026bge_intr_task(void *arg, int pending)
4027{
4028 struct bge_softc *sc;
4029 struct ifnet *ifp;
4030 uint32_t status, status_tag;
4031 uint16_t rx_prod, tx_cons;
4032
4033 sc = (struct bge_softc *)arg;
4034 ifp = sc->bge_ifp;
4035
4036 BGE_LOCK(sc);
4037 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4038 BGE_UNLOCK(sc);
4039 return;
4040 }
4041
4042 /* Get updated status block. */
4043 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4044 sc->bge_cdata.bge_status_map,
4045 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4046
4047	/* Save producer/consumer indexes. */
4048 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4049 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4050 status = sc->bge_ldata.bge_status_block->bge_status;
4051 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4052 sc->bge_ldata.bge_status_block->bge_status = 0;
4053 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4054 sc->bge_cdata.bge_status_map,
4055 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4056 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4057 status_tag = 0;
4058
4059 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4060 bge_link_upd(sc);
4061
4062 /* Let controller work. */
4063 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4064
4065 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4066 sc->bge_rx_saved_considx != rx_prod) {
4067 /* Check RX return ring producer/consumer. */
4068 BGE_UNLOCK(sc);
4069 bge_rxeof(sc, rx_prod, 0);
4070 BGE_LOCK(sc);
4071 }
4072 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4073 /* Check TX ring producer/consumer. */
4074 bge_txeof(sc, tx_cons);
4075 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4076 bge_start_locked(ifp);
4077 }
4078 BGE_UNLOCK(sc);
4079}
4080
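/*
 * Legacy (INTx) interrupt handler.
 */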
4081static void
4082bge_intr(void *xsc)
4083{
4084 struct bge_softc *sc;
4085 struct ifnet *ifp;
4086 uint32_t statusword;
4087 uint16_t rx_prod, tx_cons;
4088
4089 sc = xsc;
4090
4091 BGE_LOCK(sc);
4092
4093 ifp = sc->bge_ifp;
4094
4095#ifdef DEVICE_POLLING
4096 if (ifp->if_capenable & IFCAP_POLLING) {
4097 BGE_UNLOCK(sc);
4098 return;
4099 }
4100#endif
4101
4102 /*
4103 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4104 * disable interrupts by writing nonzero like we used to, since with
4105 * our current organization this just gives complications and
4106 * pessimizations for re-enabling interrupts. We used to have races
4107 * instead of the necessary complications. Disabling interrupts
4108 * would just reduce the chance of a status update while we are
4109 * running (by switching to the interrupt-mode coalescence
4110 * parameters), but this chance is already very low so it is more
4111 * efficient to get another interrupt than prevent it.
4112 *
4113 * We do the ack first to ensure another interrupt if there is a
4114 * status update after the ack. We don't check for the status
4115 * changing later because it is more efficient to get another
4116 * interrupt than prevent it, not quite as above (not checking is
4117	 * changing later because it is more efficient to get another
4118	 * a smaller optimization than not toggling the interrupt enable,
4119 * the status check). So toggling would probably be a pessimization
4120 * even with MSI. It would only be needed for using a task queue.
4121 */
4122 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4123
4124 /*
4125 * Do the mandatory PCI flush as well as get the link status.
4126 */
4127 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4128
4129 /* Make sure the descriptor ring indexes are coherent. */
4130 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4131 sc->bge_cdata.bge_status_map,
4132 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4133 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4134 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4135 sc->bge_ldata.bge_status_block->bge_status = 0;
4136 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4137 sc->bge_cdata.bge_status_map,
4138 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4139
4140 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4141 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4142 statusword || sc->bge_link_evt)
4143 bge_link_upd(sc);
4144
4145 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4146 /* Check RX return ring producer/consumer. */
4147 bge_rxeof(sc, rx_prod, 1);
4148 }
4149
4150 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4151 /* Check TX ring producer/consumer. */
4152 bge_txeof(sc, tx_cons);
4153 }
4154
4155 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4156 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4157 bge_start_locked(ifp);
4158
4159 BGE_UNLOCK(sc);
4160}
4161
4162static void
4163bge_asf_driver_up(struct bge_softc *sc)
4164{
4165 if (sc->bge_asf_mode & ASF_STACKUP) {
4166		/* Send an ASF heartbeat approx. every 2s. */
4167 if (sc->bge_asf_count)
4168 sc->bge_asf_count --;
4169 else {
4170 sc->bge_asf_count = 2;
4171 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4172 BGE_FW_CMD_DRV_ALIVE);
4173 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4174 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4175 BGE_FW_HB_TIMEOUT_SEC);
4176 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4177 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4178 BGE_RX_CPU_DRV_EVENT);
4179 }
4180 }
4181}
4182
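/*
 * Once-per-second timer: update statistics, poll link state when needed,
 * send the ASF heartbeat and run the watchdog, then reschedule itself.
 */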
4183static void
4184bge_tick(void *xsc)
4185{
4186 struct bge_softc *sc = xsc;
4187 struct mii_data *mii = NULL;
4188
4189 BGE_LOCK_ASSERT(sc);
4190
4191 /* Synchronize with possible callout reset/stop. */
4192 if (callout_pending(&sc->bge_stat_ch) ||
4193 !callout_active(&sc->bge_stat_ch))
4194 return;
4195
4196 if (BGE_IS_5705_PLUS(sc))
4197 bge_stats_update_regs(sc);
4198 else
4199 bge_stats_update(sc);
4200
4201 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4202 mii = device_get_softc(sc->bge_miibus);
4203 /*
4204		 * Do not touch the PHY if we have link up. This could break
4205		 * IPMI/ASF mode or produce extra input errors
4206		 * (extra errors were reported for bcm5701 & bcm5704).
4207 */
4208 if (!sc->bge_link)
4209 mii_tick(mii);
4210 } else {
4211 /*
4212		 * Since auto-polling can't be used in TBI mode, we have to poll
4213		 * link status manually. Here we register a pending link event
4214		 * and trigger an interrupt.
4215 */
4216#ifdef DEVICE_POLLING
4217 /* In polling mode we poll link state in bge_poll(). */
4218 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4219#endif
4220 {
4221 sc->bge_link_evt++;
4222 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4223 sc->bge_flags & BGE_FLAG_5788)
4224 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4225 else
4226 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4227 }
4228 }
4229
4230 bge_asf_driver_up(sc);
4231 bge_watchdog(sc);
4232
4233 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4234}
4235
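/*
 * Update interface statistics from the MAC statistics registers. Used on
 * BCM5705 and newer controllers (see bge_tick()).
 */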
4236static void
4237bge_stats_update_regs(struct bge_softc *sc)
4238{
4239 struct ifnet *ifp;
4240 struct bge_mac_stats *stats;
4241
4242 ifp = sc->bge_ifp;
4243 stats = &sc->bge_mac_stats;
4244
4245 stats->ifHCOutOctets +=
4246 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4247 stats->etherStatsCollisions +=
4248 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4249 stats->outXonSent +=
4250 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4251 stats->outXoffSent +=
4252 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4253 stats->dot3StatsInternalMacTransmitErrors +=
4254 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4255 stats->dot3StatsSingleCollisionFrames +=
4256 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4257 stats->dot3StatsMultipleCollisionFrames +=
4258 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4259 stats->dot3StatsDeferredTransmissions +=
4260 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4261 stats->dot3StatsExcessiveCollisions +=
4262 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4263 stats->dot3StatsLateCollisions +=
4264 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4265 stats->ifHCOutUcastPkts +=
4266 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4267 stats->ifHCOutMulticastPkts +=
4268 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4269 stats->ifHCOutBroadcastPkts +=
4270 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4271
4272 stats->ifHCInOctets +=
4273 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4274 stats->etherStatsFragments +=
4275 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4276 stats->ifHCInUcastPkts +=
4277 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4278 stats->ifHCInMulticastPkts +=
4279 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4280 stats->ifHCInBroadcastPkts +=
4281 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4282 stats->dot3StatsFCSErrors +=
4283 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4284 stats->dot3StatsAlignmentErrors +=
4285 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4286 stats->xonPauseFramesReceived +=
4287 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4288 stats->xoffPauseFramesReceived +=
4289 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4290 stats->macControlFramesReceived +=
4291 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4292 stats->xoffStateEntered +=
4293 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4294 stats->dot3StatsFramesTooLong +=
4295 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4296 stats->etherStatsJabbers +=
4297 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4298 stats->etherStatsUndersizePkts +=
4299 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4300
4301 stats->FramesDroppedDueToFilters +=
4302 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4303 stats->DmaWriteQueueFull +=
4304 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4305 stats->DmaWriteHighPriQueueFull +=
4306 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4307 stats->NoMoreRxBDs +=
4308 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4309 /*
4310	 * XXX
4311	 * Unlike on other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4312	 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4313	 * includes the number of unwanted multicast frames. This comes
4314	 * from a silicon bug, and the known workaround to get a rough (not
4315	 * exact) counter is to enable an interrupt on MBUF low water
4316	 * attention. This can be accomplished by setting the
4317	 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4318	 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4319	 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4320	 * However, that change would generate more interrupts and
4321	 * there would still be a chance of losing multiple frames
4322	 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4323	 * Given that the workaround still would not yield a correct
4324	 * counter, it does not seem worth implementing, so
4325	 * ignore reading the counter on controllers that have the
4326	 * silicon bug.
4327 */
4328 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4329 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4330 sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4331 stats->InputDiscards +=
4332 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4333 stats->InputErrors +=
4334 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4335 stats->RecvThresholdHit +=
4336 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4337
4338 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4339 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4340 stats->InputErrors);
4341}
4342
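/*
 * Reset the register-based statistics by reading and discarding each
 * counter.
 */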
4343static void
4344bge_stats_clear_regs(struct bge_softc *sc)
4345{
4346
4347 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4348 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4349 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4350 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4351 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4352 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4353 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4354 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4355 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4356 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4357 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4358 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4359 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4360
4361 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4362 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4363 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4364 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4365 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4366 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4367 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4368 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4369 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4370 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4371 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4372 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4373 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4374 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4375
4376 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4377 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4378 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4379 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4380 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4381 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4382 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4383}
4384
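/*
 * Update interface statistics from the statistics block kept in NIC
 * memory. Used on controllers older than the BCM5705 (see bge_tick()).
 */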
4385static void
4386bge_stats_update(struct bge_softc *sc)
4387{
4388 struct ifnet *ifp;
4389 bus_size_t stats;
4390 uint32_t cnt; /* current register value */
4391
4392 ifp = sc->bge_ifp;
4393
4394 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4395
4396#define READ_STAT(sc, stats, stat) \
4397 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4398
4399 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4400 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4401 sc->bge_tx_collisions = cnt;
4402
4403 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4404 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4405 sc->bge_rx_discards = cnt;
4406
4407 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4408 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4409 sc->bge_tx_discards = cnt;
4410
4411#undef READ_STAT
4412}
4413
4414/*
4415 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4416 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4417 * but when such padded frames employ the bge IP/TCP checksum offload,
4418 * the hardware checksum assist gives incorrect results (possibly
4419 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4420 * If we pad such runts with zeros, the onboard checksum comes out correct.
4421 */
4422static __inline int
4423bge_cksum_pad(struct mbuf *m)
4424{
4425 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4426 struct mbuf *last;
4427
4428 /* If there's only the packet-header and we can pad there, use it. */
4429 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4430 M_TRAILINGSPACE(m) >= padlen) {
4431 last = m;
4432 } else {
4433 /*
4434 * Walk packet chain to find last mbuf. We will either
4435 * pad there, or append a new mbuf and pad it.
4436 */
4437 for (last = m; last->m_next != NULL; last = last->m_next);
4438 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4439 /* Allocate new empty mbuf, pad it. Compact later. */
4440 struct mbuf *n;
4441
4442 MGET(n, M_DONTWAIT, MT_DATA);
4443 if (n == NULL)
4444 return (ENOBUFS);
4445 n->m_len = 0;
4446 last->m_next = n;
4447 last = n;
4448 }
4449 }
4450
4451 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4452 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4453 last->m_len += padlen;
4454 m->m_pkthdr.len += padlen;
4455
4456 return (0);
4457}
4458
4459static struct mbuf *
4460bge_check_short_dma(struct mbuf *m)
4461{
4462 struct mbuf *n;
4463 int found;
4464
4465 /*
4466	 * If the device receives two back-to-back send BDs with less than
4467	 * or equal to 8 total bytes then the device may hang. The two
4468	 * back-to-back send BDs must be in the same frame for this failure
4469	 * to occur. Scan the mbuf chain and see whether two such back-to-back
4470	 * send BDs are there. If this is the case, allocate a new mbuf
4471	 * and copy the frame to work around the silicon bug.
4472 */
4473 for (n = m, found = 0; n != NULL; n = n->m_next) {
4474 if (n->m_len < 8) {
4475 found++;
4476 if (found > 1)
4477 break;
4478 continue;
4479 }
4480 found = 0;
4481 }
4482
4483 if (found > 1) {
4484 n = m_defrag(m, M_DONTWAIT);
4485 if (n == NULL)
4486 m_freem(m);
4487 } else
4488 n = m;
4489 return (n);
4490}
4491
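/*
 * Prepare an mbuf chain for TSO: pull up the Ethernet/IP/TCP headers,
 * zero the fields the controller expects to be zero and encode the MSS
 * and header length into the send BD mss/flags values.
 */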
4492static struct mbuf *
4493bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4494 uint16_t *flags)
4495{
4496 struct ip *ip;
4497 struct tcphdr *tcp;
4498 struct mbuf *n;
4499 uint16_t hlen;
4500 uint32_t poff;
4501
4502 if (M_WRITABLE(m) == 0) {
4503 /* Get a writable copy. */
4504 n = m_dup(m, M_DONTWAIT);
4505 m_freem(m);
4506 if (n == NULL)
4507 return (NULL);
4508 m = n;
4509 }
4510 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4511 if (m == NULL)
4512 return (NULL);
4513 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4514 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4515 m = m_pullup(m, poff + sizeof(struct tcphdr));
4516 if (m == NULL)
4517 return (NULL);
4518 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4519 m = m_pullup(m, poff + (tcp->th_off << 2));
4520 if (m == NULL)
4521 return (NULL);
4522 /*
4523	 * It seems the controller doesn't modify the IP length and TCP pseudo
4524	 * checksum. These checksums computed by the upper stack should be 0.
4525 */
4526 *mss = m->m_pkthdr.tso_segsz;
4527 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4528 ip->ip_sum = 0;
4529 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4530 /* Clear pseudo checksum computed by TCP stack. */
4531 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4532 tcp->th_sum = 0;
4533 /*
4534	 * Broadcom controllers use a different descriptor format for
4535	 * TSO depending on the ASIC revision. Due to TSO-capable firmware
4536	 * license issues and the lower performance of firmware based TSO,
4537	 * we only support hardware based TSO.
4538 */
4539 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4540 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4541 if (sc->bge_flags & BGE_FLAG_TSO3) {
4542 /*
4543 * For BCM5717 and newer controllers, hardware based TSO
4544 * uses the 14 lower bits of the bge_mss field to store the
4545 * MSS and the upper 2 bits to store the lowest 2 bits of
4546 * the IP/TCP header length. The upper 6 bits of the header
4547 * length are stored in the bge_flags[14:10,4] field. Jumbo
4548 * frames are supported.
4549 */
4550 *mss |= ((hlen & 0x3) << 14);
4551 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4552 } else {
4553 /*
4554 * For BCM5755 and newer controllers, hardware based TSO uses
4555 * the lower 11 bits to store the MSS and the upper 5 bits to
4556 * store the IP/TCP header length. Jumbo frames are not
4557 * supported.
4558 */
4559 *mss |= (hlen << 11);
4560 }
4561 return (m);
4562}
4563
4564/*
4565 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4566 * pointers to descriptors.
4567 */
4568static int
4569bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4570{
4571 bus_dma_segment_t segs[BGE_NSEG_NEW];
4572 bus_dmamap_t map;
4573 struct bge_tx_bd *d;
4574 struct mbuf *m = *m_head;
4575 uint32_t idx = *txidx;
4576 uint16_t csum_flags, mss, vlan_tag;
4577 int nsegs, i, error;
4578
4579 csum_flags = 0;
4580 mss = 0;
4581 vlan_tag = 0;
4582 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4583 m->m_next != NULL) {
4584 *m_head = bge_check_short_dma(m);
4585 if (*m_head == NULL)
4586 return (ENOBUFS);
4587 m = *m_head;
4588 }
4589 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4590 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4591 if (*m_head == NULL)
4592 return (ENOBUFS);
4593 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4594 BGE_TXBDFLAG_CPU_POST_DMA;
4595 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4596 if (m->m_pkthdr.csum_flags & CSUM_IP)
4597 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4598 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4599 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4600 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4601 (error = bge_cksum_pad(m)) != 0) {
4602 m_freem(m);
4603 *m_head = NULL;
4604 return (error);
4605 }
4606 }
4607 if (m->m_flags & M_LASTFRAG)
4608 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4609 else if (m->m_flags & M_FRAG)
4610 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4611 }
4612
4613 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4614 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4615 m->m_pkthdr.len > ETHER_MAX_LEN)
4616 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4617 if (sc->bge_forced_collapse > 0 &&
4618 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4619 /*
4620			 * Forcibly collapse mbuf chains to overcome a hardware
4621			 * limitation which only supports a single outstanding
4622 * DMA read operation.
4623 */
4624 if (sc->bge_forced_collapse == 1)
4625 m = m_defrag(m, M_DONTWAIT);
4626 else
4627 m = m_collapse(m, M_DONTWAIT,
4628 sc->bge_forced_collapse);
4629 if (m == NULL)
4630 m = *m_head;
4631 *m_head = m;
4632 }
4633 }
4634
4635 map = sc->bge_cdata.bge_tx_dmamap[idx];
4636 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4637 &nsegs, BUS_DMA_NOWAIT);
4638 if (error == EFBIG) {
4639 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4640 if (m == NULL) {
4641 m_freem(*m_head);
4642 *m_head = NULL;
4643 return (ENOBUFS);
4644 }
4645 *m_head = m;
4646 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4647 m, segs, &nsegs, BUS_DMA_NOWAIT);
4648 if (error) {
4649 m_freem(m);
4650 *m_head = NULL;
4651 return (error);
4652 }
4653 } else if (error != 0)
4654 return (error);
4655
4656 /* Check if we have enough free send BDs. */
4657 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4658 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4659 return (ENOBUFS);
4660 }
4661
4662 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4663
4664 if (m->m_flags & M_VLANTAG) {
4665 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4666 vlan_tag = m->m_pkthdr.ether_vtag;
4667 }
4668 for (i = 0; ; i++) {
4669 d = &sc->bge_ldata.bge_tx_ring[idx];
4670 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4671 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4672 d->bge_len = segs[i].ds_len;
4673 d->bge_flags = csum_flags;
4674 d->bge_vlan_tag = vlan_tag;
4675 d->bge_mss = mss;
4676 if (i == nsegs - 1)
4677 break;
4678 BGE_INC(idx, BGE_TX_RING_CNT);
4679 }
4680
4681 /* Mark the last segment as end of packet... */
4682 d->bge_flags |= BGE_TXBDFLAG_END;
4683
4684 /*
4685 * Ensure that the map for this transmission
4686 * is placed at the array index of the last descriptor
4687 * in this chain.
4688 */
4689 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4690 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4691 sc->bge_cdata.bge_tx_chain[idx] = m;
4692 sc->bge_txcnt += nsegs;
4693
4694 BGE_INC(idx, BGE_TX_RING_CNT);
4695 *txidx = idx;
4696
4697 return (0);
4698}
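/*
 * Caller sketch (editor's note, mirroring what bge_start_locked() below
 * actually does): on a non-zero return *m_head may already have been
 * freed and set to NULL, so the caller must re-check the pointer before
 * requeueing the packet:
 *
 *	if (bge_encap(sc, &m_head, &prodidx) != 0) {
 *		if (m_head != NULL)
 *			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 *		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 *	}
 */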
4699
4700/*
4701 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4702 * to the mbuf data regions directly in the transmit descriptors.
4703 */
4704static void
4705bge_start_locked(struct ifnet *ifp)
4706{
4707 struct bge_softc *sc;
4708 struct mbuf *m_head;
4709 uint32_t prodidx;
4710 int count;
4711
4712 sc = ifp->if_softc;
4713 BGE_LOCK_ASSERT(sc);
4714
4715 if (!sc->bge_link ||
4716 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4717 IFF_DRV_RUNNING)
4718 return;
4719
4720 prodidx = sc->bge_tx_prodidx;
4721
4722 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4723 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4724 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4725 break;
4726 }
4727 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4728 if (m_head == NULL)
4729 break;
4730
4731 /*
4732 * XXX
4733 * The code inside the if() block is never reached since we
4734 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4735 * requests to checksum TCP/UDP in a fragmented packet.
4736 *
4737 * XXX
4738 * safety overkill. If this is a fragmented packet chain
4739 * with delayed TCP/UDP checksums, then only encapsulate
4740 * it if we have enough descriptors to handle the entire
4741 * chain at once.
4742 * (paranoia -- may not actually be needed)
4743 */
4744 if (m_head->m_flags & M_FIRSTFRAG &&
4745 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4746 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4747 m_head->m_pkthdr.csum_data + 16) {
4748 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4749 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4750 break;
4751 }
4752 }
4753
4754 /*
4755 * Pack the data into the transmit ring. If we
4756 * don't have room, set the OACTIVE flag and wait
4757 * for the NIC to drain the ring.
4758 */
4759 if (bge_encap(sc, &m_head, &prodidx)) {
4760 if (m_head == NULL)
4761 break;
4762 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4763 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4764 break;
4765 }
4766 ++count;
4767
4768 /*
4769 * If there's a BPF listener, bounce a copy of this frame
4770 * to him.
4771 */
4772#ifdef ETHER_BPF_MTAP
4773 ETHER_BPF_MTAP(ifp, m_head);
4774#else
4775 BPF_MTAP(ifp, m_head);
4776#endif
4777 }
4778
4779 if (count > 0) {
4780 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4781 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4782 /* Transmit. */
4783 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4784 /* 5700 b2 errata */
4785 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4786 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4787
4788 sc->bge_tx_prodidx = prodidx;
4789
4790 /*
4791 * Set a timeout in case the chip goes out to lunch.
4792 */
4793 sc->bge_timer = 5;
4794 }
4795}
4796
4797/*
4798 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4799 * to the mbuf data regions directly in the transmit descriptors.
4800 */
4801static void
4802bge_start(struct ifnet *ifp)
4803{
4804 struct bge_softc *sc;
4805
4806 sc = ifp->if_softc;
4807 BGE_LOCK(sc);
4808 bge_start_locked(ifp);
4809 BGE_UNLOCK(sc);
4810}
4811
4812static void
4813bge_init_locked(struct bge_softc *sc)
4814{
4815 struct ifnet *ifp;
4816 uint16_t *m;
4817 uint32_t mode;
4818
4819 BGE_LOCK_ASSERT(sc);
4820
4821 ifp = sc->bge_ifp;
4822
4823 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4824 return;
4825
4826 /* Cancel pending I/O and flush buffers. */
4827 bge_stop(sc);
4828
4829 bge_stop_fw(sc);
4830 bge_sig_pre_reset(sc, BGE_RESET_START);
4831 bge_reset(sc);
4832 bge_sig_legacy(sc, BGE_RESET_START);
4833 bge_sig_post_reset(sc, BGE_RESET_START);
4834
4835 bge_chipinit(sc);
4836
4837 /*
4838 * Init the various state machines, ring
4839 * control blocks and firmware.
4840 */
4841 if (bge_blockinit(sc)) {
4842 device_printf(sc->bge_dev, "initialization failure\n");
4843 return;
4844 }
4845
4846 ifp = sc->bge_ifp;
4847
4848 /* Specify MTU. */
4849 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4850 ETHER_HDR_LEN + ETHER_CRC_LEN +
4851 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4852
4853 /* Load our MAC address. */
4854 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4855 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4856 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4857
4858 /* Program promiscuous mode. */
4859 bge_setpromisc(sc);
4860
4861 /* Program multicast filter. */
4862 bge_setmulti(sc);
4863
4864 /* Program VLAN tag stripping. */
4865 bge_setvlan(sc);
4866
4867 /* Override UDP checksum offloading. */
4868 if (sc->bge_forced_udpcsum == 0)
4869 sc->bge_csum_features &= ~CSUM_UDP;
4870 else
4871 sc->bge_csum_features |= CSUM_UDP;
4872 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4873 ifp->if_capenable & IFCAP_TXCSUM) {
4874 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4875 ifp->if_hwassist |= sc->bge_csum_features;
4876 }
4877
4878 /* Init RX ring. */
4879 if (bge_init_rx_ring_std(sc) != 0) {
4880 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4881 bge_stop(sc);
4882 return;
4883 }
4884
4885 /*
4886 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4887 * memory to ensure that the chip has in fact read the first
4888 * entry of the ring.
4889 */
4890 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4891 uint32_t v, i;
4892 for (i = 0; i < 10; i++) {
4893 DELAY(20);
4894 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4895 if (v == (MCLBYTES - ETHER_ALIGN))
4896 break;
4897 }
4898 if (i == 10)
4899 device_printf (sc->bge_dev,
4900 "5705 A0 chip failed to load RX ring\n");
4901 }
4902
4903 /* Init jumbo RX ring. */
4904 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4905 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4906 (MCLBYTES - ETHER_ALIGN)) {
4907 if (bge_init_rx_ring_jumbo(sc) != 0) {
4908 device_printf(sc->bge_dev,
4909 "no memory for jumbo Rx buffers.\n");
4910 bge_stop(sc);
4911 return;
4912 }
4913 }
4914
4915 /* Init our RX return ring index. */
4916 sc->bge_rx_saved_considx = 0;
4917
4918 /* Init our RX/TX stat counters. */
4919 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4920
4921 /* Init TX ring. */
4922 bge_init_tx_ring(sc);
4923
4924 /* Enable TX MAC state machine lockup fix. */
4925 mode = CSR_READ_4(sc, BGE_TX_MODE);
4926 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4927 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4928 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
4929 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4930 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4931 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4932 }
4933 /* Turn on transmitter. */
4934 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4935
4936 /* Turn on receiver. */
4937 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4938
4939 /*
4940 * Set the number of good frames to receive after RX MBUF
4941 * Low Watermark has been reached. After the RX MAC receives
4942 * this number of frames, it will drop subsequent incoming
4943 * frames until the MBUF High Watermark is reached.
4944 */
4945 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4946 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4947 else
4948 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4949
4950 /* Clear MAC statistics. */
4951 if (BGE_IS_5705_PLUS(sc))
4952 bge_stats_clear_regs(sc);
4953
4954 /* Tell firmware we're alive. */
4955 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4956
4957#ifdef DEVICE_POLLING
4958 /* Disable interrupts if we are polling. */
4959 if (ifp->if_capenable & IFCAP_POLLING) {
4960 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4961 BGE_PCIMISCCTL_MASK_PCI_INTR);
4962 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4963 } else
4964#endif
4965
4966 /* Enable host interrupts. */
4967 {
4968 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4969 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4970 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4971 }
4972
4973 bge_ifmedia_upd_locked(ifp);
4974
4975 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4976 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4977
4978 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4979}
4980
4981static void
4982bge_init(void *xsc)
4983{
4984 struct bge_softc *sc = xsc;
4985
4986 BGE_LOCK(sc);
4987 bge_init_locked(sc);
4988 BGE_UNLOCK(sc);
4989}
4990
4991/*
4992 * Set media options.
4993 */
4994static int
4995bge_ifmedia_upd(struct ifnet *ifp)
4996{
4997 struct bge_softc *sc = ifp->if_softc;
4998 int res;
4999
5000 BGE_LOCK(sc);
5001 res = bge_ifmedia_upd_locked(ifp);
5002 BGE_UNLOCK(sc);
5003
5004 return (res);
5005}
5006
5007static int
5008bge_ifmedia_upd_locked(struct ifnet *ifp)
5009{
5010 struct bge_softc *sc = ifp->if_softc;
5011 struct mii_data *mii;
5012 struct mii_softc *miisc;
5013 struct ifmedia *ifm;
5014
5015 BGE_LOCK_ASSERT(sc);
5016
5017 ifm = &sc->bge_ifmedia;
5018
5019 /* If this is a 1000baseX NIC, enable the TBI port. */
5020 if (sc->bge_flags & BGE_FLAG_TBI) {
5021 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5022 return (EINVAL);
5023 switch(IFM_SUBTYPE(ifm->ifm_media)) {
5024 case IFM_AUTO:
5025 /*
5026 * The BCM5704 ASIC appears to have a special
5027 * mechanism for programming the autoneg
5028 * advertisement registers in TBI mode.
5029 */
5030 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5031 uint32_t sgdig;
5032 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5033 if (sgdig & BGE_SGDIGSTS_DONE) {
5034 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5035 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5036 sgdig |= BGE_SGDIGCFG_AUTO |
5037 BGE_SGDIGCFG_PAUSE_CAP |
5038 BGE_SGDIGCFG_ASYM_PAUSE;
5039 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5040 sgdig | BGE_SGDIGCFG_SEND);
5041 DELAY(5);
5042 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5043 }
5044 }
5045 break;
5046 case IFM_1000_SX:
5047 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5048 BGE_CLRBIT(sc, BGE_MAC_MODE,
5049 BGE_MACMODE_HALF_DUPLEX);
5050 } else {
5051 BGE_SETBIT(sc, BGE_MAC_MODE,
5052 BGE_MACMODE_HALF_DUPLEX);
5053 }
5054 break;
5055 default:
5056 return (EINVAL);
5057 }
5058 return (0);
5059 }
5060
5061 sc->bge_link_evt++;
5062 mii = device_get_softc(sc->bge_miibus);
5063 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5064 PHY_RESET(miisc);
5065 mii_mediachg(mii);
5066
5067 /*
5068 * Force an interrupt so that we will call bge_link_upd
5069 * if needed and clear any pending link state attention.
5070 * Without this we do not get any further interrupts for
5071 * link state changes, so the link never comes UP and we
5072 * cannot send in bge_start_locked. The only way to get
5073 * things working again was to receive a packet and take
5074 * an RX interrupt.
5075 * bge_tick should help for fiber cards, and we might not
5076 * need to do this here if BGE_FLAG_TBI is set, but since
5077 * we poll for fiber anyway it should not harm.
5078 */
5079 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5080 sc->bge_flags & BGE_FLAG_5788)
5081 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5082 else
5083 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5084
5085 return (0);
5086}
5087
5088/*
5089 * Report current media status.
5090 */
5091static void
5092bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5093{
5094 struct bge_softc *sc = ifp->if_softc;
5095 struct mii_data *mii;
5096
5097 BGE_LOCK(sc);
5098
5099 if (sc->bge_flags & BGE_FLAG_TBI) {
5100 ifmr->ifm_status = IFM_AVALID;
5101 ifmr->ifm_active = IFM_ETHER;
5102 if (CSR_READ_4(sc, BGE_MAC_STS) &
5103 BGE_MACSTAT_TBI_PCS_SYNCHED)
5104 ifmr->ifm_status |= IFM_ACTIVE;
5105 else {
5106 ifmr->ifm_active |= IFM_NONE;
5107 BGE_UNLOCK(sc);
5108 return;
5109 }
5110 ifmr->ifm_active |= IFM_1000_SX;
5111 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5112 ifmr->ifm_active |= IFM_HDX;
5113 else
5114 ifmr->ifm_active |= IFM_FDX;
5115 BGE_UNLOCK(sc);
5116 return;
5117 }
5118
5119 mii = device_get_softc(sc->bge_miibus);
5120 mii_pollstat(mii);
5121 ifmr->ifm_active = mii->mii_media_active;
5122 ifmr->ifm_status = mii->mii_media_status;
5123
5124 BGE_UNLOCK(sc);
5125}
5126
5127static int
5128bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5129{
5130 struct bge_softc *sc = ifp->if_softc;
5131 struct ifreq *ifr = (struct ifreq *) data;
5132 struct mii_data *mii;
5133 int flags, mask, error = 0;
5134
5135 switch (command) {
5136 case SIOCSIFMTU:
5137 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5138 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5139 if (ifr->ifr_mtu < ETHERMIN ||
5140 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5141 error = EINVAL;
5142 break;
5143 }
5144 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5145 error = EINVAL;
5146 break;
5147 }
5148 BGE_LOCK(sc);
5149 if (ifp->if_mtu != ifr->ifr_mtu) {
5150 ifp->if_mtu = ifr->ifr_mtu;
5151 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5152 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5153 bge_init_locked(sc);
5154 }
5155 }
5156 BGE_UNLOCK(sc);
5157 break;
5158 case SIOCSIFFLAGS:
5159 BGE_LOCK(sc);
5160 if (ifp->if_flags & IFF_UP) {
5161 /*
5162 * If only the state of the PROMISC flag changed,
5163 * then just use the 'set promisc mode' command
5164 * instead of reinitializing the entire NIC. Doing
5165 * a full re-init means reloading the firmware and
5166 * waiting for it to start up, which may take a
5167 * second or two. Similarly for ALLMULTI.
5168 */
5169 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5170 flags = ifp->if_flags ^ sc->bge_if_flags;
5171 if (flags & IFF_PROMISC)
5172 bge_setpromisc(sc);
5173 if (flags & IFF_ALLMULTI)
5174 bge_setmulti(sc);
5175 } else
5176 bge_init_locked(sc);
5177 } else {
5178 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5179 bge_stop(sc);
5180 }
5181 }
5182 sc->bge_if_flags = ifp->if_flags;
5183 BGE_UNLOCK(sc);
5184 error = 0;
5185 break;
5186 case SIOCADDMULTI:
5187 case SIOCDELMULTI:
5188 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5189 BGE_LOCK(sc);
5190 bge_setmulti(sc);
5191 BGE_UNLOCK(sc);
5192 error = 0;
5193 }
5194 break;
5195 case SIOCSIFMEDIA:
5196 case SIOCGIFMEDIA:
5197 if (sc->bge_flags & BGE_FLAG_TBI) {
5198 error = ifmedia_ioctl(ifp, ifr,
5199 &sc->bge_ifmedia, command);
5200 } else {
5201 mii = device_get_softc(sc->bge_miibus);
5202 error = ifmedia_ioctl(ifp, ifr,
5203 &mii->mii_media, command);
5204 }
5205 break;
5206 case SIOCSIFCAP:
5207 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5208#ifdef DEVICE_POLLING
5209 if (mask & IFCAP_POLLING) {
5210 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5211 error = ether_poll_register(bge_poll, ifp);
5212 if (error)
5213 return (error);
5214 BGE_LOCK(sc);
5215 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5216 BGE_PCIMISCCTL_MASK_PCI_INTR);
5217 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5218 ifp->if_capenable |= IFCAP_POLLING;
5219 BGE_UNLOCK(sc);
5220 } else {
5221 error = ether_poll_deregister(ifp);
5222 /* Enable interrupt even in error case */
5223 BGE_LOCK(sc);
5224 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5225 BGE_PCIMISCCTL_MASK_PCI_INTR);
5226 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5227 ifp->if_capenable &= ~IFCAP_POLLING;
5228 BGE_UNLOCK(sc);
5229 }
5230 }
5231#endif
5232 if ((mask & IFCAP_TXCSUM) != 0 &&
5233 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5234 ifp->if_capenable ^= IFCAP_TXCSUM;
5235 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5236 ifp->if_hwassist |= sc->bge_csum_features;
5237 else
5238 ifp->if_hwassist &= ~sc->bge_csum_features;
5239 }
5240
5241 if ((mask & IFCAP_RXCSUM) != 0 &&
5242 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5243 ifp->if_capenable ^= IFCAP_RXCSUM;
5244
5245 if ((mask & IFCAP_TSO4) != 0 &&
5246 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5247 ifp->if_capenable ^= IFCAP_TSO4;
5248 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5249 ifp->if_hwassist |= CSUM_TSO;
5250 else
5251 ifp->if_hwassist &= ~CSUM_TSO;
5252 }
5253
5254 if (mask & IFCAP_VLAN_MTU) {
5255 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5256 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5257 bge_init(sc);
5258 }
5259
5260 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5261 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5262 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5263 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5264 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5265 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5266 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5267 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5268 BGE_LOCK(sc);
5269 bge_setvlan(sc);
5270 BGE_UNLOCK(sc);
5271 }
5272#ifdef VLAN_CAPABILITIES
5273 VLAN_CAPABILITIES(ifp);
5274#endif
5275 break;
5276 default:
5277 error = ether_ioctl(ifp, command, data);
5278 break;
5279 }
5280
5281 return (error);
5282}
5283
5284static void
5285bge_watchdog(struct bge_softc *sc)
5286{
5287 struct ifnet *ifp;
5288
5289 BGE_LOCK_ASSERT(sc);
5290
5291 if (sc->bge_timer == 0 || --sc->bge_timer)
5292 return;
5293
5294 ifp = sc->bge_ifp;
5295
5296 if_printf(ifp, "watchdog timeout -- resetting\n");
5297
5298 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5299 bge_init_locked(sc);
5300
5301 ifp->if_oerrors++;
5302}
5303
5304static void
5305bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5306{
5307 int i;
5308
5309 BGE_CLRBIT(sc, reg, bit);
5310
5311 for (i = 0; i < BGE_TIMEOUT; i++) {
5312 if ((CSR_READ_4(sc, reg) & bit) == 0)
5313 return;
5314 DELAY(100);
5315 }
5316}
5317
5318/*
5319 * Stop the adapter and free any mbufs allocated to the
5320 * RX and TX lists.
5321 */
5322static void
5323bge_stop(struct bge_softc *sc)
5324{
5325 struct ifnet *ifp;
5326
5327 BGE_LOCK_ASSERT(sc);
5328
5329 ifp = sc->bge_ifp;
5330
5331 callout_stop(&sc->bge_stat_ch);
5332
5333 /* Disable host interrupts. */
5334 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5335 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5336
5337 /*
5338 * Tell firmware we're shutting down.
5339 */
5340 bge_stop_fw(sc);
5341 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5342
5343 /*
5344 * Disable all of the receiver blocks.
5345 */
5346 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5347 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5348 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5349 if (BGE_IS_5700_FAMILY(sc))
5350 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5351 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5352 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5353 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5354
5355 /*
5356 * Disable all of the transmit blocks.
5357 */
5358 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5359 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5360 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5361 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5362 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5363 if (BGE_IS_5700_FAMILY(sc))
5364 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5365 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5366
5367 /*
5368 * Shut down all of the memory managers and related
5369 * state machines.
5370 */
5371 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5372 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5373 if (BGE_IS_5700_FAMILY(sc))
5374 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5375
5376 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5377 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5378 if (!(BGE_IS_5705_PLUS(sc))) {
5379 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5380 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5381 }
5382 /* Update MAC statistics. */
5383 if (BGE_IS_5705_PLUS(sc))
5384 bge_stats_update_regs(sc);
5385
5386 bge_reset(sc);
5387 bge_sig_legacy(sc, BGE_RESET_STOP);
5388 bge_sig_post_reset(sc, BGE_RESET_STOP);
5389
5390 /*
5391 * Keep the ASF firmware running if up.
5392 */
5393 if (sc->bge_asf_mode & ASF_STACKUP)
5394 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5395 else
5396 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5397
5398 /* Free the RX lists. */
5399 bge_free_rx_ring_std(sc);
5400
5401 /* Free jumbo RX list. */
5402 if (BGE_IS_JUMBO_CAPABLE(sc))
5403 bge_free_rx_ring_jumbo(sc);
5404
5405 /* Free TX buffers. */
5406 bge_free_tx_ring(sc);
5407
5408 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5409
5410 /* Clear MAC's link state (PHY may still have link UP). */
5411 if (bootverbose && sc->bge_link)
5412 if_printf(sc->bge_ifp, "link DOWN\n");
5413 sc->bge_link = 0;
5414
5415 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5416}
5417
5418/*
5419 * Stop all chip I/O so that the kernel's probe routines don't
5420 * get confused by errant DMAs when rebooting.
5421 */
5422static int
5423bge_shutdown(device_t dev)
5424{
5425 struct bge_softc *sc;
5426
5427 sc = device_get_softc(dev);
5428 BGE_LOCK(sc);
5429 bge_stop(sc);
5430 bge_reset(sc);
5431 BGE_UNLOCK(sc);
5432
5433 return (0);
5434}
5435
5436static int
5437bge_suspend(device_t dev)
5438{
5439 struct bge_softc *sc;
5440
5441 sc = device_get_softc(dev);
5442 BGE_LOCK(sc);
5443 bge_stop(sc);
5444 BGE_UNLOCK(sc);
5445
5446 return (0);
5447}
5448
5449static int
5450bge_resume(device_t dev)
5451{
5452 struct bge_softc *sc;
5453 struct ifnet *ifp;
5454
5455 sc = device_get_softc(dev);
5456 BGE_LOCK(sc);
5457 ifp = sc->bge_ifp;
5458 if (ifp->if_flags & IFF_UP) {
5459 bge_init_locked(sc);
5460 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5461 bge_start_locked(ifp);
5462 }
5463 BGE_UNLOCK(sc);
5464
5465 return (0);
5466}
5467
5468static void
5469bge_link_upd(struct bge_softc *sc)
5470{
5471 struct mii_data *mii;
5472 uint32_t link, status;
5473
5474 BGE_LOCK_ASSERT(sc);
5475
5476 /* Clear 'pending link event' flag. */
5477 sc->bge_link_evt = 0;
5478
5479 /*
5480 * Process link state changes.
5481 * Grrr. The link status word in the status block does
5482 * not work correctly on the BCM5700 rev AX and BX chips,
5483 * according to all available information. Hence, we have
5484 * to enable MII interrupts in order to properly obtain
5485 * async link changes. Unfortunately, this also means that
5486 * we have to read the MAC status register to detect link
5487 * changes, thereby adding an additional register access to
5488 * the interrupt handler.
5489 *
5490 * XXX: perhaps link state detection procedure used for
5491 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5492 */
5493
5494 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5495 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5496 status = CSR_READ_4(sc, BGE_MAC_STS);
5497 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5498 mii = device_get_softc(sc->bge_miibus);
5499 mii_pollstat(mii);
5500 if (!sc->bge_link &&
5501 mii->mii_media_status & IFM_ACTIVE &&
5502 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5503 sc->bge_link++;
5504 if (bootverbose)
5505 if_printf(sc->bge_ifp, "link UP\n");
5506 } else if (sc->bge_link &&
5507 (!(mii->mii_media_status & IFM_ACTIVE) ||
5508 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5509 sc->bge_link = 0;
5510 if (bootverbose)
5511 if_printf(sc->bge_ifp, "link DOWN\n");
5512 }
5513
5514 /* Clear the interrupt. */
5515 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5516 BGE_EVTENB_MI_INTERRUPT);
5517 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5518 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5519 BRGPHY_INTRS);
5520 }
5521 return;
5522 }
5523
5524 if (sc->bge_flags & BGE_FLAG_TBI) {
5525 status = CSR_READ_4(sc, BGE_MAC_STS);
5526 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5527 if (!sc->bge_link) {
5528 sc->bge_link++;
5529 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5530 BGE_CLRBIT(sc, BGE_MAC_MODE,
5531 BGE_MACMODE_TBI_SEND_CFGS);
5532 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5533 if (bootverbose)
5534 if_printf(sc->bge_ifp, "link UP\n");
5535 if_link_state_change(sc->bge_ifp,
5536 LINK_STATE_UP);
5537 }
5538 } else if (sc->bge_link) {
5539 sc->bge_link = 0;
5540 if (bootverbose)
5541 if_printf(sc->bge_ifp, "link DOWN\n");
5542 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5543 }
5544 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5545 /*
5546 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED bit
5547 * in the status word always set. Work around this bug by reading
5548 * the PHY link status directly.
5549 */
5550 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5551
5552 if (link != sc->bge_link ||
5553 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5554 mii = device_get_softc(sc->bge_miibus);
5555 mii_pollstat(mii);
5556 if (!sc->bge_link &&
5557 mii->mii_media_status & IFM_ACTIVE &&
5558 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5559 sc->bge_link++;
5560 if (bootverbose)
5561 if_printf(sc->bge_ifp, "link UP\n");
5562 } else if (sc->bge_link &&
5563 (!(mii->mii_media_status & IFM_ACTIVE) ||
5564 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5565 sc->bge_link = 0;
5566 if (bootverbose)
5567 if_printf(sc->bge_ifp, "link DOWN\n");
5568 }
5569 }
5570 } else {
5571 /*
5572 * For controllers that call mii_tick, we have to poll
5573 * link status.
5574 */
5575 mii = device_get_softc(sc->bge_miibus);
5576 mii_pollstat(mii);
5577 bge_miibus_statchg(sc->bge_dev);
5578 }
5579
5580 /* Clear the attention. */
5581 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5582 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5583 BGE_MACSTAT_LINK_CHANGED);
5584}
5585
5586static void
5587bge_add_sysctls(struct bge_softc *sc)
5588{
5589 struct sysctl_ctx_list *ctx;
5590 struct sysctl_oid_list *children;
5591 char tn[32];
5592 int unit;
5593
5594 ctx = device_get_sysctl_ctx(sc->bge_dev);
5595 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5596
5597#ifdef BGE_REGISTER_DEBUG
5598 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5599 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5600 "Debug Information");
5601
5602 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5603 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5604 "Register Read");
5605
5606 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5607 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5608 "Memory Read");
5609
5610#endif
5611
5612 unit = device_get_unit(sc->bge_dev);
5613 /*
5614 * A common design characteristic for many Broadcom client controllers
5615 * is that they only support a single outstanding DMA read operation
5616 * on the PCIe bus. This means that it will take twice as long to fetch
5617 * a TX frame that is split into header and payload buffers as it does
5618 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5619 * these controllers, coalescing buffers to reduce the number of memory
5620 * reads is an effective way to get maximum performance (about 940Mbps).
5621 * Without collapsing TX buffers the maximum TCP bulk transfer
5622 * performance is about 850Mbps. However, forcibly coalescing mbufs
5623 * consumes a lot of CPU cycles, so leave it off by default.
5624 */
5625 sc->bge_forced_collapse = 0;
5626 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5627 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5628 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5629 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5630 "Number of fragmented TX buffers of a frame allowed before "
5631 "forced collapsing");
5632
5633 sc->bge_msi_disable = 0;
5634 snprintf(tn, sizeof(tn), "dev.bge.%d.msi_disable", unit);
5635 TUNABLE_INT_FETCH(tn, &sc->bge_msi_disable);
5636 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi_disable",
5637 CTLFLAG_RD, &sc->bge_msi_disable, 0, "Disable MSI");
5633 sc->bge_msi = 1;
5634 snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
5635 TUNABLE_INT_FETCH(tn, &sc->bge_msi);
5636 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
5637 CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
5638
5639 /*
5640 * It seems all Broadcom controllers have a bug that can generate UDP
5641 * datagrams with checksum value 0 when TX UDP checksum offloading is
5642 * enabled. Generating UDP checksum value 0 is an RFC 768 violation.
5643 * Even though the probability of generating such UDP datagrams is
5644 * low, I don't want to see FreeBSD boxes inject such datagrams
5645 * into the network, so UDP checksum offloading is disabled by default.
5646 * Users can still override this behavior by setting a sysctl variable,
5647 * dev.bge.0.forced_udpcsum.
5648 */
5649 sc->bge_forced_udpcsum = 0;
5650 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5651 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5652 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5653 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5654 "Enable UDP checksum offloading even if controller can "
5655 "generate UDP checksum value 0");
5656
5657 if (BGE_IS_5705_PLUS(sc))
5658 bge_add_sysctl_stats_regs(sc, ctx, children);
5659 else
5660 bge_add_sysctl_stats(sc, ctx, children);
5661}
5662
5663#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5664 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5665 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5666 desc)
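/*
 * Expansion sketch (editor's note): a line such as
 *
 *	BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors,
 *	    "InputErrors");
 *
 * registers a read-only "InputErrors" node whose handler,
 * bge_sysctl_stats() below, receives offsetof(struct bge_stats,
 * ifInErrors) as its arg2 and reads the 32-bit counter word at that
 * offset in the chip's statistics block through the memory window
 * (BGE_MEMWIN_START + BGE_STATS_BLOCK).
 */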
5667
5668static void
5669bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5670 struct sysctl_oid_list *parent)
5671{
5672 struct sysctl_oid *tree;
5673 struct sysctl_oid_list *children, *schildren;
5674
5675 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5676 NULL, "BGE Statistics");
5677 schildren = children = SYSCTL_CHILDREN(tree);
5678 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5679 children, COSFramesDroppedDueToFilters,
5680 "FramesDroppedDueToFilters");
5681 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5682 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5683 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5684 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5685 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5686 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5687 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5688 children, ifInDiscards, "InputDiscards");
5689 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5690 children, ifInErrors, "InputErrors");
5691 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5692 children, nicRecvThresholdHit, "RecvThresholdHit");
5693 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5694 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5695 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5696 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5697 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5698 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5699 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5700 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5701 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5702 children, nicRingStatusUpdate, "RingStatusUpdate");
5703 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5704 children, nicInterrupts, "Interrupts");
5705 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5706 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5707 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5708 children, nicSendThresholdHit, "SendThresholdHit");
5709
5710 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5711 NULL, "BGE RX Statistics");
5712 children = SYSCTL_CHILDREN(tree);
5713 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5714 children, rxstats.ifHCInOctets, "ifHCInOctets");
5715 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5716 children, rxstats.etherStatsFragments, "Fragments");
5717 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5718 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5719 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5720 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5721 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5722 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5723 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5724 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5725 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5726 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5727 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5728 children, rxstats.xoffPauseFramesReceived,
5729 "xoffPauseFramesReceived");
5730 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5731 children, rxstats.macControlFramesReceived,
5732 "ControlFramesReceived");
5733 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5734 children, rxstats.xoffStateEntered, "xoffStateEntered");
5735 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5736 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5737 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5738 children, rxstats.etherStatsJabbers, "Jabbers");
5739 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5740 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5741 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5742 children, rxstats.inRangeLengthError, "inRangeLengthError");
5743 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5744 children, rxstats.outRangeLengthError, "outRangeLengthError");
5745
5746 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5747 NULL, "BGE TX Statistics");
5748 children = SYSCTL_CHILDREN(tree);
5749 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5750 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5751 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5752 children, txstats.etherStatsCollisions, "Collisions");
5753 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5754 children, txstats.outXonSent, "XonSent");
5755 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5756 children, txstats.outXoffSent, "XoffSent");
5757 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5758 children, txstats.flowControlDone, "flowControlDone");
5759 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5760 children, txstats.dot3StatsInternalMacTransmitErrors,
5761 "InternalMacTransmitErrors");
5762 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5763 children, txstats.dot3StatsSingleCollisionFrames,
5764 "SingleCollisionFrames");
5765 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5766 children, txstats.dot3StatsMultipleCollisionFrames,
5767 "MultipleCollisionFrames");
5768 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5769 children, txstats.dot3StatsDeferredTransmissions,
5770 "DeferredTransmissions");
5771 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5772 children, txstats.dot3StatsExcessiveCollisions,
5773 "ExcessiveCollisions");
5774 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5775 children, txstats.dot3StatsLateCollisions,
5776 "LateCollisions");
5777 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5778 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5779 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5780 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5781 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5782 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5783 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5784 children, txstats.dot3StatsCarrierSenseErrors,
5785 "CarrierSenseErrors");
5786 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5787 children, txstats.ifOutDiscards, "Discards");
5788 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5789 children, txstats.ifOutErrors, "Errors");
5790}
5791
5792#undef BGE_SYSCTL_STAT
5793
5794#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5795 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5796
5797static void
5798bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5799 struct sysctl_oid_list *parent)
5800{
5801 struct sysctl_oid *tree;
5802 struct sysctl_oid_list *child, *schild;
5803 struct bge_mac_stats *stats;
5804
5805 stats = &sc->bge_mac_stats;
5806 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5807 NULL, "BGE Statistics");
5808 schild = child = SYSCTL_CHILDREN(tree);
5809 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5810 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5811 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5812 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5813 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5814 &stats->DmaWriteHighPriQueueFull,
5815 "NIC DMA Write High Priority Queue Full");
5816 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5817 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5818 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5819 &stats->InputDiscards, "Discarded Input Frames");
5820 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5821 &stats->InputErrors, "Input Errors");
5822 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5823 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5824
5825 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5826 NULL, "BGE RX Statistics");
5827 child = SYSCTL_CHILDREN(tree);
5828 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5829 &stats->ifHCInOctets, "Inbound Octets");
5830 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5831 &stats->etherStatsFragments, "Fragments");
5832 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5833 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5834 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5835 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5836 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5837 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5838 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5839 &stats->dot3StatsFCSErrors, "FCS Errors");
5840 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5841 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5842 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5843 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5844 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5845 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5846 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5847 &stats->macControlFramesReceived, "MAC Control Frames Received");
5848 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5849 &stats->xoffStateEntered, "XOFF State Entered");
5850 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5851 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5852 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5853 &stats->etherStatsJabbers, "Jabbers");
5854 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5855 &stats->etherStatsUndersizePkts, "Undersized Packets");
5856
5857 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5858 NULL, "BGE TX Statistics");
5859 child = SYSCTL_CHILDREN(tree);
5860 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5861 &stats->ifHCOutOctets, "Outbound Octets");
5862 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5863 &stats->etherStatsCollisions, "TX Collisions");
5864 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5865 &stats->outXonSent, "XON Sent");
5866 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5867 &stats->outXoffSent, "XOFF Sent");
5868 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5869 &stats->dot3StatsInternalMacTransmitErrors,
5870 "Internal MAC TX Errors");
5871 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5872 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5873 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5874 &stats->dot3StatsMultipleCollisionFrames,
5875 "Multiple Collision Frames");
5876 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5877 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5878 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5879 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5880 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5881 &stats->dot3StatsLateCollisions, "Late Collisions");
5882 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5883 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5884 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5885 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5886 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5887 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5888}
5889
5890#undef BGE_SYSCTL_STAT_ADD64
5891
5892static int
5893bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5894{
5895 struct bge_softc *sc;
5896 uint32_t result;
5897 int offset;
5898
5899 sc = (struct bge_softc *)arg1;
5900 offset = arg2;
5901 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5902 offsetof(bge_hostaddr, bge_addr_lo));
5903 return (sysctl_handle_int(oidp, &result, 0, req));
5904}
5905
5906#ifdef BGE_REGISTER_DEBUG
5907static int
5908bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5909{
5910 struct bge_softc *sc;
5911 uint16_t *sbdata;
5912 int error, result, sbsz;
5913 int i, j;
5914
5915 result = -1;
5916 error = sysctl_handle_int(oidp, &result, 0, req);
5917 if (error || (req->newptr == NULL))
5918 return (error);
5919
5920 if (result == 1) {
5921 sc = (struct bge_softc *)arg1;
5922
5923 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5924 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5925 sbsz = BGE_STATUS_BLK_SZ;
5926 else
5927 sbsz = 32;
5928 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5929 printf("Status Block:\n");
5930 BGE_LOCK(sc);
5931 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5932 sc->bge_cdata.bge_status_map,
5933 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5934 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5935 printf("%06x:", i);
5936 for (j = 0; j < 8; j++)
5937 printf(" %04x", sbdata[i++]);
5938 printf("\n");
5939 }
5940
5941 printf("Registers:\n");
5942 for (i = 0x800; i < 0xA00; ) {
5943 printf("%06x:", i);
5944 for (j = 0; j < 8; j++) {
5945 printf(" %08x", CSR_READ_4(sc, i));
5946 i += 4;
5947 }
5948 printf("\n");
5949 }
5950 BGE_UNLOCK(sc);
5951
5952 printf("Hardware Flags:\n");
5953 if (BGE_IS_5717_PLUS(sc))
5954 printf(" - 5717 Plus\n");
5955 if (BGE_IS_5755_PLUS(sc))
5956 printf(" - 5755 Plus\n");
5957 if (BGE_IS_575X_PLUS(sc))
5958 printf(" - 575X Plus\n");
5959 if (BGE_IS_5705_PLUS(sc))
5960 printf(" - 5705 Plus\n");
5961 if (BGE_IS_5714_FAMILY(sc))
5962 printf(" - 5714 Family\n");
5963 if (BGE_IS_5700_FAMILY(sc))
5964 printf(" - 5700 Family\n");
5965 if (sc->bge_flags & BGE_FLAG_JUMBO)
5966 printf(" - Supports Jumbo Frames\n");
5967 if (sc->bge_flags & BGE_FLAG_PCIX)
5968 printf(" - PCI-X Bus\n");
5969 if (sc->bge_flags & BGE_FLAG_PCIE)
5970 printf(" - PCI Express Bus\n");
5971 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5972 printf(" - No 3 LEDs\n");
5973 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5974 printf(" - RX Alignment Bug\n");
5975 }
5976
5977 return (error);
5978}
5979
5980static int
5981bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5982{
5983 struct bge_softc *sc;
5984 int error;
5985 uint16_t result;
5986 uint32_t val;
5987
5988 result = -1;
5989 error = sysctl_handle_int(oidp, &result, 0, req);
5990 if (error || (req->newptr == NULL))
5991 return (error);
5992
5993 if (result < 0x8000) {
5994 sc = (struct bge_softc *)arg1;
5995 val = CSR_READ_4(sc, result);
5996 printf("reg 0x%06X = 0x%08X\n", result, val);
5997 }
5998
5999 return (error);
6000}
6001
6002static int
6003bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6004{
6005 struct bge_softc *sc;
6006 int error;
6007 uint16_t result;
6008 uint32_t val;
6009
6010 result = -1;
6011 error = sysctl_handle_int(oidp, &result, 0, req);
6012 if (error || (req->newptr == NULL))
6013 return (error);
6014
6015 if (result < 0x8000) {
6016 sc = (struct bge_softc *)arg1;
6017 val = bge_readmem_ind(sc, result);
6018 printf("mem 0x%06X = 0x%08X\n", result, val);
6019 }
6020
6021 return (error);
6022}
6023#endif
6024
6025static int
6026bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6027{
6028
6029 if (sc->bge_flags & BGE_FLAG_EADDR)
6030 return (1);
6031
6032#ifdef __sparc64__
6033 OF_getetheraddr(sc->bge_dev, ether_addr);
6034 return (0);
6035#endif
6036 return (1);
6037}
6038
6039static int
6040bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6041{
6042 uint32_t mac_addr;
6043
6044 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
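	/*
	 * Editor's note: 0x484b is ASCII "HK"; it appears to be a
	 * firmware marker indicating that a valid MAC address is
	 * present, with the first two address bytes in the lower half
	 * of the word read above (an assumption; the code relies only
	 * on the check itself).
	 */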
6045 if ((mac_addr >> 16) == 0x484b) {
6046 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6047 ether_addr[1] = (uint8_t)mac_addr;
6048 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6049 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6050 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6051 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6052 ether_addr[5] = (uint8_t)mac_addr;
6053 return (0);
6054 }
6055 return (1);
6056}
6057
6058static int
6059bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6060{
6061 int mac_offset = BGE_EE_MAC_OFFSET;
6062
6063 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6064 mac_offset = BGE_EE_MAC_OFFSET_5906;
6065
6066 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6067 ETHER_ADDR_LEN));
6068}
6069
6070static int
6071bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6072{
6073
6074 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6075 return (1);
6076
6077 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6078 ETHER_ADDR_LEN));
6079}
6080
6081static int
6082bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6083{
6084 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6085 /* NOTE: Order is critical */
6086 bge_get_eaddr_fw,
6087 bge_get_eaddr_mem,
6088 bge_get_eaddr_nvram,
6089 bge_get_eaddr_eeprom,
6090 NULL
6091 };
6092 const bge_eaddr_fcn_t *func;
6093
6094 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6095 if ((*func)(sc, eaddr) == 0)
6096 break;
6097 }
6098 return (*func == NULL ? ENXIO : 0);
6099}
5638
5639 /*
5640 * It seems all Broadcom controllers have a bug that can generate UDP
5641 * datagrams with checksum value 0 when TX UDP checksum offloading is
5642 * enabled. Generating UDP checksum value 0 is RFC 768 violation.
5643 * Even though the probability of generating such UDP datagrams is
5644 * low, I don't want to see FreeBSD boxes to inject such datagrams
5645 * into network so disable UDP checksum offloading by default. Users
5646 * still override this behavior by setting a sysctl variable,
5647 * dev.bge.0.forced_udpcsum.
5648 */
5649 sc->bge_forced_udpcsum = 0;
5650 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5651 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5652 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5653 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5654 "Enable UDP checksum offloading even if controller can "
5655 "generate UDP checksum value 0");
5656
5657 if (BGE_IS_5705_PLUS(sc))
5658 bge_add_sysctl_stats_regs(sc, ctx, children);
5659 else
5660 bge_add_sysctl_stats(sc, ctx, children);
5661}
5662
5663#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5664 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5665 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5666 desc)
5667
5668static void
5669bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5670 struct sysctl_oid_list *parent)
5671{
5672 struct sysctl_oid *tree;
5673 struct sysctl_oid_list *children, *schildren;
5674
5675 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5676 NULL, "BGE Statistics");
5677 schildren = children = SYSCTL_CHILDREN(tree);
5678 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5679 children, COSFramesDroppedDueToFilters,
5680 "FramesDroppedDueToFilters");
5681 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5682 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5683 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5684 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5685 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5686 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5687 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5688 children, ifInDiscards, "InputDiscards");
5689 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5690 children, ifInErrors, "InputErrors");
5691 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5692 children, nicRecvThresholdHit, "RecvThresholdHit");
5693 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5694 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5695 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5696 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5697 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5698 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5699 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5700 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5701 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5702 children, nicRingStatusUpdate, "RingStatusUpdate");
5703 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5704 children, nicInterrupts, "Interrupts");
5705 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5706 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5707 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5708 children, nicSendThresholdHit, "SendThresholdHit");
5709
5710 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5711 NULL, "BGE RX Statistics");
5712 children = SYSCTL_CHILDREN(tree);
5713 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5714 children, rxstats.ifHCInOctets, "ifHCInOctets");
5715 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5716 children, rxstats.etherStatsFragments, "Fragments");
5717 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5718 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5719 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5720 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5721 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5722 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5723 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5724 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5725 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5726 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5727 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5728 children, rxstats.xoffPauseFramesReceived,
5729 "xoffPauseFramesReceived");
5730 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5731 children, rxstats.macControlFramesReceived,
5732 "ControlFramesReceived");
5733 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5734 children, rxstats.xoffStateEntered, "xoffStateEntered");
5735 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5736 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5737 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5738 children, rxstats.etherStatsJabbers, "Jabbers");
5739 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5740 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5741 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5742 children, rxstats.inRangeLengthError, "inRangeLengthError");
5743 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5744 children, rxstats.outRangeLengthError, "outRangeLengthError");
5745
5746 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5747 NULL, "BGE TX Statistics");
5748 children = SYSCTL_CHILDREN(tree);
5749 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5750 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5751 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5752 children, txstats.etherStatsCollisions, "Collisions");
5753 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5754 children, txstats.outXonSent, "XonSent");
5755 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5756 children, txstats.outXoffSent, "XoffSent");
5757 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5758 children, txstats.flowControlDone, "flowControlDone");
5759 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5760 children, txstats.dot3StatsInternalMacTransmitErrors,
5761 "InternalMacTransmitErrors");
5762 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5763 children, txstats.dot3StatsSingleCollisionFrames,
5764 "SingleCollisionFrames");
5765 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5766 children, txstats.dot3StatsMultipleCollisionFrames,
5767 "MultipleCollisionFrames");
5768 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5769 children, txstats.dot3StatsDeferredTransmissions,
5770 "DeferredTransmissions");
5771 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5772 children, txstats.dot3StatsExcessiveCollisions,
5773 "ExcessiveCollisions");
5774 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5775 children, txstats.dot3StatsLateCollisions,
5776 "LateCollisions");
5777 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5778 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5779 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5780 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5781 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5782 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5783 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5784 children, txstats.dot3StatsCarrierSenseErrors,
5785 "CarrierSenseErrors");
5786 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5787 children, txstats.ifOutDiscards, "Discards");
5788 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5789 children, txstats.ifOutErrors, "Errors");
5790}
5791
5792#undef BGE_SYSCTL_STAT
5793
5794#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5795 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5796
5797static void
5798bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5799 struct sysctl_oid_list *parent)
5800{
5801 struct sysctl_oid *tree;
5802 struct sysctl_oid_list *child, *schild;
5803 struct bge_mac_stats *stats;
5804
5805 stats = &sc->bge_mac_stats;
5806 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5807 NULL, "BGE Statistics");
5808 schild = child = SYSCTL_CHILDREN(tree);
5809 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5810 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5811 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5812 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5813 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5814 &stats->DmaWriteHighPriQueueFull,
5815 "NIC DMA Write High Priority Queue Full");
5816 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5817 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5818 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5819 &stats->InputDiscards, "Discarded Input Frames");
5820 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5821 &stats->InputErrors, "Input Errors");
5822 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5823 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5824
5825 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5826 NULL, "BGE RX Statistics");
5827 child = SYSCTL_CHILDREN(tree);
5828 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5829 &stats->ifHCInOctets, "Inbound Octets");
5830 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5831 &stats->etherStatsFragments, "Fragments");
5832 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5833 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5834 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5835 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5836 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5837 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5838 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5839 &stats->dot3StatsFCSErrors, "FCS Errors");
5840 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5841 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5842 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5843 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5844 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5845 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5846 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5847 &stats->macControlFramesReceived, "MAC Control Frames Received");
5848 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5849 &stats->xoffStateEntered, "XOFF State Entered");
5850 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5851 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5852 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5853 &stats->etherStatsJabbers, "Jabbers");
5854 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5855 &stats->etherStatsUndersizePkts, "Undersized Packets");
5856
5857 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5858 NULL, "BGE TX Statistics");
5859 child = SYSCTL_CHILDREN(tree);
5860 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5861 &stats->ifHCOutOctets, "Outbound Octets");
5862 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5863 &stats->etherStatsCollisions, "TX Collisions");
5864 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5865 &stats->outXonSent, "XON Sent");
5866 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5867 &stats->outXoffSent, "XOFF Sent");
5868 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5869 &stats->dot3StatsInternalMacTransmitErrors,
5870 "Internal MAC TX Errors");
5871 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5872 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5873 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5874 &stats->dot3StatsMultipleCollisionFrames,
5875 "Multiple Collision Frames");
5876 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5877 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5878 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5879 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5880 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5881 &stats->dot3StatsLateCollisions, "Late Collisions");
5882 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5883 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5884 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5885 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5886 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5887 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5888}
5889
5890#undef BGE_SYSCTL_STAT_ADD64
5891
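/*
 * Handler for the per-counter statistics sysctls.  arg2 is the offset of
 * the counter within the NIC's statistics block; only the low word of the
 * hi/lo counter pair is read, through the memory window at
 * BGE_MEMWIN_START + BGE_STATS_BLOCK.
 */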
5892static int
5893bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5894{
5895 struct bge_softc *sc;
5896 uint32_t result;
5897 int offset;
5898
5899 sc = (struct bge_softc *)arg1;
5900 offset = arg2;
5901 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5902 offsetof(bge_hostaddr, bge_addr_lo));
5903 return (sysctl_handle_int(oidp, &result, 0, req));
5904}
5905
5906#ifdef BGE_REGISTER_DEBUG
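/*
 * Debug handler: writing 1 to the sysctl dumps the status block, the
 * registers from 0x0800 to 0x09ff and a summary of the hardware flags
 * to the console.
 */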
5907static int
5908bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5909{
5910 struct bge_softc *sc;
5911 uint16_t *sbdata;
5912 int error, result, sbsz;
5913 int i, j;
5914
5915 result = -1;
5916 error = sysctl_handle_int(oidp, &result, 0, req);
5917 if (error || (req->newptr == NULL))
5918 return (error);
5919
5920 if (result == 1) {
5921 sc = (struct bge_softc *)arg1;
5922
5923 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5924 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5925 sbsz = BGE_STATUS_BLK_SZ;
5926 else
5927 sbsz = 32;
5928 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5929 printf("Status Block:\n");
5930 BGE_LOCK(sc);
5931 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5932 sc->bge_cdata.bge_status_map,
5933 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5934 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5935 printf("%06x:", i);
5936 for (j = 0; j < 8; j++)
5937 printf(" %04x", sbdata[i++]);
5938 printf("\n");
5939 }
5940
5941 printf("Registers:\n");
5942 for (i = 0x800; i < 0xA00; ) {
5943 printf("%06x:", i);
5944 for (j = 0; j < 8; j++) {
5945 printf(" %08x", CSR_READ_4(sc, i));
5946 i += 4;
5947 }
5948 printf("\n");
5949 }
5950 BGE_UNLOCK(sc);
5951
5952 printf("Hardware Flags:\n");
5953 if (BGE_IS_5717_PLUS(sc))
5954 printf(" - 5717 Plus\n");
5955 if (BGE_IS_5755_PLUS(sc))
5956 printf(" - 5755 Plus\n");
5957 if (BGE_IS_575X_PLUS(sc))
5958 printf(" - 575X Plus\n");
5959 if (BGE_IS_5705_PLUS(sc))
5960 printf(" - 5705 Plus\n");
5961 if (BGE_IS_5714_FAMILY(sc))
5962 printf(" - 5714 Family\n");
5963 if (BGE_IS_5700_FAMILY(sc))
5964 printf(" - 5700 Family\n");
5965 if (sc->bge_flags & BGE_FLAG_JUMBO)
5966 printf(" - Supports Jumbo Frames\n");
5967 if (sc->bge_flags & BGE_FLAG_PCIX)
5968 printf(" - PCI-X Bus\n");
5969 if (sc->bge_flags & BGE_FLAG_PCIE)
5970 printf(" - PCI Express Bus\n");
5971 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5972 printf(" - No 3 LEDs\n");
5973 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5974 printf(" - RX Alignment Bug\n");
5975 }
5976
5977 return (error);
5978}
5979
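/*
 * Debug handler: writing a register offset below 0x8000 to the sysctl
 * prints the current contents of that register.
 */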
5980static int
5981bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5982{
5983 struct bge_softc *sc;
5984 int error;
5985 int result;
5986 uint32_t val;
5987
5988 result = -1;
5989 error = sysctl_handle_int(oidp, &result, 0, req);
5990 if (error || (req->newptr == NULL))
5991 return (error);
5992
5993 if (result >= 0 && result < 0x8000) {
5994 sc = (struct bge_softc *)arg1;
5995 val = CSR_READ_4(sc, result);
5996 printf("reg 0x%06X = 0x%08X\n", result, val);
5997 }
5998
5999 return (error);
6000}
6001
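/*
 * Debug handler: writing an offset below 0x8000 to the sysctl prints the
 * word at that offset in NIC memory, read via bge_readmem_ind().
 */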
6002static int
6003bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6004{
6005 struct bge_softc *sc;
6006 int error;
6007 int result;
6008 uint32_t val;
6009
6010 result = -1;
6011 error = sysctl_handle_int(oidp, &result, 0, req);
6012 if (error || (req->newptr == NULL))
6013 return (error);
6014
6015 if (result >= 0 && result < 0x8000) {
6016 sc = (struct bge_softc *)arg1;
6017 val = bge_readmem_ind(sc, result);
6018 printf("mem 0x%06X = 0x%08X\n", result, val);
6019 }
6020
6021 return (error);
6022}
6023#endif
6024
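/*
 * Try to obtain the Ethernet address from the platform firmware.  This is
 * skipped when the controller supplies its own address (BGE_FLAG_EADDR)
 * and is only implemented for sparc64, where Open Firmware is queried.
 */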
6025static int
6026bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6027{
6028
6029 if (sc->bge_flags & BGE_FLAG_EADDR)
6030 return (1);
6031
6032#ifdef __sparc64__
6033 OF_getetheraddr(sc->bge_dev, ether_addr);
6034 return (0);
6035#endif
6036 return (1);
6037}
6038
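/*
 * Try to read the Ethernet address from the MAC address area in NIC
 * memory.  The address is only accepted when the high word carries the
 * 0x484b (ASCII "HK") signature.
 */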
6039static int
6040bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6041{
6042 uint32_t mac_addr;
6043
6044 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6045 if ((mac_addr >> 16) == 0x484b) {
6046 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6047 ether_addr[1] = (uint8_t)mac_addr;
6048 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6049 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6050 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6051 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6052 ether_addr[5] = (uint8_t)mac_addr;
6053 return (0);
6054 }
6055 return (1);
6056}
6057
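/*
 * Try to read the Ethernet address from NVRAM.  The BCM5906 stores it at
 * a different offset than the other controllers.
 */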
6058static int
6059bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6060{
6061 int mac_offset = BGE_EE_MAC_OFFSET;
6062
6063 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6064 mac_offset = BGE_EE_MAC_OFFSET_5906;
6065
6066 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6067 ETHER_ADDR_LEN));
6068}
6069
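/*
 * Try to read the Ethernet address from the EEPROM.  The BCM5906 is
 * skipped here; its address is handled by the NVRAM method instead.
 */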
6070static int
6071bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6072{
6073
6074 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6075 return (1);
6076
6077 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6078 ETHER_ADDR_LEN));
6079}
6080
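/*
 * Determine the Ethernet address by trying each source in turn: firmware,
 * NIC memory, NVRAM and finally the EEPROM.  The first method that
 * succeeds is used; ENXIO is returned if they all fail.
 */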
6081static int
6082bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6083{
6084 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6085 /* NOTE: Order is critical */
6086 bge_get_eaddr_fw,
6087 bge_get_eaddr_mem,
6088 bge_get_eaddr_nvram,
6089 bge_get_eaddr_eeprom,
6090 NULL
6091 };
6092 const bge_eaddr_fcn_t *func;
6093
6094 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6095 if ((*func)(sc, eaddr) == 0)
6096 break;
6097 }
6098 return (*func == NULL ? ENXIO : 0);
6099}